positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def process_timer(self, key, fields):
    """
    Process a received timer event.

    :param key: Key of timer
    :param fields: Received fields; ``fields[0]`` is the timer value and
        must be float-convertible
    """
    try:
        # Accumulate per-key timer samples.
        self.timers.setdefault(key, []).append(float(fields[0]))
        # Guard the seen-counter against unbounded growth on long-running
        # processes; reset before it reaches maxint.
        if self.stats_seen >= maxint:
            self.logger.info("hit maxint, reset seen counter")
            self.stats_seen = 0
        self.stats_seen += 1
    except Exception as err:
        # Malformed events are logged and dropped rather than crashing the
        # receive loop.  (Was a Python-2 `print` statement before.)
        self.logger.info("error decoding timer event: %s", err)
        if self.debug:
            print("error decoding timer event: %s" % err)
Process a received timer event :param key: Key of timer :param fields: Received fields
def _get_indirect_shadowing_information(contract): """ Obtain a string that describes variable shadowing for the given variable. None if no shadowing exists. :param var: The variable to collect shadowing information for. :param contract: The contract in which this variable is being analyzed. :return: Returns a string describing variable shadowing for the given variable. None if no shadowing exists. """ # If this variable is an overshadowing variable, we'll want to return information describing it. result = [] indirect_shadows = detect_c3_function_shadowing(contract) if indirect_shadows: for collision_set in sorted(indirect_shadows, key=lambda x: x[0][1].name): winner = collision_set[-1][1].contract.name collision_steps = [colliding_function.contract.name for _, colliding_function in collision_set] collision_steps = ', '.join(collision_steps) result.append(f"'{collision_set[0][1].full_name}' collides in inherited contracts {collision_steps} where {winner} is chosen.") return '\n'.join(result)
Obtain a string that describes indirect (C3 linearization) function shadowing for the given contract. Returns an empty string if no shadowing exists. :param contract: The contract in which function shadowing is being analyzed. :return: Returns a newline-separated string describing each function-shadowing collision set in the given contract; empty if no shadowing exists.
def bosonic_constraints(a):
    """Return a set of constraints that define bosonic ladder operators.

    NOTE(review): the original docstring said "fermionic"; the substitutions
    below encode the bosonic commutation relations a_i a_i^dag = 1 + a_i^dag a_i
    and commuting (not anticommuting) operators on distinct modes, matching
    the function's name.

    :param a: The non-Hermitian variables.
    :type a: list of :class:`sympy.physics.quantum.operator.Operator`.
    :returns: a dict of substitutions.
    """
    substitutions = {}
    for i, ai in enumerate(a):
        # Canonical bosonic commutator on the same mode: a a^dag = 1 + a^dag a.
        substitutions[ai * Dagger(ai)] = 1.0 + Dagger(ai) * ai
        for aj in a[i+1:]:
            # Operators on distinct modes commute; rewrite into normal order.
            # substitutions[ai*Dagger(aj)] = -Dagger(ai)*aj
            substitutions[ai*Dagger(aj)] = Dagger(aj)*ai
            substitutions[Dagger(ai)*aj] = aj*Dagger(ai)
            substitutions[ai*aj] = aj*ai
            substitutions[Dagger(ai) * Dagger(aj)] = Dagger(aj) * Dagger(ai)
    return substitutions
Return a set of constraints that define bosonic ladder operators. :param a: The non-Hermitian variables. :type a: list of :class:`sympy.physics.quantum.operator.Operator`. :returns: a dict of substitutions.
def _convert_to_floats(self, data): """ Convert all values in a dict to floats """ for key, value in data.items(): data[key] = float(value) return data
Convert all values in a dict to floats
def invert(self, points):
    """Invert the distortion.

    Parameters
    ----------
    points : ndarray
        Input image points (2xN, or a flat vector reshaped to a column).

    Returns
    -------
    ndarray
        Undistorted points, same shape as the (possibly reshaped) input.
    """
    pts = points.reshape((points.size, 1)) if points.ndim == 1 else points
    center_x, center_y = self.wc

    # Work in polar coordinates around the distortion center.
    dx = pts[0, :] - center_x
    dy = pts[1, :] - center_y
    radius = np.sqrt(dx ** 2 + dy ** 2)
    angle = np.arctan2(dy, dx)

    # 'atan' distortion model: invert via tan.
    undistorted_radius = np.tan(radius * self.lgamma) / self.lgamma

    # Back to rectangular coordinates (rows beyond the first two stay 1).
    out = np.ones(pts.shape)
    out[0, :] = center_x + undistorted_radius * np.cos(angle)
    out[1, :] = center_y + undistorted_radius * np.sin(angle)
    return out
Invert the distortion Parameters ------------------ points : ndarray Input image points Returns ----------------- ndarray Undistorted points
def _update_grammar(self):
    """
    We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that
    also has the new entities that are extracted from the utterance. Stitching together
    the expressions to form the grammar is a little tedious here, but it is worth it
    because we don't have to create a new grammar from scratch. Creating a new grammar
    is expensive because we have many production rules that have all database values in
    the column on the right hand side. We update the expressions bottom up, since the
    higher level expressions may refer to the lower level ones. For example, the ternary
    expression will refer to the start and end times.

    :return: the updated ``Grammar`` object.
    """
    # This will give us a shallow copy. We have to be careful here because the ``Grammar`` object
    # contains ``Expression`` objects that have tuples containing the members of that expression.
    # We have to create new sub-expression objects so that original grammar is not mutated.
    new_grammar = copy(AtisWorld.sql_table_context.grammar)

    # Bottom level first: add utterance-specific numeric terminals for every
    # numeric nonterminal (times, fares, flight numbers, dates, ...).
    for numeric_nonterminal in NUMERIC_NONTERMINALS:
        self._add_numeric_nonterminal_to_grammar(numeric_nonterminal, new_grammar)
    self._update_expression_reference(new_grammar, 'pos_value', 'number')

    # Rebuild the ternary (col_ref [NOT] BETWEEN x AND y) expressions so they
    # reference the freshly created time_range_start/time_range_end rules;
    # three variants cover 'BETWEEN', 'NOT BETWEEN' and lowercase 'not BETWEEN'.
    ternary_expressions = [self._get_sequence_with_spacing(new_grammar,
                                                           [new_grammar['col_ref'],
                                                            Literal('BETWEEN'),
                                                            new_grammar['time_range_start'],
                                                            Literal(f'AND'),
                                                            new_grammar['time_range_end']]),
                           self._get_sequence_with_spacing(new_grammar,
                                                           [new_grammar['col_ref'],
                                                            Literal('NOT'),
                                                            Literal('BETWEEN'),
                                                            new_grammar['time_range_start'],
                                                            Literal(f'AND'),
                                                            new_grammar['time_range_end']]),
                           self._get_sequence_with_spacing(new_grammar,
                                                           [new_grammar['col_ref'],
                                                            Literal('not'),
                                                            Literal('BETWEEN'),
                                                            new_grammar['time_range_start'],
                                                            Literal(f'AND'),
                                                            new_grammar['time_range_end']])]

    new_grammar['ternaryexpr'] = OneOf(*ternary_expressions, name='ternaryexpr')
    # Higher level: make 'condition' point at the rebuilt ternary expression.
    self._update_expression_reference(new_grammar, 'condition', 'ternaryexpr')

    # Rebuild the binary comparison expressions that mention utterance-derived
    # value rules (fares, flight numbers, and — when dates were detected — dates).
    new_binary_expressions = []

    fare_round_trip_cost_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('fare'),
                                         Literal('.'),
                                         Literal('round_trip_cost'),
                                         new_grammar['binaryop'],
                                         new_grammar['fare_round_trip_cost']])
    new_binary_expressions.append(fare_round_trip_cost_expression)

    fare_one_direction_cost_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('fare'),
                                         Literal('.'),
                                         Literal('one_direction_cost'),
                                         new_grammar['binaryop'],
                                         new_grammar['fare_one_direction_cost']])
    new_binary_expressions.append(fare_one_direction_cost_expression)

    flight_number_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('flight'),
                                         Literal('.'),
                                         Literal('flight_number'),
                                         new_grammar['binaryop'],
                                         new_grammar['flight_number']])
    new_binary_expressions.append(flight_number_expression)

    if self.dates:
        # Date comparisons are only valid when the utterance mentioned dates.
        year_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                                 [Literal('date_day'),
                                                                  Literal('.'),
                                                                  Literal('year'),
                                                                  new_grammar['binaryop'],
                                                                  new_grammar['year_number']])
        month_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                                  [Literal('date_day'),
                                                                   Literal('.'),
                                                                   Literal('month_number'),
                                                                   new_grammar['binaryop'],
                                                                   new_grammar['month_number']])
        day_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                                [Literal('date_day'),
                                                                 Literal('.'),
                                                                 Literal('day_number'),
                                                                 new_grammar['binaryop'],
                                                                 new_grammar['day_number']])
        new_binary_expressions.extend([year_binary_expression,
                                       month_binary_expression,
                                       day_binary_expression])

    # Keep the pre-existing binary expressions; prepend the new ones.
    new_binary_expressions = new_binary_expressions + list(new_grammar['biexpr'].members)
    new_grammar['biexpr'] = OneOf(*new_binary_expressions, name='biexpr')
    self._update_expression_reference(new_grammar, 'condition', 'biexpr')
    return new_grammar
We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also has the new entities that are extracted from the utterance. Stitching together the expressions to form the grammar is a little tedious here, but it is worth it because we don't have to create a new grammar from scratch. Creating a new grammar is expensive because we have many production rules that have all database values in the column on the right hand side. We update the expressions bottom up, since the higher level expressions may refer to the lower level ones. For example, the ternary expression will refer to the start and end times.
def string_array_to_list(a):
    """
    Convert a Java string array into a list of Python unicode strings.

    :param a: the string array to convert
    :type a: JB_Object
    :return: the string list
    :rtype: list
    """
    length = javabridge.get_env().get_array_length(a)
    wrapped = javabridge.get_env().get_object_array_elements(a)
    return [javabridge.get_env().get_string(wrapped[i]) for i in range(length)]
Turns the Java string array into Python unicode string list. :param a: the string array to convert :type a: JB_Object :return: the string list :rtype: list
def tree_libs(start_path, filt_func=None):
    """ Return analysis of library dependencies within `start_path`

    Parameters
    ----------
    start_path : str
        root path of tree to search for libraries depending on other libraries.
    filt_func : None or callable, optional
        If None, inspect all files for library dependencies. If callable,
        accepts filename as argument, returns True if we should inspect the
        file, False otherwise.

    Returns
    -------
    lib_dict : dict
        dictionary with (key, value) pairs of (``libpath``,
        ``dependings_dict``).  ``libpath`` is canonical (``os.path.realpath``)
        filename of library, or library name starting with {'@rpath',
        '@loader_path', '@executable_path'}.  ``dependings_dict`` is a dict
        with (key, value) pairs of (``depending_libpath``, ``install_name``),
        where ``depending_libpath`` is the canonical filename of the library
        depending on ``libpath``, and ``install_name`` is the "install_name"
        by which ``depending_libpath`` refers to ``libpath``.

    Notes
    -----
    See:

    * https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
    * http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
    """
    lib_dict = {}
    for dirpath, dirnames, basenames in os.walk(start_path):
        for base in basenames:
            depending_libpath = realpath(pjoin(dirpath, base))
            # Skip files the caller's filter rejects.
            # (Was the non-idiomatic `not filt_func is None` before.)
            if filt_func is not None and not filt_func(depending_libpath):
                continue
            rpaths = get_rpaths(depending_libpath)
            for install_name in get_install_names(depending_libpath):
                # '@'-prefixed names (@rpath, @loader_path, ...) stay symbolic;
                # everything else is canonicalized on disk.
                lib_path = (install_name if install_name.startswith('@')
                            else realpath(install_name))
                lib_path = resolve_rpath(lib_path, rpaths)
                # Record who depends on this library and under what name.
                lib_dict.setdefault(lib_path, {})[depending_libpath] = install_name
    return lib_dict
Return analysis of library dependencies within `start_path` Parameters ---------- start_path : str root path of tree to search for libraries depending on other libraries. filt_func : None or callable, optional If None, inspect all files for library dependencies. If callable, accepts filename as argument, returns True if we should inspect the file, False otherwise. Returns ------- lib_dict : dict dictionary with (key, value) pairs of (``libpath``, ``dependings_dict``). ``libpath`` is canonical (``os.path.realpath``) filename of library, or library name starting with {'@rpath', '@loader_path', '@executable_path'}. ``dependings_dict`` is a dict with (key, value) pairs of (``depending_libpath``, ``install_name``), where ``dependings_libpath`` is the canonical (``os.path.realpath``) filename of the library depending on ``libpath``, and ``install_name`` is the "install_name" by which ``depending_libpath`` refers to ``libpath``. Notes ----- See: * https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html * http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
def build_config(ctx, target, config_path, c, extra_path, ignore, verbose, silent, debug):
    """Create a LintConfig object from a set of commandline parameters.

    Returns a ``(config, builder)`` tuple; on configuration errors, prints
    the error via click and exits the context with ``CONFIG_ERROR_CODE``.
    """
    builder = LintConfigBuilder()
    try:
        # Config precedence, lowest to highest:
        # 1. default config file or explicit --config file
        if config_path:
            builder.set_from_config_file(config_path)
        elif os.path.exists(DEFAULT_CONFIG_FILE):
            builder.set_from_config_file(DEFAULT_CONFIG_FILE)

        # 2. generic commandline configuration flags (-c foo.bar=1)
        builder.set_config_from_string_list(c)

        # 3. convenience commandline flags overwrite everything else
        if ignore:
            builder.set_option('general', 'ignore', ignore)

        if silent:
            builder.set_option('general', 'verbosity', 0)
        elif verbose > 0:
            builder.set_option('general', 'verbosity', verbose)

        if extra_path:
            builder.set_option('general', 'extra-path', extra_path)

        if target:
            builder.set_option('general', 'target', target)

        if debug:
            builder.set_option('general', 'debug', debug)

        return builder.build(), builder
    except LintConfigError as e:
        click.echo(u"Config Error: {0}".format(ustr(e)))
        ctx.exit(CONFIG_ERROR_CODE)
Creates a LintConfig object based on a set of commandline parameters.
def parse_man_page(command, platform):
    """Parse the man page for *command* on *platform* and return its lines."""
    location = find_page_location(command, platform)
    return parse_page(location)
Parse the man page and return the parsed lines.
def unhumanize_bandwidth(bitsstr):
    '''
    Take a string representing a link capacity, e.g., 10 Mb/s, and return
    an integer representing the number of bits/sec.
    Recognizes:
        - 'bits/sec' or 'b/s' are treated as plain bits per second
        - 'Kb' or 'kb' as thousand bits/sec
        - 'Mb' or 'mb' as million bits/sec
        - 'Gb' or 'gb' as billion bits/sec
        - 'Tb' or 'tb' as trillion bits/sec
        - if second character is 'B', quantity is interpreted as bytes/sec
        - any subsequent characters after the first two are ignored, so
          Kb/s Kb/sec Kbps are interpreted identically.

    Returns None if the string doesn't contain anything parseable.
    '''
    # Integers are assumed to already be in bits/sec.
    if isinstance(bitsstr, int):
        return bitsstr

    # Raw string fixes the invalid escape sequences ('\s', '\d', '\.') that
    # the original non-raw pattern relied on (SyntaxWarning on modern Python).
    mobj = re.match(r'^\s*([\d\.]+)\s*(.*)\s*$', bitsstr)
    if not mobj:
        return None
    value, units = mobj.groups()
    value = float(value)

    multipliers = {'b': 1, 'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12}
    if not units:
        units = 'bits'
    # NOTE(review): an unrecognized unit prefix falls back to multiplier 0 and
    # the function returns 0 rather than None -- preserved for compatibility.
    mult = multipliers.get(units[0].lower(), 0)
    # A second character of 'B' means bytes/sec; convert to bits.
    bits = 8 if len(units) > 1 and units[1] == 'B' else 1
    # print (bitsstr, value, mult, bits)
    return int(value * mult * bits)
Take a string representing a link capacity, e.g., 10 Mb/s, and return an integer representing the number of bits/sec. Recognizes: - 'bits/sec' or 'b/s' are treated as plain bits per second - 'Kb' or 'kb' as thousand bits/sec - 'Mb' or 'mb' as million bits/sec - 'Gb' or 'gb' as billion bits/sec - 'Tb' or 'tb' as trillion bits/sec - if second character is 'B', quantity is interpreted as bytes/sec - any subsequent characters after the first two are ignored, so Kb/s Kb/sec Kbps are interpreted identically. Returns None if the string doesn't contain anything parseable.
def _assertion(self, matcher, value): """ Perform the actual assertion for the given matcher and value. Override this method to apply a special configuration when performing the assertion. If the assertion fails it should raise an AssertionError. """ # To support the syntax `any_of(subject) | should ...` we check if the # value to check is an Expectation object and if it is we use the descriptor # protocol to bind the value's assertion logic to this expectation. if isinstance(value, Expectation): assertion = value._assertion.__get__(self, Expectation) assertion(matcher, value.value) else: hc.assert_that(value, matcher)
Perform the actual assertion for the given matcher and value. Override this method to apply a special configuration when performing the assertion. If the assertion fails it should raise an AssertionError.
def aes_b64_encrypt(value, secret, block_size=AES.block_size):
    """ AES encrypt @value with @secret using the |CFB| mode of AES
        with a cryptographically secure initialization vector.

        The random IV is prepended in plain text to the base64-encoded
        ciphertext so the matching decrypt function can recover it.

        -> (#str) AES encrypted @value
        ..
            from vital.security import aes_encrypt, aes_decrypt
            aes_encrypt("Hello, world",
                        "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
            # -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
            aes_decrypt(
                "zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
                "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
            # -> 'Hello, world'
        ..
    """
    initialization_vector = randstr(block_size * 2)
    cipher = AES.new(secret[:32], AES.MODE_CFB,
                     initialization_vector[:block_size].encode())
    ciphertext = cipher.encrypt(uniorbytes(value, bytes))
    return initialization_vector + b64encode(ciphertext).decode('utf-8')
AES encrypt @value with @secret using the |CFB| mode of AES with a cryptographically secure initialization vector. -> (#str) AES encrypted @value .. from vital.security import aes_encrypt, aes_decrypt aes_encrypt("Hello, world", "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW") # -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=' aes_decrypt( "zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=", "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW") # -> 'Hello, world' ..
def GetValue(self):
    '''
    Build a repeated-flag string from the dropdown selection.

    e.g. a dropdown value of 3 with option string '-v' yields '-vvv'
    (counter-style options). Returns None when the dropdown value is
    not a number.

    Returns str(action.option_strings[0]) * DropDown Value
    '''
    selection = self._widget.GetValue()
    if not str(selection).isdigit():
        return None
    flag = str(self._action.option_strings[0]).replace('-', '')
    return '-' + flag * int(selection)
NOTE: Added on plane. Cannot remember exact implementation of counter objects. I believe that they count sequential pairings of options e.g. -vvvvv But I'm not sure. That's what I'm going with for now. Returns str(action.option_strings[0]) * DropDown Value
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.

    The valid-header is 'True' only when there is no error and the signing
    key is known; the message-header is one of 'Invalid: ...', 'Valid: ...'
    (trusted uid found) or 'Untrusted: ...'.

    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpg.results.Signature`
    :param error_msg: An error message if there is one, or None
    :type error_msg: :class:`str` or `None`
    '''
    sig_from = ''
    sig_known = True
    uid_trusted = False

    assert error_msg is None or isinstance(error_msg, str)

    if not sigs:
        # No signatures at all counts as an error unless one was already given.
        error_msg = error_msg or u'no signature found'
    elif not error_msg:
        try:
            key = crypto.get_key(sigs[0].fpr)
            for uid in key.uids:
                if crypto.check_uid_validity(key, uid.email):
                    sig_from = uid.uid
                    uid_trusted = True
                    break
            else:
                # No trusted uid found, since we did not break from the loop.
                sig_from = key.uids[0].uid
        except GPGProblem:
            # Key lookup failed: fall back to the bare fingerprint.
            sig_from = sigs[0].fpr
            sig_known = False

    if error_msg:
        msg = 'Invalid: {}'.format(error_msg)
    elif uid_trusted:
        msg = 'Valid: {}'.format(sig_from)
    else:
        msg = 'Untrusted: {}'.format(sig_from)

    mail.add_header(X_SIGNATURE_VALID_HEADER,
                    'False' if (error_msg or not sig_known) else 'True')
    mail.add_header(X_SIGNATURE_MESSAGE_HEADER, msg)
Add pseudo headers to the mail indicating whether the signature verification was successful. :param mail: :class:`email.message.Message` the message to entitle :param sigs: list of :class:`gpg.results.Signature` :param error_msg: An error message if there is one, or None :type error_msg: :class:`str` or `None`
def invalidate_model_cache(self):
    """
    Invalidate model cache by generating new key for the model.

    Also invalidates the cache of every related table (FK, O2O, M2M),
    since cached querysets joining those tables are now stale.
    """
    logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
    if django.VERSION >= (1, 8):
        # Django >= 1.8: _meta.get_all_related_objects() is gone; walk
        # get_fields() and keep forward/reverse relational fields.
        related_tables = set(
            [f.related_model._meta.db_table for f in self.model._meta.get_fields()
             if ((f.one_to_many or f.one_to_one) and f.auto_created)
             or f.many_to_one or (f.many_to_many and not f.auto_created)])
    else:
        # Legacy (< 1.8) related-objects API.
        related_tables = set([rel.model._meta.db_table for rel in self.model._meta.get_all_related_objects()])
    # temporary fix for m2m relations with an intermediate model, goes away after better join caching
    related_tables |= set([field.rel.to._meta.db_table for field in self.model._meta.fields if issubclass(type(field), RelatedField)])

    logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
    # Refresh this model's cache key, then every related table's key.
    update_model_cache(self.model._meta.db_table)
    for related_table in related_tables:
        update_model_cache(related_table)
Invalidate model cache by generating new key for the model.
def clean_data(self, data, rename_col=None, drop_col=None, resample=True, freq='h', resampler='mean', interpolate=True, limit=1, method='linear', remove_na=True, remove_na_how='any', remove_outliers=True, sd_val=3, remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'), save_file=True): """ Cleans dataframe according to user specifications and stores result in self.cleaned_data. Parameters ---------- data : pd.DataFrame() Dataframe to be cleaned. rename_col : list(str) List of new column names. drop_col : list(str) Columns to be dropped. resample : bool Indicates whether to resample data or not. freq : str Resampling frequency i.e. d, h, 15T... resampler : str Resampling type i.e. mean, max. interpolate : bool Indicates whether to interpolate data or not. limit : int Interpolation limit. method : str Interpolation method. remove_na : bool Indicates whether to remove NAs or not. remove_na_how : str Specificies how to remove NA i.e. all, any... remove_outliers : bool Indicates whether to remove outliers or not. sd_val : int Standard Deviation Value (specifices how many SDs away is a point considered an outlier) remove_out_of_bounds : bool Indicates whether to remove out of bounds datapoints or not. low_bound : int Low bound of the data. high_bound : int High bound of the data. save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing cleaned data. 
""" # Check to ensure data is a pandas dataframe if not isinstance(data, pd.DataFrame): raise TypeError('data has to be a pandas dataframe.') # Create instance and clean the data clean_data_obj = Clean_Data(data) clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler, interpolate=interpolate, limit=limit, method=method, remove_na=remove_na, remove_na_how=remove_na_how, remove_outliers=remove_outliers, sd_val=sd_val, remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound) # Correlation plot # fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data) # fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png') if rename_col: # Rename columns of dataframe clean_data_obj.rename_columns(rename_col) if drop_col: # Drop columns of dataframe clean_data_obj.drop_columns(drop_col) # Store cleaned data in wrapper class self.cleaned_data = clean_data_obj.cleaned_data # Logging self.result['Clean'] = { 'Rename Col': rename_col, 'Drop Col': drop_col, 'Resample': resample, 'Frequency': freq, 'Resampler': resampler, 'Interpolate': interpolate, 'Limit': limit, 'Method': method, 'Remove NA': remove_na, 'Remove NA How': remove_na_how, 'Remove Outliers': remove_outliers, 'SD Val': sd_val, 'Remove Out of Bounds': remove_out_of_bounds, 'Low Bound': low_bound, 'High Bound': str(high_bound) if high_bound == float('inf') else high_bound, 'Save File': save_file } if save_file: f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv' self.cleaned_data.to_csv(f) self.result['Clean']['Saved File'] = f else: self.result['Clean']['Saved File'] = '' return self.cleaned_data
Cleans dataframe according to user specifications and stores result in self.cleaned_data. Parameters ---------- data : pd.DataFrame() Dataframe to be cleaned. rename_col : list(str) List of new column names. drop_col : list(str) Columns to be dropped. resample : bool Indicates whether to resample data or not. freq : str Resampling frequency i.e. d, h, 15T... resampler : str Resampling type i.e. mean, max. interpolate : bool Indicates whether to interpolate data or not. limit : int Interpolation limit. method : str Interpolation method. remove_na : bool Indicates whether to remove NAs or not. remove_na_how : str Specifies how to remove NA i.e. all, any... remove_outliers : bool Indicates whether to remove outliers or not. sd_val : int Standard Deviation Value (specifies how many SDs away is a point considered an outlier) remove_out_of_bounds : bool Indicates whether to remove out of bounds datapoints or not. low_bound : int Low bound of the data. high_bound : int High bound of the data. save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing cleaned data.
def normalize_col_name(self, col_name, used_column_names, is_relation):
    """
    Modify the column name to make it Python-compatible as a field name.

    :param col_name: the raw database column name
    :param used_column_names: field names already taken on this model
    :param is_relation: whether the column is a foreign key
    :return: ``(new_name, field_params, field_notes)`` where ``field_params``
        carries ``db_column`` whenever the generated field name no longer
        matches the column, and ``field_notes`` lists a human-readable reason
        for each transformation applied.
    """
    field_params = {}
    field_notes = []

    new_name = col_name.lower()
    if new_name != col_name:
        field_notes.append('Field name made lowercase.')

    if is_relation:
        if new_name.endswith('_id'):
            # Django appends '_id' to FK attnames automatically; strip it.
            new_name = new_name[:-3]
        else:
            field_params['db_column'] = col_name

    # Replace every non-word character with an underscore.
    new_name, num_repl = re.subn(r'\W', '_', new_name)
    if num_repl > 0:
        field_notes.append('Field renamed to remove unsuitable characters.')

    # Collapse runs of underscores ('__' is reserved for lookups in Django).
    # (Was the non-idiomatic `new_name.find('__') >= 0` before.)
    if '__' in new_name:
        while '__' in new_name:
            new_name = new_name.replace('__', '_')
        if '__' in col_name.lower():
            # Only add the comment if the double underscore was in the original name
            field_notes.append("Field renamed because it contained more than one '_' in a row.")

    if new_name.startswith('_'):
        new_name = 'field%s' % new_name
        field_notes.append("Field renamed because it started with '_'.")

    if new_name.endswith('_'):
        new_name = '%sfield' % new_name
        field_notes.append("Field renamed because it ended with '_'.")

    if keyword.iskeyword(new_name):
        new_name += '_field'
        field_notes.append('Field renamed because it was a Python reserved word.')

    if new_name[0].isdigit():
        new_name = 'number_%s' % new_name
        field_notes.append("Field renamed because it wasn't a valid Python identifier.")

    # Deduplicate against names already used on the model.
    if new_name in used_column_names:
        num = 0
        while '%s_%d' % (new_name, num) in used_column_names:
            num += 1
        new_name = '%s_%d' % (new_name, num)
        field_notes.append('Field renamed because of name conflict.')

    if col_name != new_name and field_notes:
        field_params['db_column'] = col_name

    return new_name, field_params, field_notes
Modify the column name to make it Python-compatible as a field name
def classes_can_admin(self):
    """Return all the classes (sorted) that this user can admin."""
    candidates = Session.query(Class).all() if self.is_admin else self.admin_for
    return sorted(candidates)
Return all the classes (sorted) that this user can admin.
async def send_cred_def(self, s_id: str, revocation: bool = True, rr_size: int = None) -> str:
    """
    Create a credential definition as Issuer, store it in its wallet, and send it to the ledger.

    Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure
    to send credential definition to ledger if need be, or IndyError for any other failure
    to create and store credential definition in wallet.

    :param s_id: schema identifier
    :param revocation: whether to support revocation for cred def
    :param rr_size: size of initial revocation registry (default as per _create_rev_reg()),
        if revocation supported
    :return: json credential definition as it appears on ledger
    """
    LOGGER.debug('Issuer.send_cred_def >>> s_id: %s, revocation: %s, rr_size: %s', s_id, revocation, rr_size)

    # Default to an empty object until the ledger's cred def is fetched.
    rv_json = json.dumps({})
    schema_json = await self.get_schema(schema_key(s_id))
    schema = json.loads(schema_json)

    cd_id = cred_def_id(self.did, schema['seqNo'])
    private_key_ok = True
    # Serialize cred-def creation across coroutines sharing the cache.
    with CRED_DEF_CACHE.lock:
        try:
            rv_json = await self.get_cred_def(cd_id)
            LOGGER.info(
                'Cred def on schema %s version %s already exists on ledger; Issuer %s not sending another',
                schema['name'],
                schema['version'],
                self.wallet.name)
        except AbsentCredDef:
            pass  # OK - about to create, store, and send it

        try:
            (_, cred_def_json) = await anoncreds.issuer_create_and_store_credential_def(
                self.wallet.handle,
                self.did,  # issuer DID
                schema_json,
                CD_ID_TAG,  # expect only one cred def per schema and issuer
                'CL',
                json.dumps({'support_revocation': revocation}))
            if json.loads(rv_json):
                # Wallet creation succeeded although the ledger already has
                # one: the local private key does not match the ledger entry.
                private_key_ok = False
                LOGGER.warning(
                    'New cred def on %s in wallet shadows existing one on ledger: private key not usable',
                    cd_id)
                # carry on though, this agent may have other roles so public key may be good enough
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError:
                if json.loads(rv_json):
                    # Wallet and ledger agree: reuse the existing cred def.
                    LOGGER.info(
                        'Issuer wallet %s reusing existing cred def on schema %s version %s',
                        self.wallet.name,
                        schema['name'],
                        schema['version'])
                else:
                    # Wallet has a cred def the ledger knows nothing about.
                    LOGGER.debug('Issuer.send_cred_def: <!< corrupt wallet %s', self.wallet.name)
                    raise CorruptWallet(
                        'Corrupt Issuer wallet {} has cred def on schema {} version {} not on ledger'.format(
                            self.wallet.name,
                            schema['name'],
                            schema['version']))
            else:
                LOGGER.debug(
                    'Issuer.send_cred_def: <!< cannot store cred def in wallet %s: indy error code %s',
                    self.wallet.name,
                    x_indy.error_code)
                raise

        if not json.loads(rv_json):  # checking the ledger returned no cred def: send it
            req_json = await ledger.build_cred_def_request(self.did, cred_def_json)
            await self._sign_submit(req_json)
            rv_json = await self.get_cred_def(cd_id)  # pick up from ledger and parse; add to cache

        if revocation:
            await self._sync_revoc(rev_reg_id(cd_id, 0), rr_size)  # create new rev reg, tails file for tag 0

    if revocation and private_key_ok:
        # Sync every pre-existing revocation registry tag for this cred def.
        for tag in [str(t) for t in range(int(Tails.next_tag(self._dir_tails, cd_id)[0]))]:  # '0' to str(next-1)
            await self._sync_revoc(rev_reg_id(cd_id, tag), rr_size if tag == 0 else None)

    dir_cred_def = join(self._dir_tails, cd_id)
    if not isdir(dir_cred_def):  # make sure a directory exists for box id collection when required, revo or not
        makedirs(dir_cred_def, exist_ok=True)

    LOGGER.debug('Issuer.send_cred_def <<< %s', rv_json)
    return rv_json
Create a credential definition as Issuer, store it in its wallet, and send it to the ledger. Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure to send credential definition to ledger if need be, or IndyError for any other failure to create and store credential definition in wallet. :param s_id: schema identifier :param revocation: whether to support revocation for cred def :param rr_size: size of initial revocation registry (default as per _create_rev_reg()), if revocation supported :return: json credential definition as it appears on ledger
def Decrypt(self, encrypted_data):
    """Decrypts the encrypted data.

    Only whole AES blocks are decrypted; a trailing partial block is
    handed back as remaining data for a subsequent call.

    Args:
      encrypted_data (bytes): encrypted data.

    Returns:
      tuple[bytes, bytes]: decrypted data and remaining encrypted data.
    """
    remainder_size = len(encrypted_data) % AES.block_size
    if remainder_size:
        remaining_encrypted_data = encrypted_data[-remainder_size:]
        whole_blocks = encrypted_data[:-remainder_size]
    else:
        remaining_encrypted_data = b''
        whole_blocks = encrypted_data

    decrypted_data = self._aes_cipher.decrypt(whole_blocks)
    return decrypted_data, remaining_encrypted_data
Decrypts the encrypted data. Args: encrypted_data (bytes): encrypted data. Returns: tuple[bytes, bytes]: decrypted data and remaining encrypted data.
def list(self, enabled=values.unset, date_created_after=values.unset,
         date_created_before=values.unset, friendly_name=values.unset,
         limit=None, page_size=None):
    """
    Lists CompositionHookInstance records from the API as a list.
    Unlike stream(), this operation is eager and will load `limit` records into
    memory before returning.

    :param bool enabled: Only show Composition Hooks enabled or disabled.
    :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
    :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
    :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
    :param int limit: Upper limit for the number of records to return. list() guarantees
                      never to return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, list() will attempt to read the limit
                          with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
    """
    # Delegate to stream() and eagerly drain the generator into a list.
    record_stream = self.stream(
        enabled=enabled,
        date_created_after=date_created_after,
        date_created_before=date_created_before,
        friendly_name=friendly_name,
        limit=limit,
        page_size=page_size,
    )
    return [record for record in record_stream]
Lists CompositionHookInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param bool enabled: Only show Composition Hooks enabled or disabled. :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone. :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone. :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
def use_isolated_objective_bank_view(self):
    """Pass through to provider ObjectiveLookupSession.use_isolated_objective_bank_view"""
    self._objective_bank_view = ISOLATED
    # self._get_provider_session('objective_lookup_session')  # To make sure the session is tracked
    # Propagate the isolated view to every tracked provider session;
    # sessions that do not implement the setter are simply skipped.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_isolated_objective_bank_view()
        except AttributeError:
            continue
Pass through to provider ObjectiveLookupSession.use_isolated_objective_bank_view
def do_driz(insci, input_wcs, inwht,
            output_wcs, outsci, outwht, outcon,
            expin, in_units, wt_scl,
            wcslin_pscale=1.0, uniqid=1, pixfrac=1.0, kernel='square',
            fillval="INDEF", stepsize=10, wcsmap=None):
    """
    Core routine for performing 'drizzle' operation on a single input image
    All input values will be Python objects such as ndarrays, instead
    of filenames.
    File handling (input and output) will be performed by calling routine.

    Notes (from usage below -- confirm against caller where hedged):
      - ``uniqid`` is a 1-based image id; it selects which 32-bit plane of
        the context image ``outcon`` records this input.
      - ``in_units == 'cps'`` means the input is already count-rate, so no
        exposure scaling is applied; otherwise ``expin`` is used.
      - ``wcsmap`` may supply a custom mapping class; when None and the C
        extension is available, the WCSLIB-based cdriz mapping is used.
    """
    # Insure that the fillval parameter gets properly interpreted
    # for use with tdriz
    if util.is_blank(fillval):
        fillval = 'INDEF'
    else:
        fillval = str(fillval)

    # Exposure scaling: count-rate inputs need no scaling.
    if in_units == 'cps':
        expscale = 1.0
    else:
        expscale = expin

    # Compute what plane of the context image this input would
    # correspond to: each plane holds flags for 32 input images.
    planeid = int((uniqid - 1) / 32)

    # Check if the context image has this many planes
    if outcon.ndim == 3:
        nplanes = outcon.shape[0]
    elif outcon.ndim == 2:
        nplanes = 1
    else:
        nplanes = 0

    if nplanes <= planeid:
        raise IndexError("Not enough planes in drizzle context image")

    # Alias context image to the requested plane if 3d
    if outcon.ndim == 2:
        outctx = outcon
    else:
        outctx = outcon[planeid]

    # Ratio of output to input plate scale, passed through to tdriz.
    pix_ratio = output_wcs.pscale / wcslin_pscale

    if wcsmap is None and cdriz is not None:
        # Fast path: WCSLIB-based mapping implemented in the C extension.
        log.info('Using WCSLIB-based coordinate transformation...')
        log.info('stepsize = %s' % stepsize)
        mapping = cdriz.DefaultWCSMapping(
            input_wcs, output_wcs,
            input_wcs.pixel_shape[0], input_wcs.pixel_shape[1],
            stepsize
        )
    else:
        # ##Using the Python class for the WCS-based transformation
        #
        # Use user provided mapping function
        log.info('Using coordinate transformation defined by user...')
        if wcsmap is None:
            wcsmap = wcs_functions.WCSMap
        wmap = wcsmap(input_wcs, output_wcs)
        mapping = wmap.forward

    _shift_fr = 'output'
    _shift_un = 'output'
    ystart = 0
    nmiss = 0
    nskip = 0
    #
    # This call to 'cdriz.tdriz' uses the new C syntax
    #
    _dny = insci.shape[0]
    # Call 'drizzle' to perform image combination
    if insci.dtype > np.float32:
        # WARNING: Input array recast as a float32 array
        # (tdriz expects single-precision input).
        insci = insci.astype(np.float32)

    _vers, nmiss, nskip = cdriz.tdriz(
        insci, inwht, outsci, outwht,
        outctx, uniqid, ystart, 1, 1, _dny,
        pix_ratio, 1.0, 1.0, 'center', pixfrac,
        kernel, in_units, expscale, wt_scl,
        fillval, nmiss, nskip, 1, mapping)

    if nmiss > 0:
        log.warning('! %s points were outside the output image.' % nmiss)
    if nskip > 0:
        log.debug('! Note, %s input lines were skipped completely.' % nskip)

    return _vers
Core routine for performing 'drizzle' operation on a single input image All input values will be Python objects such as ndarrays, instead of filenames. File handling (input and output) will be performed by calling routine.
def singletrial(num_trials, skipstep=1):
    """ Single-trial cross-validation schema

    Use one trial for training, all others for testing.

    Parameters
    ----------
    num_trials : int
        Total number of trials
    skipstep : int
        only use every `skipstep` trial for training

    Returns
    -------
    gen : generator object
        the generator returns tuples (trainset, testset)
    """
    for trial in range(0, num_trials, skipstep):
        trainset = [trial]
        # Every trial except the training one goes into the test set.
        others = [i for i in range(num_trials) if i != trial]
        testset = sort([i % num_trials for i in others])
        yield trainset, testset
Single-trial cross-validation schema Use one trial for training, all others for testing. Parameters ---------- num_trials : int Total number of trials skipstep : int only use every `skipstep` trial for training Returns ------- gen : generator object the generator returns tuples (trainset, testset)
def run_duplicated_snps(in_prefix, in_type, out_prefix, base_dir, options):
    """Runs step2 (duplicated snps).

    :param in_prefix: the prefix of the input files.
    :param in_type: the type of the input files.
    :param out_prefix: the output prefix.
    :param base_dir: the output directory.
    :param options: the options needed.

    :type in_prefix: str
    :type in_type: str
    :type out_prefix: str
    :type base_dir: str
    :type options: list

    :returns: a tuple containing the prefix of the output files (the input
              prefix for the next script) and the type of the output files
              (``tfile``).

    This function calls the :py:mod:`pyGenClean.DupSNPs.duplicated_snps`
    module. The required file type for this module is ``tfile``, hence the
    need to use the :py:func:`check_input_files` to check if the file input
    file type is the good one, or to create it if needed.

    .. note::
        This function creates a ``map`` file, needed for the
        :py:mod:`pyGenClean.DupSNPs.duplicated_snps` module.

    """
    # NOTE: this module uses Python 2 syntax (``print >>file``).

    # Creating the output directory
    os.mkdir(out_prefix)

    # We know we need a tfile
    required_type = "tfile"
    check_input_files(in_prefix, in_type, required_type)

    # This step require a map file (we now have a tfile); build one from the
    # first four columns of the tped if it does not exist yet.
    if not os.path.isfile(in_prefix + ".map"):
        outputFile = None
        try:
            outputFile = open(in_prefix + ".map", "w")
        except IOError:
            msg = "{}: can't write file".format(in_prefix + ".map")
            raise ProgramError(msg)
        try:
            with open(in_prefix + ".tped", 'r') as inputFile:
                for line in inputFile:
                    row = createRowFromPlinkSpacedOutput(line)
                    print >>outputFile, "\t".join(row[:4])
        except IOError:
            msg = "{}: no such file".format(in_prefix + ".tped")
            raise ProgramError(msg)
        outputFile.close()

    # We need to inject the name of the input file and the name of the output
    # prefix
    script_prefix = os.path.join(out_prefix, "dup_snps")
    options += ["--{}".format(required_type), in_prefix,
                "--out", script_prefix]

    # We run the script
    try:
        duplicated_snps.main(options)
    except duplicated_snps.ProgramError as e:
        msg = "duplicated_snps: {}".format(e)
        raise ProgramError(msg)

    # Reading the number of duplicated markers: count occurrences of each
    # (chromosome, position) pair found in the duplicated tped.
    duplicated_count = defaultdict(int)
    if os.path.isfile(script_prefix + ".duplicated_snps.tped"):
        with open(script_prefix + ".duplicated_snps.tped", "r") as i_file:
            duplicated_count = Counter(
                (i[0], i[3]) for i in [
                    tuple(createRowFromPlinkSpacedOutput(line)[:4])
                    for line in i_file
                ]
            )

    # Counting the number of zeroed out genotypes per duplicated markers
    # (the first line of the file is a header and is skipped).
    zeroed_out = defaultdict(int)
    if os.path.isfile(script_prefix + ".zeroed_out"):
        with open(script_prefix + ".zeroed_out", "r") as i_file:
            zeroed_out = Counter([
                tuple(line.rstrip("\r\n").split("\t")[:2])
                for line in i_file.read().splitlines()[1:]
            ])
    nb_zeroed_out = sum(zeroed_out.values())

    # Checking the not good enough markers (header line skipped)
    not_good_enough = set()
    if os.path.isfile(script_prefix + ".not_good_enough"):
        with open(script_prefix + ".not_good_enough", "r") as i_file:
            not_good_enough = {
                line.rstrip("\r\n").split("\t")[0]
                for line in i_file.read().splitlines()[1:]
            }

    # Checking which markers were chosen
    chosen_markers = set()
    if os.path.isfile(script_prefix + ".chosen_snps.info"):
        with open(script_prefix + ".chosen_snps.info", "r") as i_file:
            chosen_markers = set(i_file.read().splitlines())

    # Finding if some 'not_good_enough' samples were chosen
    not_good_still = chosen_markers & not_good_enough

    # Adding the 'not chosen markers' to the list of excluded markers
    removed_markers = set()
    o_filename = os.path.join(base_dir, "excluded_markers.txt")
    if os.path.isfile(script_prefix + ".removed_duplicates"):
        with open(script_prefix + ".removed_duplicates", "r") as i_file:
            removed_markers = set(i_file.read().splitlines())
            with open(o_filename, "a") as o_file:
                for marker_id in removed_markers:
                    print >>o_file, marker_id + "\t" + "removed duplicate"

    # Writing the summary results
    total_remaining = 0
    with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
        print >>o_file, "# {}".format(script_prefix)
        # Histogram of duplication multiplicities, e.g. how many markers
        # appear twice, three times, etc.
        rep_counter = Counter(duplicated_count.values()).most_common()
        if rep_counter:
            print >>o_file, "Number of replicated markers"
        else:
            print >>o_file, "Number of replicated markers\t0"
        total_nb_removed_rep = 0
        for rep_type, rep_count in rep_counter:
            # A marker replicated N times keeps one copy, so N-1 are removed.
            nb_removed_rep = (rep_count * rep_type) - rep_count
            print >>o_file, " - x{}\t{:,d}\t-{:,d}".format(
                rep_type,
                rep_count,
                nb_removed_rep,
            )
            total_nb_removed_rep += nb_removed_rep
        total_remaining = total_nb_removed_rep - len(removed_markers)
        print >>o_file, (
            "Number of replicated markers kept\t{nb:,d}\t+{nb:,d}".format(
                nb=total_remaining,
            )
        )
        print >>o_file, ("Poorly chosen replicated markers\t"
                         "{nb:,d}".format(nb=len(not_good_still)))
        print >>o_file, ("Final number of excluded markers\t"
                         "{nb:,d}".format(nb=len(removed_markers)))
        print >>o_file, "---"

    # We create a LaTeX summary
    latex_file = os.path.join(script_prefix + ".summary.tex")
    try:
        with open(latex_file, "w") as o_file:
            print >>o_file, latex_template.subsection(
                duplicated_snps.pretty_name
            )
            text = (
                "A total of {:,d} duplicated marker{} {} found.".format(
                    len(duplicated_count),
                    "s" if len(duplicated_count) > 1 else "",
                    "were" if len(duplicated_count) > 1 else "was",
                )
            )
            print >>o_file, latex_template.wrap_lines(text)

            if len(duplicated_count) > 0:
                text = (
                    "While merging duplicates, a total of {:,d} genotype{} {} "
                    "zeroed out. A total of {:,d} marker{} {} found to be not "
                    "good enough for duplicate completion.".format(
                        nb_zeroed_out,
                        "s" if nb_zeroed_out > 1 else "",
                        "were" if nb_zeroed_out > 1 else "was",
                        len(not_good_enough),
                        "s" if len(not_good_enough) > 1 else "",
                        "were" if len(not_good_enough) > 1 else "was",
                    )
                )
                print >>o_file, latex_template.wrap_lines(text)

                text = (
                    "A total of {:,d} marker{} {} excluded while creating the "
                    "final dataset.".format(
                        len(removed_markers),
                        "s" if len(removed_markers) > 1 else "",
                        "were" if len(removed_markers) > 1 else "was",
                    )
                )
                print >>o_file, latex_template.wrap_lines(text)

                if total_remaining > 0:
                    text = latex_template.textbf(
                        "In total, {:,d} maker{} {} not merged for different "
                        "reasons (low completion rate, discordant allele, "
                        "discordant MAF, etc) and {} still present in the "
                        "dataset.".format(
                            total_remaining,
                            "s" if total_remaining > 1 else "",
                            "were" if total_remaining > 1 else "was",
                            "are" if total_remaining > 1 else "is",
                        )
                    )
                    print >>o_file, latex_template.wrap_lines(text)

                if len(not_good_still) > 0:
                    start = "A total of"
                    end = " and {} still present in the final dataset.".format(
                        "are" if len(not_good_still) > 1 else "is",
                    )
                    if total_remaining > 0:
                        start = "Out of these,"
                        end = "."
                    text = latex_template.textbf(
                        start + " {:,d} marker{} {} not good enough for "
                        "completion, but {} still selected as the best "
                        "duplicate{}".format(
                            len(not_good_still),
                            "s" if len(not_good_still) > 1 else "",
                            "were" if len(not_good_still) > 1 else "was",
                            "were" if len(not_good_still) > 1 else "was",
                            end,
                        )
                    )
                    print >>o_file, latex_template.wrap_lines(text)

    except IOError:
        msg = "{}: cannot write LaTeX summary".format(latex_file)
        raise ProgramError(msg)

    # We know this step does produce a new data set (tfile), so we return it
    return _StepResult(
        next_file=os.path.join(out_prefix, "dup_snps.final"),
        next_file_type="tfile",
        latex_summary=latex_file,
        description=duplicated_snps.desc,
        long_description=duplicated_snps.long_desc,
        graph_path=None,
    )
Runs step2 (duplicated snps). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``tfile``). This function calls the :py:mod:`pyGenClean.DupSNPs.duplicated_snps` module. The required file type for this module is ``tfile``, hence the need to use the :py:func:`check_input_files` to check if the file input file type is the good one, or to create it if needed. .. note:: This function creates a ``map`` file, needed for the :py:mod:`pyGenClean.DupSNPs.duplicated_snps` module.
def get_corner(self, time):
    """
    Gets the corner array indices of the STObject at a given time that
    correspond to the upper left corner of the bounding box for the STObject.

    Args:
        time: time at which the corner is being extracted.

    Returns:
        corner index, or (-1, -1) when the time falls outside the object's
        lifetime.
    """
    # Guard clause: times outside [start_time, end_time] have no corner.
    if not (self.start_time <= time <= self.end_time):
        return -1, -1
    offset = time - self.start_time
    return self.i[offset][0, 0], self.j[offset][0, 0]
Gets the corner array indices of the STObject at a given time that corresponds to the upper left corner of the bounding box for the STObject. Args: time: time at which the corner is being extracted. Returns: corner index.
def order_assets(self, asset_ids, composition_id):
    """Reorders a set of assets in a composition.

    arg:    asset_ids (osid.id.Id[]): ``Ids`` for a set of
            ``Assets``
    arg:    composition_id (osid.id.Id): ``Id`` of the
            ``Composition``
    raise:  NotFound - ``composition_id`` not found or, an
            ``asset_id`` not related to ``composition_id``
    raise:  NullArgument - ``instruction_ids`` or ``agenda_id`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # The provider session method is already bound; passing ``self``
    # explicitly (as the previous code did) shifted every argument by one.
    self._provider_session.order_assets(asset_ids, composition_id)
Reorders a set of assets in a composition. arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of ``Assets`` arg: composition_id (osid.id.Id): ``Id`` of the ``Composition`` raise: NotFound - ``composition_id`` not found or, an ``asset_id`` not related to ``composition_id`` raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def p_ioport_head(self, p):
    # PLY grammar production: the string below is parsed by yacc as the
    # rule for this reduction -- it is NOT documentation and must not be
    # reworded.
    'ioport_head : sigtypes portname'
    # Build the I/O port node from the signal types (p[1]) and the port
    # name (p[2]), recording the port-name token's line number.
    p[0] = self.create_ioport(p[1], p[2], lineno=p.lineno(2))
    # The production itself inherits the line number of its first symbol.
    p.set_lineno(0, p.lineno(1))
ioport_head : sigtypes portname
def lv_load_areas(self):
    """Yield the LV load areas that belong to this ring.

    Iterates the grid graph and yields every ``LVLoadAreaDing0`` node
    whose ``ring`` attribute is this ring.
    """
    for node in self._grid._graph.nodes():
        if isinstance(node, LVLoadAreaDing0) and node.ring == self:
            yield node
Yield the LV load areas of this ring: iterates the grid graph and yields every ``LVLoadAreaDing0`` node whose ``ring`` attribute is this ring.
def _query(self, action, qobj): """ returns wikidata query string """ if action == 'labels': return qobj.labels(self._pop_entities()) elif action == 'wikidata': return qobj.wikidata(self.params.get('title'), self.params.get('wikibase'))
returns wikidata query string
def cmd_shodan_open(ip, no_cache, json_output, nmap_command, verbose, output):
    """Output the open ports for an IP against shodan (nmap format).

    Example:

    \b
    $ habu.shodan.open 8.8.8.8
    T:53,U:53
    """
    # NOTE: the docstring above is the click --help text; keep it user-facing.
    habucfg = loadcfg()

    # The Shodan API key is mandatory; exit with configuration instructions.
    if 'SHODAN_APIKEY' not in habucfg:
        print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY')
        print('Get your API key from https://www.shodan.io/')
        sys.exit(1)

    if verbose:
        logging.basicConfig(level=logging.INFO, format='%(message)s')

    # Query Shodan (result may come from the local cache unless no_cache).
    data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose)

    ports = []

    # Build "T:80"/"U:53"-style entries from each reported service
    # (first letter of the transport, upper-cased, plus the port number).
    if 'data' in data:
        for service in data['data']:
            ports.append('{}:{}'.format(
                service['transport'][0].upper(),
                service['port']
            ))

    if nmap_command:
        # Emit a ready-to-run nmap command line; nothing is written when no
        # open ports were found.
        if ports:
            output.write('nmap -A -v -p {} {}'.format(','.join(ports), ip))
    else:
        if json_output:
            output.write(json.dumps(ports, indent=4))
            output.write('\n')
        else:
            output.write(','.join(ports))
Output the open ports for an IP against shodan (nmap format). Example: \b $ habu.shodan.open 8.8.8.8 T:53,U:53
def to_potential(f):
    '''
    to_potential(f) yields f if f is a potential function; if f is not, but f can be converted to
      a potential function, that conversion is performed then the result is yielded.
    to_potential(Ellipsis) yields a potential function whose output is simply its input (i.e., the
      identity function).
    to_potential(None) is equivalent to to_potential(0).

    The following can be converted into potential functions:
      * Anything for which pimms.is_array(x, 'number') yields True (i.e., arrays of constants).
      * Any tuple (g, h) where g(x) yields a potential value and h(x) yields a jacobian matrix for
        the parameter vector x.
    '''
    if   is_potential(f): return f
    elif f is Ellipsis:   return identity
    # The docstring promises to_potential(None) == to_potential(0); handle
    # None explicitly so it does not fall through to the ValueError below.
    elif f is None:       return const_potential(0)
    elif pimms.is_array(f, 'number'): return const_potential(f)
    elif isinstance(f, tuple) and len(f) == 2: return PotentialLambda(f[0], f[1])
    else: raise ValueError('Could not convert object to potential function')
to_potential(f) yields f if f is a potential function; if f is not, but f can be converted to a potential function, that conversion is performed then the result is yielded. to_potential(Ellipsis) yields a potential function whose output is simply its input (i.e., the identity function). to_potential(None) is equivalent to to_potential(0). The following can be converted into potential functions: * Anything for which pimms.is_array(x, 'number') yields True (i.e., arrays of constants). * Any tuple (g, h) where g(x) yields a potential value and h(x) yields a jacobian matrix for the parameter vector x.
def _get_key_alias_from_cache(self, key_arn): ''' Find a key's alias by looking up its key_arn in the KEY_METADATA cache. This function will only work after a key has been lookedup by its alias and is meant as a convenience function for turning an ARN that's already been looked up back into its alias. ''' for alias in self.KEY_METADATA: if self.KEY_METADATA[alias]['KeyMetadata']['Arn'] == key_arn: return alias return None
Find a key's alias by looking up its key_arn in the KEY_METADATA cache. This function will only work after a key has been looked up by its alias and is meant as a convenience function for turning an ARN that's already been looked up back into its alias.
def getSet(self, setID):
    '''
    Gets the information of one specific build using its Brickset set ID.

    :param str setID: The ID of the build from Brickset.
    :returns: A single Build object.
    :rtype: :class:`brickfront.build.Build`
    :raises brickfront.errors.InvalidSetID: If no sets exist by that ID.
    '''
    params = {
        'apiKey': self.apiKey,
        'userHash': self.userHash,
        'setID': setID
    }
    response = get(Client.ENDPOINT.format('getSet'), params=params)
    self.checkResponse(response)

    # Parse the XML payload into Build objects.
    root = ET.fromstring(response.text)
    builds = [Build(child, self) for child in root]

    # An empty result means the set ID does not exist.
    if not builds:
        raise InvalidSetID('There is no set with the ID of `{}`.'.format(setID))
    return builds[0]
Gets the information of one specific build using its Brickset set ID. :param str setID: The ID of the build from Brickset. :returns: A single Build object. :rtype: :class:`brickfront.build.Build` :raises brickfront.errors.InvalidSetID: If no sets exist by that ID.
def zipline_root(environ=None):
    """
    Get the root directory for all zipline-managed files.

    For testing purposes, this accepts a dictionary to interpret as the os
    environment.

    Parameters
    ----------
    environ : dict, optional
        A dict to interpret as the os environment.

    Returns
    -------
    root : string
        Path to the zipline root dir.
    """
    env = os.environ if environ is None else environ
    root = env.get('ZIPLINE_ROOT', None)
    if root is not None:
        return root
    # Fall back to the per-user default location.
    return expanduser('~/.zipline')
Get the root directory for all zipline-managed files. For testing purposes, this accepts a dictionary to interpret as the os environment. Parameters ---------- environ : dict, optional A dict to interpret as the os environment. Returns ------- root : string Path to the zipline root dir.
def generate(env):
    """Add Builders and construction variables for lib to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)

    # Set-up ms tools paths
    msvc_setup_env_once(env)

    # Static libraries are produced with Microsoft's lib.exe archiver.
    env['AR'] = 'lib'
    env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
    # TEMPFILE lets SCons spill long command lines to a response file.
    env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
    # MSVC convention: no "lib" prefix, ".lib" suffix.
    env['LIBPREFIX'] = ''
    env['LIBSUFFIX'] = '.lib'
Add Builders and construction variables for lib to an Environment.
def get_organizers_events(self, id, **data):
    """
    GET /organizers/:id/events/
    Gets events of the :format:`organizer`.
    """
    # Build the resource path, then delegate to the shared GET helper.
    path = "/organizers/{0}/events/".format(id)
    return self.get(path, data=data)
GET /organizers/:id/events/ Gets events of the :format:`organizer`.
def getfilearchive(self, project_id, filepath=None):
    """
    Get an archive of the repository

    :param project_id: project id
    :param filepath: path to save the file to; when omitted, the filename
        from the response's Content-Disposition header is used
    :return: True if the file was saved to the filepath
    :raises exceptions.HttpError: on a non-200 response, with the
        server-provided message
    """
    # Normalize None to the empty string so the header-derived-name branch
    # below triggers for both None and ''.
    if not filepath:
        filepath = ''
    request = requests.get(
        '{0}/{1}/repository/archive'.format(self.projects_url, project_id),
        verify=self.verify_ssl, auth=self.auth, headers=self.headers,
        timeout=self.timeout)
    if request.status_code == 200:
        if filepath == "":
            # Extract the suggested filename from, e.g.,
            # 'attachment; filename="project.tar.gz"'.
            filepath = request.headers['content-disposition'].split(';')[1].split('=')[1].strip('"')
        with open(filepath, 'wb') as filesave:
            filesave.write(request.content)
        # TODO: Catch oserror exceptions as no permissions and such
        # TODO: change the filepath to a path and keep always the filename?
        return True
    else:
        msg = request.json()['message']
        raise exceptions.HttpError(msg)
Get an archive of the repository :param project_id: project id :param filepath: path to save the file to :return: True if the file was saved to the filepath
def players(game_id):
    """Gets player/coach/umpire information for the game with matching id."""
    # Fetch and parse the players XML document for this game.
    data = mlbgame.data.get_players(game_id)
    root = etree.parse(data).getroot()

    output = {'game_id': game_id}

    # One entry per team: the key is either 'home_team' or 'away_team'.
    for team in root.findall('team'):
        team_key = team.attrib['type'] + "_team"
        output[team_key] = {
            'players': [
                dict((key, p.get(key)) for key in p.keys())
                for p in team.findall('player')
            ],
            'coaches': [
                dict((key, c.get(key)) for key in c.keys())
                for c in team.findall('coach')
            ],
        }

    # Umpires are listed once per game, not per team.
    output['umpires'] = [
        dict((key, u.get(key)) for key in u.keys())
        for u in root.find('umpires').findall('umpire')
    ]
    return output
Gets player/coach/umpire information for the game with matching id.
def set_sim_data(inj, field, data):
    """Sets data of a SimInspiral instance."""
    # Translate the generic field name into the SimInspiral attribute name,
    # falling back to the field name itself when no mapping exists.
    try:
        attr = sim_inspiral_map[field]
    except KeyError:
        attr = field
    if attr == 'tc':
        # The coalescence time is stored as split geocentric end-time
        # fields: whole seconds plus nanoseconds.
        inj.geocent_end_time = int(data)
        inj.geocent_end_time_ns = int(1e9*(data % 1))
    else:
        setattr(inj, attr, data)
Sets data of a SimInspiral instance.
def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs):
    """
    Generate mass random date.

    :param size: int, number of dates to generate
    :param start: date similar object, int / str / date / datetime
    :param end: date similar object, int / str / date / datetime,
      default today's date
    :param kwargs: args placeholder
    :return: list of datetime.date
    """
    if end is None:
        end = date.today()

    # Work in ordinal day numbers so sampling is plain integer arithmetic.
    start_days = to_ordinal(parser.parse_datetime(start))
    end_days = to_ordinal(parser.parse_datetime(end))
    _assert_correct_start_end(start_days, end_days)
    if has_np:  # pragma: no cover
        # np.random.randint excludes the high bound while random.randint
        # includes it; pass ``end_days + 1`` so both code paths sample the
        # same inclusive [start_days, end_days] range.
        return [
            from_ordinal(days)
            for days in np.random.randint(start_days, end_days + 1, size)
        ]
    else:
        return [
            from_ordinal(random.randint(start_days, end_days))
            for _ in range(size)
        ]
Generate mass random date. :param size: int, number of dates to generate :param start: date similar object, int / str / date / datetime :param end: date similar object, int / str / date / datetime, default today's date :param kwargs: args placeholder :return: list of datetime.date
def app_score(self):
    """ Computes the area under the app curve.

    The curve is precision versus percentage-predicted-positive; the area
    is approximated with the midpoint rule over consecutive curve points.

    Returns
    -------
    float
        Approximate area under the curve.
    """
    # compute curve (the tau thresholds are not needed for the area)
    precisions, pct_pred_pos, _ = self.precision_pct_pred_pos_curve(interval=False)

    # accumulate midpoint-rule rectangles over consecutive points
    app = 0
    for k in range(len(precisions) - 1):
        mid_prec = (precisions[k] + precisions[k + 1]) / 2.0
        width_pp = np.abs(pct_pred_pos[k + 1] - pct_pred_pos[k])
        app += mid_prec * width_pp

    return app
Computes the area under the app curve.
def extractDate(text):
    """ Tries to extract a date from a given :obj:`str`.

    :param str text: Input date. A :obj:`datetime.date` object is passed
        through without modification.
    :rtype: :obj:`datetime.date`"""
    # Already a date: nothing to parse.
    if type(text) is datetime.date:
        return text
    lowered = text.lower()
    match = date_format.search(lowered)
    if not match:
        raise ValueError('unsupported date format: {0}'.format(lowered))

    month = match.group('month')
    if month:
        # Named-month form, e.g. "3. maerz 2018": resolve the month name
        # and expand two-digit years into the 2000s.
        if month not in month_names:
            raise ValueError('unknown month names: "{0}"'.format(month))
        year = int(match.group('year'))
        if year <= 2000:
            year = 2000 + year
        return datetime.date(year,
                             int(month_names[month]),
                             int(match.group('day')))

    # Numeric DD.MM.YYYY form: reverse into (year, month, day) pieces.
    pieces = [int(v) for v in reversed(match.group('datestr').split('.'))]
    if pieces[0] < 2000:
        pieces[0] += 2000
    return datetime.date(*pieces)
Tries to extract a date from a given :obj:`str`. :param str text: Input date. A :obj:`datetime.date` object is passed through without modification. :rtype: :obj:`datetime.date`
def GetMountPoint(self, path=None):
    """Walk back from the path to find the mount point.

    Args:
      path: a Unicode string containing the path or None. If path is None
        the value in self.path is used.

    Returns:
      path string of the mount point
    """
    current = client_utils.CanonicalPathToLocalPath(path or self.path)
    current = os.path.abspath(current)
    # Ascend the directory tree until we land on a mount point.
    while not os.path.ismount(current):
        current = os.path.dirname(current)
    return current
Walk back from the path to find the mount point. Args: path: a Unicode string containing the path or None. If path is None the value in self.path is used. Returns: path string of the mount point
def set_unobserved_after(self, tlen, qlen, nt, p):
    """Set the unobservable sequence data after this base.

    :param tlen: target homopolymer length
    :param qlen: query homopolymer length
    :param nt: nucleotide
    :param p: probability of attributing this base to the unobserved error
    :type tlen: int
    :type qlen: int
    :type nt: char
    :type p: float
    """
    # Storage is delegated to the unobservable-data container.
    self._unobservable.set_after(tlen, qlen, nt, p)
Set the unobservable sequence data after this base :param tlen: target homopolymer length :param qlen: query homopolymer length :param nt: nucleotide :param p: p is the probability of attributing this base to the unobserved error :type tlen: int :type qlen: int :type nt: char :type p: float
def from_xyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified CIE-XYZ values.

    Parameters:
      :x:
        The Red component value [0...1]
      :y:
        The Green component value [0...1]
      :z:
        The Blue component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
    Color(1.0, 0.5, 0.0, 1.0)

    >>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    # Convert XYZ to RGB first, then wrap in a Color with the given alpha.
    rgb = xyz_to_rgb(x, y, z)
    return Color(rgb, 'rgb', alpha, wref)
Create a new instance based on the specified CIE-XYZ values. Parameters: :x: The Red component value [0...1] :y: The Green component value [0...1] :z: The Blue component value [0...1] :alpha: The color transparency [0...1], default is opaque :wref: The whitepoint reference, default is 2° D65. Returns: A grapefruit.Color instance. >>> Color.from_xyz(0.488941, 0.365682, 0.0448137) Color(1.0, 0.5, 0.0, 1.0) >>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5) Color(1.0, 0.5, 0.0, 0.5)
def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
    """Error 307 -- relocated, but turn POST into error."""
    # Redirecting a request with a body would silently resubmit it, so a
    # 307 with data present is treated as an error; GETs follow the 302 path.
    if data is not None:
        return self.http_error_default(url, fp, errcode, errmsg, headers)
    return self.http_error_302(url, fp, errcode, errmsg, headers, data)
Error 307 -- relocated, but turn POST into error.
def install(self):
    """Install packages from the packages_dict."""
    # Record the detected distribution, then install its package list.
    self.distro = distro_check()
    distro_packages = self.packages_dict.get(self.distro)
    self._installer(package_list=distro_packages.get('packages'))
Install packages from the packages_dict.
def validate_items(self, item_list, item_type): """ Go through a list Pmag_objects and check for: parent errors, children errors, type errors. Return a dictionary of exceptions in this format: {sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]}, sample2: {'child': [warning1], 'type': [warning1, warning2]}, ...} """ def append_or_create_dict_item(warning_type, dictionary, key, value): """ Add to dictionary with this format: {key1: {warning_type1: [value1, value2], warning_type2: [value1]}, ...} """ if not value: return try: name = key.name except AttributeError: name = key if not name in dictionary: dictionary[name] = {} if not warning_type in dictionary[name]: dictionary[name][warning_type] = [] for v in value: dictionary[name][warning_type].append(v) def check_item_type(item, item_type):#, warnings=None): """ Make sure that item has appropriate type, and is in the data object. """ warnings = [] item_list, item_class, item_constructor = self.data_lists[item_type] if not isinstance(item, item_class): warnings.append(PmagException('wrong type')) if item not in item_list: warnings.append(PmagException('not in data object')) return warnings def check_item_for_parent(item, item_type, parent_type): """ Make sure that item has a parent of the correct type """ if not parent_type: return [] if not isinstance(item, Pmag_object): return [] warnings = [] parent = item.get_parent() parent_list, parent_class, parent_constructor = self.data_lists[parent_type] if not parent or not parent.name: warnings.append(PmagException('missing parent')) return warnings if not isinstance(parent, parent_class): warnings.append(PmagException('invalid parent type', parent)) if not parent in parent_list: warnings.append(PmagException('parent not in data object', parent)) return warnings def check_item_for_children(item, child_type): """ Make sure that any children are of the correct type, and are in the data object """ if not child_type: return [] warnings = [] 
children = item.children child_list, child_class, child_constructor = self.data_lists[child_type] for child in children: if not isinstance(child, child_class): warnings.append(PmagException('child has wrong type', child)) if not child in child_list: warnings.append(PmagException('child not in data object', child)) return warnings warnings = {} type_ind = self.ancestry.index(item_type) parent_type = self.ancestry[type_ind+1] child_type = self.ancestry[type_ind-1] for item in item_list: #warnings[item] = [] type_warnings = check_item_type(item, item_type) append_or_create_dict_item('type', warnings, item, type_warnings) parent_warnings = check_item_for_parent(item, item_type, parent_type) append_or_create_dict_item('parent', warnings, item, parent_warnings) child_warnings = check_item_for_children(item, child_type) append_or_create_dict_item('children', warnings, item, child_warnings) return warnings
Go through a list Pmag_objects and check for: parent errors, children errors, type errors. Return a dictionary of exceptions in this format: {sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]}, sample2: {'child': [warning1], 'type': [warning1, warning2]}, ...}
def get_templates(path: Path) -> List[str]: '''List all files in ``templates`` directory, including all subdirectories. The resulting list contains UNIX-like relative paths starting with ``templates``. ''' result = [] for item in path.glob('**/*'): if item.is_file() and not item.name.startswith('_'): result.append(item.relative_to(path.parent).as_posix()) return result
List all files in ``templates`` directory, including all subdirectories. The resulting list contains UNIX-like relative paths starting with ``templates``.
def p_information_duration_speed(self, p): 'information : duration AT speed' logger.debug('information = duration %s at speed %s', p[1], p[3]) p[0] = p[3].for_duration(p[1])
information : duration AT speed
def GetRootFileEntry(self): """Retrieves the root file entry. Returns: BDEFileEntry: file entry or None. """ path_spec = bde_path_spec.BDEPathSpec(parent=self._path_spec.parent) return self.GetFileEntryByPathSpec(path_spec)
Retrieves the root file entry. Returns: BDEFileEntry: file entry or None.
def disable_inside(item, *elems, **kwargs): """Prevent elems from matching inside of item. Returns (item with elem disabled, *new versions of elems). """ _invert = kwargs.get("_invert", False) internal_assert(set(kwargs.keys()) <= set(("_invert",)), "excess keyword arguments passed to disable_inside") level = [0] # number of wrapped items deep we are; in a list to allow modification @contextmanager def manage_item(self, instring, loc): level[0] += 1 try: yield finally: level[0] -= 1 yield Wrap(item, manage_item) @contextmanager def manage_elem(self, instring, loc): if level[0] == 0 if not _invert else level[0] > 0: yield else: raise ParseException(instring, loc, self.errmsg, self) for elem in elems: yield Wrap(elem, manage_elem)
Prevent elems from matching inside of item. Returns (item with elem disabled, *new versions of elems).
def setup(): """Walk the user through the Wallace setup.""" # Create the Wallace config file if it does not already exist. config_name = ".wallaceconfig" config_path = os.path.join(os.path.expanduser("~"), config_name) if os.path.isfile(config_path): log("Wallace config file already exists.", chevrons=False) else: log("Creating Wallace config file at ~/.wallaceconfig...", chevrons=False) wallace_module_path = os.path.dirname(os.path.realpath(__file__)) src = os.path.join(wallace_module_path, "config", config_name) shutil.copyfile(src, config_path)
Walk the user through the Wallace setup.
def get_url(path, host, port, method="http"): """ make url from path, host and port :param method: str :param path: str, path within the request, e.g. "/api/version" :param host: str :param port: str or int :return: str """ return urlunsplit( (method, "%s:%s" % (host, port), path, "", "") )
make url from path, host and port :param method: str :param path: str, path within the request, e.g. "/api/version" :param host: str :param port: str or int :return: str
def get_process_pid(process_name): """ check for process' pid file and returns pid from there """ try: pid_filename = get_pid_filename(process_name) with open(pid_filename, mode='r') as pid_file: pid = int(pid_file.read().strip()) except IOError: pid = None return pid
check for process' pid file and returns pid from there
def get_cfg(ast_func): """ Traverses the AST and returns the corresponding CFG :param ast_func: The AST representation of function :type ast_func: ast.Function :returns: The CFG representation of the function :rtype: cfg.Function """ cfg_func = cfg.Function() for ast_var in ast_func.input_variable_list: cfg_var = cfg_func.get_variable(ast_var.name) cfg_func.add_input_variable(cfg_var) for ast_var in ast_func.output_variable_list: cfg_var = cfg_func.get_variable(ast_var.name) cfg_func.add_output_variable(cfg_var) bb_start = cfg.BasicBlock() cfg_func.add_basic_block(bb_start) for stmt in ast_func.body: bb_temp = bb_start bb_temp = process_cfg(stmt, bb_temp, cfg_func) cfg_func.clean_up() cfg_func.add_summary(ast_func.summary) return cfg_func
Traverses the AST and returns the corresponding CFG :param ast_func: The AST representation of function :type ast_func: ast.Function :returns: The CFG representation of the function :rtype: cfg.Function
def _create_array(self, arr: np.ndarray) -> int: """Returns the handle of a RawArray created from the given numpy array. Args: arr: A numpy ndarray. Returns: The handle (int) of the array. Raises: ValueError: if arr is not a ndarray or of an unsupported dtype. If the array is of an unsupported type, using a view of the array to another dtype and then converting on get is often a work around. """ if not isinstance(arr, np.ndarray): raise ValueError('Array is not a numpy ndarray.') try: c_arr = np.ctypeslib.as_ctypes(arr) except (KeyError, NotImplementedError): raise ValueError( 'Array has unsupported dtype {}.'.format(arr.dtype)) # pylint: disable=protected-access raw_arr = RawArray(c_arr._type_, c_arr) with self._lock: if self._count >= len(self._arrays): self._arrays += len(self._arrays) * [None] self._get_next_free() # Note storing the shape is a workaround for an issue encountered # when upgrading to numpy 1.15. # See https://github.com/numpy/numpy/issues/11636 self._arrays[self._current] = (raw_arr, arr.shape) self._count += 1 return self._current
Returns the handle of a RawArray created from the given numpy array. Args: arr: A numpy ndarray. Returns: The handle (int) of the array. Raises: ValueError: if arr is not a ndarray or of an unsupported dtype. If the array is of an unsupported type, using a view of the array to another dtype and then converting on get is often a work around.
def _u_distance_covariance_sqr_naive(x, y, exponent=1): """ Naive unbiased estimator for distance covariance. Computes the unbiased estimator for distance covariance between two matrices, using an :math:`O(N^2)` algorithm. """ a = _u_distance_matrix(x, exponent=exponent) b = _u_distance_matrix(y, exponent=exponent) return u_product(a, b)
Naive unbiased estimator for distance covariance. Computes the unbiased estimator for distance covariance between two matrices, using an :math:`O(N^2)` algorithm.
def liquid_jet_pump_ancillary(rhop, rhos, Kp, Ks, d_nozzle=None, d_mixing=None, Qp=None, Qs=None, P1=None, P2=None): r'''Calculates the remaining variable in a liquid jet pump when solving for one of the inlet variables only and the rest of them are known. The equation comes from conservation of energy and momentum in the mixing chamber. The variable to be solved for must be one of `d_nozzle`, `d_mixing`, `Qp`, `Qs`, `P1`, or `P2`. .. math:: P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p) - \frac{1}{2}\rho_s V_3^2(1+K_s) Rearrange to express V3 in terms of Vn, and using the density ratio `C`, the expression becomes: .. math:: P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right] Using the primary nozzle area and flow rate: .. math:: P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2 \left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right] For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching between 1E-9 m and 20 times the other diameter which was specified. 
Parameters ---------- rhop : float The density of the primary (motive) fluid, [kg/m^3] rhos : float The density of the secondary fluid (drawn from the vacuum chamber), [kg/m^3] Kp : float The primary nozzle loss coefficient, [-] Ks : float The secondary inlet loss coefficient, [-] d_nozzle : float, optional The inside diameter of the primary fluid's nozzle, [m] d_mixing : float, optional The diameter of the mixing chamber, [m] Qp : float, optional The volumetric flow rate of the primary fluid, [m^3/s] Qs : float, optional The volumetric flow rate of the secondary fluid, [m^3/s] P1 : float, optional The pressure of the primary fluid entering its nozzle, [Pa] P2 : float, optional The pressure of the secondary fluid at the entry of the ejector, [Pa] Returns ------- solution : float The parameter not specified (one of `d_nozzle`, `d_mixing`, `Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`, `Pa`, or `Pa` respectively) Notes ----- The following SymPy code was used to obtain the analytical formulas ( they are not shown here due to their length): >>> from sympy import * >>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp') >>> R = A_nozzle/A_mixing >>> M = Qs/Qp >>> C = rhos/rhop >>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) >>> new = Eq(P1 - P2, rhs) >>> #solve(new, Qp) >>> #solve(new, Qs) >>> #solve(new, P1) >>> #solve(new, P2) Examples -------- Calculating primary fluid nozzle inlet pressure P1: >>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04, ... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238) 426434.60314398084 References ---------- .. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible Liquid Flow. 85032. ESDU International PLC, 1985. 
''' unknowns = sum(i is None for i in (d_nozzle, d_mixing, Qs, Qp, P1, P2)) if unknowns > 1: raise Exception('Too many unknowns') elif unknowns < 1: raise Exception('Overspecified') C = rhos/rhop if Qp is not None and Qs is not None: M = Qs/Qp if d_nozzle is not None: A_nozzle = pi/4*d_nozzle*d_nozzle if d_mixing is not None: A_mixing = pi/4*d_mixing*d_mixing R = A_nozzle/A_mixing if P1 is None: return rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P2 elif P2 is None: return -rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P1 elif Qs is None: try: return ((-2*A_nozzle**2*P1 + 2*A_nozzle**2*P2 + Kp*Qp**2*rhop + Qp**2*rhop)/(C*rhop*(Ks + 1)))**0.5*(A_mixing - A_nozzle)/A_nozzle except ValueError: return -1j elif Qp is None: return A_nozzle*((2*A_mixing**2*P1 - 2*A_mixing**2*P2 - 4*A_mixing*A_nozzle*P1 + 4*A_mixing*A_nozzle*P2 + 2*A_nozzle**2*P1 - 2*A_nozzle**2*P2 + C*Ks*Qs**2*rhop + C*Qs**2*rhop)/(rhop*(Kp + 1)))**0.5/(A_mixing - A_nozzle) elif d_nozzle is None: def err(d_nozzle): return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs, P1=None, P2=P2) return brenth(err, 1E-9, d_mixing*20) elif d_mixing is None: def err(d_mixing): return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs, P1=None, P2=P2) try: return brenth(err, 1E-9, d_nozzle*20) except: return newton(err, d_nozzle*2)
r'''Calculates the remaining variable in a liquid jet pump when solving for one of the inlet variables only and the rest of them are known. The equation comes from conservation of energy and momentum in the mixing chamber. The variable to be solved for must be one of `d_nozzle`, `d_mixing`, `Qp`, `Qs`, `P1`, or `P2`. .. math:: P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p) - \frac{1}{2}\rho_s V_3^2(1+K_s) Rearrange to express V3 in terms of Vn, and using the density ratio `C`, the expression becomes: .. math:: P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right] Using the primary nozzle area and flow rate: .. math:: P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2 \left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right] For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching between 1E-9 m and 20 times the other diameter which was specified. 
Parameters ---------- rhop : float The density of the primary (motive) fluid, [kg/m^3] rhos : float The density of the secondary fluid (drawn from the vacuum chamber), [kg/m^3] Kp : float The primary nozzle loss coefficient, [-] Ks : float The secondary inlet loss coefficient, [-] d_nozzle : float, optional The inside diameter of the primary fluid's nozzle, [m] d_mixing : float, optional The diameter of the mixing chamber, [m] Qp : float, optional The volumetric flow rate of the primary fluid, [m^3/s] Qs : float, optional The volumetric flow rate of the secondary fluid, [m^3/s] P1 : float, optional The pressure of the primary fluid entering its nozzle, [Pa] P2 : float, optional The pressure of the secondary fluid at the entry of the ejector, [Pa] Returns ------- solution : float The parameter not specified (one of `d_nozzle`, `d_mixing`, `Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`, `Pa`, or `Pa` respectively) Notes ----- The following SymPy code was used to obtain the analytical formulas ( they are not shown here due to their length): >>> from sympy import * >>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp') >>> R = A_nozzle/A_mixing >>> M = Qs/Qp >>> C = rhos/rhop >>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) >>> new = Eq(P1 - P2, rhs) >>> #solve(new, Qp) >>> #solve(new, Qs) >>> #solve(new, P1) >>> #solve(new, P2) Examples -------- Calculating primary fluid nozzle inlet pressure P1: >>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04, ... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238) 426434.60314398084 References ---------- .. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible Liquid Flow. 85032. ESDU International PLC, 1985.
def _validated(self, data): """Validate data if all subschemas validate it.""" for sub in self.schemas: data = sub(data) return data
Validate data if all subschemas validate it.
async def pop(self, full): ''' Remove and return the value for the given node. ''' node = self.nodes.get(full) if node is None: return valu = await self._popHiveNode(node) return valu
Remove and return the value for the given node.
def _operator_generator(self, index, conj): """ Internal method to generate the appropriate ladder operator at fermion orbital at 'index' If conj == -1 --> creation conj == +1 --> annihilation :param int index: fermion orbital to generate ladder operator at :param int conj: -1 for creation, +1 for annihilation """ if conj != -1 and conj != +1: raise ValueError("Improper conjugate coefficient") if index >= self.n_qubits or index < 0: raise IndexError("Operator index outside number of qubits for " "current Bravyi-Kitaev transform.") # parity set P(j). apply Z to, for parity sign. parity_set = [node.index for node in self.tree.get_parity_set(index)] # update set U(j). apply X to, for updating purposes. ancestors = [node.index for node in self.tree.get_update_set(index)] # remainder set C(j) = P(j) \ F(j) ancestor_children = [node.index for node in self.tree.get_remainder_set(index)] # Under Majorana basis, creation/annihilation operators given by # a^{\pm} = (c \mp id) / 2 # c_j = a_j + a_j^{\dagger} = X_{U(j)} X_j Z_{P(j)} c_maj = PauliTerm('X', index) for node_idx in parity_set: c_maj *= PauliTerm('Z', node_idx) for node_idx in ancestors: c_maj *= PauliTerm('X', node_idx) # d_j = i(a_j^{\dagger} - a_j) = X_{U(j)} Y_j Z_{C(j)} d_maj = PauliTerm('Y', index) for node_idx in ancestors: d_maj *= PauliTerm('X', node_idx) for node_idx in ancestor_children: d_maj *= PauliTerm('Z', node_idx) result = 0.5 * (c_maj + 1j * conj * d_maj) return result.simplify()
Internal method to generate the appropriate ladder operator at fermion orbital at 'index' If conj == -1 --> creation conj == +1 --> annihilation :param int index: fermion orbital to generate ladder operator at :param int conj: -1 for creation, +1 for annihilation
def spectral(data, lambd, *kwargs): """ Compute spectral contrast of image Performs bandpass filtering in Fourier space according to optical limit of detection system, approximated by twice the wavelength. Parameters ---------- data : 2d ndarray the image to compute the norm from lambd : float wavelength of the light in pixels """ # Set up fast fourier transform # if not data.dtype == np.dtype(np.complex): # data = np.array(data, dtype=np.complex) # fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores, # direction="forward", flags=_fftwflags) # fftdata = np.zeros(data.shape, dtype=np.complex) # fftplan.guru_execute_dft(data, fftdata) # fftw.destroy_plan(fftplan) fftdata = np.fft.fftn(data) # Filter Fourier transform fftdata[0, 0] = 0 kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1) ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1) kmax = (2 * np.pi) / (2 * lambd) fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0 spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape)) return spec
Compute spectral contrast of image Performs bandpass filtering in Fourier space according to optical limit of detection system, approximated by twice the wavelength. Parameters ---------- data : 2d ndarray the image to compute the norm from lambd : float wavelength of the light in pixels
def encode(cls, value): """ take a list and turn it into a utf-8 encoded byte-string for redis. :param value: list :return: bytes """ try: coerced = list(value) if coerced == value: return json.dumps(coerced).encode(cls._encoding) except TypeError: pass raise InvalidValue('not a list')
take a list and turn it into a utf-8 encoded byte-string for redis. :param value: list :return: bytes
def _fake_modifyinstance(self, namespace, **params): """ Implements a server responder for :meth:`~pywbem.WBEMConnection.CreateInstance` Modify a CIM instance in the local repository. Raises: CIMError: CIM_ERR_ALREADY_EXISTS, CIM_ERR_INVALID_CLASS """ if self._repo_lite: raise CIMError( CIM_ERR_NOT_SUPPORTED, "ModifyInstance not supported when repo_lite set.") # Validate namespace instance_repo = self._get_instance_repo(namespace) modified_instance = deepcopy(params['ModifiedInstance']) property_list = params['PropertyList'] # Return if empty property list if property_list is not None and not property_list: return if modified_instance is not None and not modified_instance: return if not isinstance(modified_instance, CIMInstance): raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("The ModifiedInstance parameter is not a valid " "CIMInstance. Rcvd type={0}", type(modified_instance))) # Classnames in instance and path must match if modified_instance.classname != modified_instance.path.classname: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("ModifyInstance classname in path and instance do " "not match. classname={0!A}, path.classname={1!A}", modified_instance.classname, modified_instance.path.classname)) # Get class including properties from superclasses from repo try: target_class = self.GetClass(modified_instance.classname, namespace=namespace, LocalOnly=False, IncludeQualifiers=True, IncludeClassOrigin=True) except CIMError as ce: if ce.status_code == CIM_ERR_NOT_FOUND: raise CIMError( CIM_ERR_INVALID_CLASS, _format("Cannot modify instance because its creation " "class {0!A} does not exist in namespace {1!A}.", modified_instance.classname, namespace)) raise # get key properties and all class props cl_props = [p.name for p in six.itervalues(target_class.properties)] key_props = [p.name for p in six.itervalues(target_class.properties) if 'key' in p.qualifiers] # Get original instance in repo. Does not copy the orig instance. 
mod_inst_path = modified_instance.path.copy() if modified_instance.path.namespace is None: mod_inst_path.namespace = namespace orig_instance_tup = self._find_instance(mod_inst_path, instance_repo) if orig_instance_tup[0] is None: raise CIMError( CIM_ERR_NOT_FOUND, _format("Original Instance {0!A} not found in namespace {1!A}", modified_instance.path, namespace)) original_instance = orig_instance_tup[1] # Remove duplicate properties from property_list if property_list: if len(property_list) != len(set(property_list)): property_list = list(set(property_list)) # Test that all properties in modified instance and property list # are in the class if property_list: for p in property_list: if p not in cl_props: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Property {0!A} in PropertyList not in class " "{1!A}", p, modified_instance.classname)) for p in modified_instance: if p not in cl_props: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Property {0!A} in ModifiedInstance not in class " "{1!A}", p, modified_instance.classname)) # Set the class value for properties in the property list but not # in the modified_instance. This sets just the value component. 
mod_inst_props = set(modified_instance.keys()) new_props = mod_inst_props.difference(set(cl_props)) if new_props: for new_prop in new_props: modified_instance[new_prop] = \ target_class.properties[new_prop].value # Remove all properties that do not change value between original # instance and modified instance for p in list(modified_instance): if original_instance[p] == modified_instance[p]: del modified_instance[p] # Confirm no key properties in remaining modified instance for p in key_props: if p in modified_instance: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("ModifyInstance cannot modify key property {0!A}", p)) # Remove any properties from modified instance not in the property_list if property_list: for p in list(modified_instance): if p not in property_list: del modified_instance[p] # Exception if property in instance but not class or types do not # match for pname in modified_instance: if pname not in target_class.properties: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Property {0!A} specified in ModifiedInstance is " "not exposed by class {1!A} in namespace {2!A}", pname, target_class.classname, namespace)) cprop = target_class.properties[pname] iprop = modified_instance.properties[pname] if iprop.is_array != cprop.is_array \ or iprop.type != cprop.type \ or iprop.array_size != cprop.array_size: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Instance and class property name={0!A} type " "or other attributes do not match: " "instance={1!A}, class={2!A}", pname, iprop, cprop)) # Modify the value of properties in the repo with those from # modified instance index = orig_instance_tup[0] instance_repo[index].update(modified_instance.properties) return
Implements a server responder for :meth:`~pywbem.WBEMConnection.CreateInstance` Modify a CIM instance in the local repository. Raises: CIMError: CIM_ERR_ALREADY_EXISTS, CIM_ERR_INVALID_CLASS
def _get_part(pointlist, strokes): """Get some strokes of pointlist Parameters ---------- pointlist : list of lists of dicts strokes : list of integers Returns ------- list of lists of dicts """ result = [] strokes = sorted(strokes) for stroke_index in strokes: result.append(pointlist[stroke_index]) return result
Get some strokes of pointlist Parameters ---------- pointlist : list of lists of dicts strokes : list of integers Returns ------- list of lists of dicts
def quaternion_imag(quaternion): """Return imaginary part of quaternion. >>> quaternion_imag([3, 0, 1, 2]) array([0., 1., 2.]) """ return np.array(quaternion[1:4], dtype=np.float64, copy=True)
Return imaginary part of quaternion. >>> quaternion_imag([3, 0, 1, 2]) array([0., 1., 2.])
def _retrieve_remote(fnames): """Retrieve remote inputs found in the same bucket as the template or metadata files. """ for fname in fnames: if objectstore.is_remote(fname): inputs = [] regions = [] remote_base = os.path.dirname(fname) for rfname in objectstore.list(remote_base): if rfname.endswith(tuple(KNOWN_EXTS.keys())): inputs.append(rfname) elif rfname.endswith((".bed", ".bed.gz")): regions.append(rfname) return {"base": remote_base, "inputs": inputs, "region": regions[0] if len(regions) == 1 else None} return {}
Retrieve remote inputs found in the same bucket as the template or metadata files.
def inv(self): """The inverse translation""" result = Translation(-self.t) result._cache_inv = self return result
The inverse translation
def fkg_allowing_type_hints( namespace: Optional[str], fn: Callable, to_str: Callable[[Any], str] = repr) -> Callable[[Any], str]: """ Replacement for :func:`dogpile.cache.util.function_key_generator` that handles type-hinted functions like .. code-block:: python def testfunc(param: str) -> str: return param + "hello" ... at which :func:`inspect.getargspec` balks; plus :func:`inspect.getargspec` is deprecated in Python 3. Used as an argument to e.g. ``@cache_region_static.cache_on_arguments()``. Also modified to make the cached function unique per INSTANCE for normal methods of a class. Args: namespace: optional namespace, as per :func:`get_namespace` fn: function to generate a key for (usually the function being decorated) to_str: function to apply to map arguments to a string (to make a unique key for a particular call to the function); by default it is :func:`repr` Returns: a function that generates a string key, based on a given function as well as arguments to the returned function itself. """ namespace = get_namespace(fn, namespace) sig = inspect.signature(fn) argnames = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD] has_self = bool(argnames and argnames[0] in ('self', 'cls')) def generate_key(*args: Any, **kw: Any) -> str: """ Makes the actual key for a specific call to the decorated function, with particular ``args``/``kwargs``. """ if kw: raise ValueError("This dogpile.cache key function generator, " "fkg_allowing_type_hints, " "does not accept keyword arguments.") if has_self: # Unlike dogpile's default, make it instance- (or class-) specific # by including a representation of the "self" or "cls" argument: args = [hex(id(args[0]))] + list(args[1:]) key = namespace + "|" + " ".join(map(to_str, args)) if DEBUG_INTERNALS: log.debug("fkg_allowing_type_hints.generate_key() -> {!r}", key) return key return generate_key
Replacement for :func:`dogpile.cache.util.function_key_generator` that handles type-hinted functions like .. code-block:: python def testfunc(param: str) -> str: return param + "hello" ... at which :func:`inspect.getargspec` balks; plus :func:`inspect.getargspec` is deprecated in Python 3. Used as an argument to e.g. ``@cache_region_static.cache_on_arguments()``. Also modified to make the cached function unique per INSTANCE for normal methods of a class. Args: namespace: optional namespace, as per :func:`get_namespace` fn: function to generate a key for (usually the function being decorated) to_str: function to apply to map arguments to a string (to make a unique key for a particular call to the function); by default it is :func:`repr` Returns: a function that generates a string key, based on a given function as well as arguments to the returned function itself.
def runner(self): """ Run the necessary methods in the correct order """ logging.info('Starting {} analysis pipeline'.format(self.analysistype)) # Initialise the GenObject for sample in self.runmetadata.samples: setattr(sample, self.analysistype, GenObject()) try: sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus] except KeyError: sample[self.analysistype].pointfindergenus = 'ND' # Run the raw read mapping PointSipping(inputobject=self, cutoff=self.cutoff) # Create FASTA files from the raw read matcves self.fasta() # Run PointFinder on the FASTA files self.run_pointfinder() # Create summary reports of the PointFinder outputs self.parse_pointfinder()
Run the necessary methods in the correct order
def stop(self): """Mark the stop of the interval. Calling stop on an already stopped interval has no effect. An interval can only be stopped once. :returns: the duration if the interval is truely stopped otherwise ``False``. """ if self._start_instant is None: raise IntervalException("Attempt to stop an interval that has not started.") if self._stop_instant is None: self._stop_instant = instant() self._duration = int((self._stop_instant - self._start_instant) * 1000) return self._duration return False
Mark the stop of the interval. Calling stop on an already stopped interval has no effect. An interval can only be stopped once. :returns: the duration if the interval is truely stopped otherwise ``False``.
def main(**kwargs): """ Draw a couple of simple graphs and optionally generate an HTML file to upload them """ draw_lines() draw_histogram() draw_bar_chart() destination = "-r /report" if use_html: generate_html() command = "dx-build-report-html {h} {d}".format(h=html_filename, d=destination) else: command = "dx-build-report-html {l} {b} {h} {d}".format(l=lines_filename, b=bars_filename, h=histogram_filename, d=destination) sub_output = json.loads(subprocess.check_output(command, shell=True)) output = {} output["report"] = dxpy.dxlink(sub_output["recordId"]) return output
Draw a couple of simple graphs and optionally generate an HTML file to upload them
def generate_net(df,tf_idf,dump_path=None): '''Generate WordNetwork dict of Word() instance, and dump as a file if asked to. @Args: -- df : IDF value generated by find_tf_idf() tf_idf : TF-IDF value generated by find_tf_idf() dump_path : file_path where to dump network entities, standart format is '.wrnt' (default=None) @returns: -- word_net : list if Word() instances.(creating a network of words) ''' # error handling if dump_path and dump_path[-4:] != __WRNT_FORMAT: raise Exception(__WRNG_FORMAT_MSG) start_t = datetime.now() print(TAG,'Network Genertion initiated..') word_net = {} # list of word entities. #registering all word instances in a dict of network for word in df.keys(): word_net[word] = Word(word) print(TAG,'word-network instances created..',datetime.now()-start_t) start_t = datetime.now() #TODO: code for going through all the tf_idf elements and finding backward links and forward links of every word in word_net. for docs in tf_idf: for word in docs.keys(): word_net[word].addtofrwrd_links(set(docs.keys())) print(TAG, 'word filled with their relative words(network generated)... ',datetime.now()-start_t) # Dump the generated lists if dump_path is given. if dump_path: start_t = datetime.now() __words = {} __network = [] i=0 # creating word dict for refrence in next stage. for word in word_net: __words[word] = i i+=1 # creating final network list to be dumped. format=['word',1,2,3,4...(refrences from words dict)] for word in word_net: __temp_list = [word] __temp_list.extend([__words[w] for w in word_net[word].frwrd_links]) __network.append(__temp_list) del __temp_list print(TAG, 'created final relative-words list.. return ready.',datetime.now()-start_t) start_t = datetime.now() # Dumping data using pickle dump_file = open(dump_path,'wb') pickle.dump(__network,dump_file,protocol=pickle.HIGHEST_PROTOCOL) dump_file.close() print(TAG,'word network dumped @',dump_path,datetime.now()-start_t) #cleaning afterwards del __words del __network return word_net
Generate WordNetwork dict of Word() instances, and dump it to a file if asked to. @Args: -- df : IDF value generated by find_tf_idf() tf_idf : TF-IDF value generated by find_tf_idf() dump_path : file path where to dump network entities; the standard format is '.wrnt' (default=None) @returns: -- word_net : dict of Word() instances (creating a network of words)
def parse_rule(tokens, variables, neighbors, parents, is_merc):
    """ Parse a rule set, return a list of declarations.

        Requires a dictionary of declared variables. Selectors in the
        neighbors list are simply grouped, and are generated from
        comma-delimited lists of selectors in the stylesheet. Selectors in
        the parents list should be combined with those found by this
        function, and are generated from nested, Less-style rulesets.

        A rule set is a combination of selectors and declarations:
        http://www.w3.org/TR/CSS2/syndata.html#rule-sets

        Nesting is described in the Less CSS spec:
        http://lesscss.org/#-nested-rules

        To handle groups of selectors, use recursion:
        http://www.w3.org/TR/CSS2/selector.html#grouping

        NOTE(review): uses the Python 2 iterator protocol
        (``tokens.next()``) throughout.
    """
    #
    # Local helper functions
    #
    def validate_selector_elements(elements, line, col):
        # Enforce the subset of CSS selectors that can be mapped onto
        # Mapnik styles; each violation is a hard parse error.
        if len(elements) > 2:
            raise ParseException('Only two-element selectors are supported for Mapnik styles', line, col)
        if len(elements) == 0:
            raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
        if elements[0].names[0] not in ('Map', 'Layer') and elements[0].names[0][0] not in ('.', '#', '*'):
            raise ParseException('All non-ID, non-class first elements must be "Layer" Mapnik styles', line, col)
        if set([name[:1] for name in elements[0].names[1:]]) - set('#.'):
            raise ParseException('All names after the first must be IDs or classes', line, col)
        if len(elements) == 2 and elements[1].countTests():
            raise ParseException('Only the first element in a selector may have attributes in Mapnik styles', line, col)
        if len(elements) == 2 and elements[1].countIDs():
            raise ParseException('Only the first element in a selector may have an ID in Mapnik styles', line, col)
        if len(elements) == 2 and elements[1].countClasses():
            raise ParseException('Only the first element in a selector may have a class in Mapnik styles', line, col)

    def parse_variable_definition(tokens):
        """ Look for variable value tokens after an @keyword, return an array.
        """
        while True:
            tname, tvalue, line, col = tokens.next()
            if (tname, tvalue) == ('CHAR', ':'):
                # Collect value tokens until the terminating ';' or newline
                vtokens = []
                while True:
                    tname, tvalue, line, col = tokens.next()
                    if (tname, tvalue) in (('CHAR', ';'), ('S', '\n')):
                        return vtokens
                    elif tname not in ('S', 'COMMENT'):
                        vtokens.append((tname, tvalue, line, col))
            elif tname not in ('S', 'COMMENT'):
                raise ParseException('Unexpected token in variable definition: "%s"' % tvalue, line, col)

    #
    # The work.
    #
    ElementClass = SelectorElement
    element = None
    elements = []

    while True:
        tname, tvalue, line, col = tokens.next()

        if tname == 'ATKEYWORD':
            #
            # Likely variable definition:
            # http://lesscss.org/#-variables
            #
            variables[tvalue] = parse_variable_definition(tokens)

        elif (tname, tvalue) == ('CHAR', '&'):
            #
            # Start of a nested block with a "&" combinator
            # http://lesscss.org/#-nested-rules
            #
            ElementClass = ConcatenatedElement

        elif tname == 'S':
            #
            # Definitely no longer in a "&" combinator.
            #
            ElementClass = SelectorElement

        elif tname == 'IDENT':
            #
            # Identifier always starts a new element.
            #
            element = ElementClass()
            elements.append(element)
            element.addName(tvalue)

        elif tname == 'HASH':
            #
            # Hash is an ID selector:
            # http://www.w3.org/TR/CSS2/selector.html#id-selectors
            #
            if not element:
                element = ElementClass()
                elements.append(element)
            element.addName(tvalue)

        elif (tname, tvalue) == ('CHAR', '.'):
            while True:
                tname, tvalue, line, col = tokens.next()
                if tname == 'IDENT':
                    #
                    # Identifier after a period is a class selector:
                    # http://www.w3.org/TR/CSS2/selector.html#class-html
                    #
                    if not element:
                        element = ElementClass()
                        elements.append(element)
                    element.addName('.'+tvalue)
                    break
                else:
                    raise ParseException('Malformed class selector', line, col)

        elif (tname, tvalue) == ('CHAR', '*'):
            #
            # Asterisk character is a universal selector:
            # http://www.w3.org/TR/CSS2/selector.html#universal-selector
            #
            if not element:
                element = ElementClass()
                elements.append(element)
            element.addName(tvalue)

        elif (tname, tvalue) == ('CHAR', '['):
            #
            # Left-bracket is the start of an attribute selector:
            # http://www.w3.org/TR/CSS2/selector.html#attribute-selectors
            #
            if not element:
                element = ElementClass()
                elements.append(element)
            test = parse_attribute(tokens, is_merc)
            element.addTest(test)

        elif (tname, tvalue) == ('CHAR', ','):
            #
            # Comma delineates one of a group of selectors:
            # http://www.w3.org/TR/CSS2/selector.html#grouping
            #
            # Recurse here.
            #
            neighbors.append(Selector(*elements))
            return parse_rule(tokens, variables, neighbors, parents, is_merc)

        elif (tname, tvalue) == ('CHAR', '{'):
            #
            # Left-brace is the start of a block:
            # http://www.w3.org/TR/CSS2/syndata.html#block
            #
            # Return a full block here.
            #
            class DummySelector:
                def __init__(self, *elements):
                    self.elements = elements[:]

            neighbors.append(DummySelector(*elements))

            selectors = []

            #
            # Combine lists of parents and neighbors into a single list of
            # selectors, for passing off to parse_block(). There might not
            # be any parents, but there will definitely be neighbors.
            #
            for parent in (parents or [DummySelector()]):
                for neighbor in neighbors:
                    if len(neighbor.elements) == 0:
                        raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)

                    # Python 2 iterator: .next() pulls the first element
                    elements = chain(parent.elements + neighbor.elements)
                    selector = Selector(deepcopy(elements.next()))

                    for element in elements:
                        if element.__class__ is ConcatenatedElement:
                            # "&"-combined elements merge into the previous one
                            for name in element.names:
                                selector.elements[-1].addName(deepcopy(name))
                            for test in element.tests:
                                selector.elements[-1].addTest(deepcopy(test))
                        else:
                            selector.addElement(deepcopy(element))

                    # selector should be fully valid at this point.
                    validate_selector_elements(selector.elements, line, col)
                    selector.convertZoomTests(is_merc)
                    selectors.append(selector)

            return parse_block(tokens, variables, selectors, is_merc)

        elif tname not in ('S', 'COMMENT'):
            raise ParseException('Unexpected token in selector: "%s"' % tvalue, line, col)
Parse a rule set, return a list of declarations. Requires a dictionary of declared variables. Selectors in the neighbors list are simply grouped, and are generated from comma-delimited lists of selectors in the stylesheet. Selectors in the parents list should be combined with those found by this functions, and are generated from nested, Less-style rulesets. A rule set is a combination of selectors and declarations: http://www.w3.org/TR/CSS2/syndata.html#rule-sets Nesting is described in the Less CSS spec: http://lesscss.org/#-nested-rules To handle groups of selectors, use recursion: http://www.w3.org/TR/CSS2/selector.html#grouping
def _Tension(T):
    """Equation for the surface tension

    Parameters
    ----------
    T : float
        Temperature, [K]

    Returns
    -------
    σ : float
        Surface tension, [N/m]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:

        * 248.15 ≤ T ≤ 647
        * Extrapolates to -25ºC in the supercooled liquid metastable state

    Examples
    --------
    >>> _Tension(300)
    0.0716859625
    >>> _Tension(450)
    0.0428914992

    References
    ----------
    IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
    June 2014, http://www.iapws.org/relguide/Surf-H2O.html
    """
    # Guard clause: reject temperatures outside the correlation's validity range
    if not 248.15 <= T <= Tc:
        raise NotImplementedError("Incoming out of bound")
    # IAPWS 2014 correlation in reduced temperature
    Tr = T/Tc
    sigma = 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
    return sigma
Equation for the surface tension Parameters ---------- T : float Temperature, [K] Returns ------- σ : float Surface tension, [N/m] Notes ------ Raise :class:`NotImplementedError` if input isn't in limit: * 248.15 ≤ T ≤ 647 * Estrapolate to -25ºC in supercooled liquid metastable state Examples -------- >>> _Tension(300) 0.0716859625 >>> _Tension(450) 0.0428914992 References ---------- IAPWS, Revised Release on Surface Tension of Ordinary Water Substance June 2014, http://www.iapws.org/relguide/Surf-H2O.html
def touch_tip(self, location=None, radius=1.0, v_offset=-1.0, speed=60.0):
    """
    Touch the :any:`Pipette` tip to the sides of a well,
    with the intent of removing left-over droplets

    Notes
    -----
    If no `location` is passed, the pipette will touch_tip
    from it's current position.

    Parameters
    ----------
    location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
        The :any:`Placeable` (:any:`Well`) to perform the touch_tip.
        Can also be a tuple with first item :any:`Placeable`,
        second item relative :any:`Vector`

    radius : float
        Radius is a floating point describing the percentage of a well's
        radius. When radius=1.0, :any:`touch_tip()` will move to 100% of
        the wells radius. When radius=0.5, :any:`touch_tip()` will move to
        50% of the wells radius.
        Default: 1.0 (100%)

    speed: float
        The speed for touch tip motion, in mm/s.
        Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s

    v_offset: float
        The offset in mm from the top of the well to touch tip.
        Default: -1.0 mm

    Returns
    -------

    This instance of :class:`Pipette`.

    Examples
    --------
    ..
    >>> from opentrons import instruments, labware, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> plate = labware.load('96-flat', '8') # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
    >>> p300.aspirate(50, plate[0]) # doctest: +SKIP
    >>> p300.dispense(plate[1]).touch_tip() # doctest: +SKIP
    """
    if not self.tip_attached:
        log.warning("Cannot touch tip without a tip attached.")

    # Clamp speed to the supported 20-80 mm/s envelope
    if speed > 80.0:
        log.warning("Touch tip speeds greater than 80mm/s not allowed")
        speed = 80.0
    if speed < 20.0:
        # BUGFIX: this warning previously repeated the "greater than 80mm/s"
        # text even though it fires for speeds below the 20 mm/s minimum
        log.warning("Touch tip speeds less than 20mm/s not allowed")
        speed = 20.0

    if helpers.is_number(location):
        # Deprecated syntax: touch_tip(v_offset_number)
        log.warning("Please use the `v_offset` named parameter")
        v_offset = location
        location = None

    # if no location specified, use the previously
    # associated placeable to get Well dimensions
    if location is None:
        location = self.previous_placeable

    do_publish(self.broker, commands.touch_tip, self.touch_tip, 'before',
               None, None, self, location, radius, v_offset, speed)

    # move to location if we're not already there
    if location != self.previous_placeable:
        self.move_to(location)

    v_offset = (0, 0, v_offset)

    well_edges = [
        location.from_center(x=radius, y=0, z=1),       # right edge
        location.from_center(x=radius * -1, y=0, z=1),  # left edge
        location.from_center(x=0, y=radius, z=1),       # back edge
        location.from_center(x=0, y=radius * -1, z=1)   # front edge
    ]

    # Apply vertical offset to well edges
    well_edges = map(lambda x: x + v_offset, well_edges)

    self.robot.gantry.push_speed()
    self.robot.gantry.set_speed(speed)
    [self.move_to((location, e), strategy='direct') for e in well_edges]
    self.robot.gantry.pop_speed()

    do_publish(self.broker, commands.touch_tip, self.touch_tip, 'after',
               self, None, self, location, radius, v_offset, speed)

    return self
Touch the :any:`Pipette` tip to the sides of a well, with the intent of removing left-over droplets Notes ----- If no `location` is passed, the pipette will touch_tip from it's current position. Parameters ---------- location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`) The :any:`Placeable` (:any:`Well`) to perform the touch_tip. Can also be a tuple with first item :any:`Placeable`, second item relative :any:`Vector` radius : float Radius is a floating point describing the percentage of a well's radius. When radius=1.0, :any:`touch_tip()` will move to 100% of the wells radius. When radius=0.5, :any:`touch_tip()` will move to 50% of the wells radius. Default: 1.0 (100%) speed: float The speed for touch tip motion, in mm/s. Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s v_offset: float The offset in mm from the top of the well to touch tip. Default: -1.0 mm Returns ------- This instance of :class:`Pipette`. Examples -------- .. >>> from opentrons import instruments, labware, robot # doctest: +SKIP >>> robot.reset() # doctest: +SKIP >>> plate = labware.load('96-flat', '8') # doctest: +SKIP >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP >>> p300.aspirate(50, plate[0]) # doctest: +SKIP >>> p300.dispense(plate[1]).touch_tip() # doctest: +SKIP
def run(self):
    """Run the App main logic.

    Reads the playbook inputs, pretty-prints the JSON data into
    ``self.pretty_json`` (exiting with code 1 on parse failure), and sets
    the App exit message.
    """
    # Read the playbook inputs
    indent_width = int(self.tcex.playbook.read(self.args.indent))
    payload = self.tcex.playbook.read(self.args.json_data)

    # String inputs arrive serialized and must be deserialized first
    if self.tcex.playbook.variable_type(self.args.json_data) in ['String']:
        payload = json.loads(payload)

    try:
        # Re-serialize with the requested indentation / key ordering
        self.pretty_json = json.dumps(
            payload, indent=indent_width, sort_keys=self.args.sort_keys)
    except Exception:
        self.tcex.exit(1, 'Failed parsing JSON data.')

    # set the App exit message
    self.exit_message = 'JSON prettified.'
Run the App main logic. This method should contain the core logic of the App.
def create(self, filename, filedata):
    """Create a file from raw data.

    :param filename: name to store the file under
    :param filedata: raw file contents
    :returns: whatever the transport's POST call returns
    """
    payload = {
        'filename': filename,
        'source': filedata,
    }
    return self.transport.POST(
        url='/file/v2/', body=payload, type='multipart/form-data')
Create a file from raw data
def get_assessment(self, assessment):
    """Fetch a single Assessment by its id.

    :param assessment: the assessment identifier
    :returns: a Schemas.Assessment built from the HTTP response
    """
    response = self.http.get('/Assessment/' + str(assessment))
    return Schemas.Assessment(assessment=response)
To get Assessment by id
def parse_alarm(self, global_params, region, alarm):
    """
    Parse a single CloudWatch alarm and store it under a normalized id.

    :param global_params:           Parameters shared for all regions
    :param region:                  Name of the AWS region
    :param alarm:                   Alarm dict as returned by the CloudWatch API
    """
    alarm['arn'] = alarm.pop('AlarmArn')
    alarm['name'] = alarm.pop('AlarmName')
    # Drop volatile state/timestamp attributes that are not needed downstream.
    # dict.pop(k, None) replaces the old "foo = alarm.pop(k) if k in alarm
    # else None", which bound an unused variable and looked the key up twice.
    for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason',
              'StateReasonData', 'StateUpdatedTimestamp']:
        alarm.pop(k, None)
    alarm_id = self.get_non_aws_id(alarm['arn'])
    self.alarms[alarm_id] = alarm
Parse a single CloudWatch trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param alarm: Alarm
def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
    """
    Modifies the health checks used when evaluating the health state of the
    targets in the specified target group. To monitor the health of the
    targets, use DescribeTargetHealth.

    See also: AWS API Documentation

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        target group.
    :type HealthCheckProtocol: string
    :param HealthCheckProtocol: The protocol ('HTTP' or 'HTTPS') to use to
        connect with the target.
    :type HealthCheckPort: string
    :param HealthCheckPort: The port to use to connect with the target.
    :type HealthCheckPath: string
    :param HealthCheckPath: The ping path that is the destination for the
        health check request.
    :type HealthCheckIntervalSeconds: integer
    :param HealthCheckIntervalSeconds: The approximate amount of time, in
        seconds, between health checks of an individual target.
    :type HealthCheckTimeoutSeconds: integer
    :param HealthCheckTimeoutSeconds: The amount of time, in seconds, during
        which no response means a failed health check.
    :type HealthyThresholdCount: integer
    :param HealthyThresholdCount: The number of consecutive health check
        successes required before considering an unhealthy target healthy.
    :type UnhealthyThresholdCount: integer
    :param UnhealthyThresholdCount: The number of consecutive health check
        failures required before considering the target unhealthy.
    :type Matcher: dict
    :param Matcher: The HTTP codes to use when checking for a successful
        response from a target. 'HttpCode' (string, required) accepts values
        between 200 and 499; multiple values ('200,202') or ranges
        ('200-299') are allowed. The default value is 200.
    :rtype: dict
    :return: A dict with a 'TargetGroups' list describing the modified
        target group(s): ARN, name, protocol, port, VPC id, the full health
        check configuration, the Matcher, and associated LoadBalancerArns.
    """
    # Documentation-only stub: the body is intentionally empty. Presumably
    # the real request is dispatched elsewhere (e.g. by generated client
    # machinery) -- confirm before relying on a return value.
    pass
Modifies the health checks used when evaluating the health state of the targets in the specified target group. To monitor the health of the targets, use DescribeTargetHealth . See also: AWS API Documentation Examples This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group. Expected Output: :example: response = client.modify_target_group( TargetGroupArn='string', HealthCheckProtocol='HTTP'|'HTTPS', HealthCheckPort='string', HealthCheckPath='string', HealthCheckIntervalSeconds=123, HealthCheckTimeoutSeconds=123, HealthyThresholdCount=123, UnhealthyThresholdCount=123, Matcher={ 'HttpCode': 'string' } ) :type TargetGroupArn: string :param TargetGroupArn: [REQUIRED] The Amazon Resource Name (ARN) of the target group. :type HealthCheckProtocol: string :param HealthCheckProtocol: The protocol to use to connect with the target. :type HealthCheckPort: string :param HealthCheckPort: The port to use to connect with the target. :type HealthCheckPath: string :param HealthCheckPath: The ping path that is the destination for the health check request. :type HealthCheckIntervalSeconds: integer :param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target. :type HealthCheckTimeoutSeconds: integer :param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check. :type HealthyThresholdCount: integer :param HealthyThresholdCount: The number of consecutive health checks successes required before considering an unhealthy target healthy. :type UnhealthyThresholdCount: integer :param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy. :type Matcher: dict :param Matcher: The HTTP codes to use when checking for a successful response from a target. HttpCode (string) -- [REQUIRED]The HTTP codes. 
You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299'). :rtype: dict :return: { 'TargetGroups': [ { 'TargetGroupArn': 'string', 'TargetGroupName': 'string', 'Protocol': 'HTTP'|'HTTPS', 'Port': 123, 'VpcId': 'string', 'HealthCheckProtocol': 'HTTP'|'HTTPS', 'HealthCheckPort': 'string', 'HealthCheckIntervalSeconds': 123, 'HealthCheckTimeoutSeconds': 123, 'HealthyThresholdCount': 123, 'UnhealthyThresholdCount': 123, 'HealthCheckPath': 'string', 'Matcher': { 'HttpCode': 'string' }, 'LoadBalancerArns': [ 'string', ] }, ] } :returns: (string) --
def get_sampletype_data(self):
    """Yield the base-info dict for every SampleType object."""
    for sampletype in self.get_sampletypes():
        yield self.get_base_info(sampletype)
Returns a list of SampleType data
def getCPUuse(self):
    """Return cpu time utilization in seconds.

    Parses the aggregate 'cpu' line of the proc stats file and converts
    each jiffy count to seconds using the system clock tick rate.

    @return: Dictionary of stats keyed by time category (user, nice,
             system, idle, iowait, irq, softirq, steal, guest); empty
             dict if the first line is not an aggregate 'cpu' line.
    """
    # Clock ticks (jiffies) per second, for converting counts to seconds
    hz = os.sysconf('SC_CLK_TCK')
    info_dict = {}
    try:
        # 'with' guarantees the handle is closed even if readline() fails;
        # the previous version leaked the handle on a read error.
        with open(cpustatFile, 'r') as fp:
            line = fp.readline()
    except (IOError, OSError):
        # Narrowed from a bare 'except' so unrelated errors are not masked.
        raise IOError('Failed reading stats from file: %s' % cpustatFile)
    headers = ['user', 'nice', 'system', 'idle', 'iowait',
               'irq', 'softirq', 'steal', 'guest']
    arr = line.split()
    if len(arr) > 1 and arr[0] == 'cpu':
        # zip truncates to the shorter sequence, so extra columns are ignored
        return dict(zip(headers[0:len(arr)],
                        [(float(t) / hz) for t in arr[1:]]))
    return info_dict
Return cpu time utilization in seconds. @return: Dictionary of stats.
def dict_partial_cmp(target_dict, dict_list, ducktype):
    """Return True if every entry of target_dict matches some dict in
    dict_list (a "partial dict" containment check).

    Values are compared through ducktype(): if the resulting object exposes
    an ``mtest`` method it is used as the matcher, otherwise plain equality
    is used.
    """
    def _entry_matches(key, candidate):
        # One target entry against one candidate dict.
        expected = ducktype(target_dict[key])
        if key not in candidate:
            return False
        if hasattr(expected, "mtest"):
            return expected.mtest(candidate[key])
        return expected == candidate[key]

    for candidate in dict_list:
        # A smaller dict can never contain all of target_dict's entries.
        if len(target_dict) > len(candidate):
            continue
        if all(_entry_matches(key, candidate) for key in target_dict):
            return True
    # no candidate matched every entry
    return False
Whether partial dict are in dict_list or not
def hstack(tup):
    """
    Horizontally stack a sequence of value bounds pairs.

    Parameters
    ----------
    tup: sequence
        a sequence of value, ``Bound`` pairs

    Returns
    -------
    value: ndarray
        a horizontally concatenated array1d
    bounds:
        a list of Bounds
    """
    values, bound_groups = zip(*tup)
    flat_bounds = []
    for group in bound_groups:
        flat_bounds.extend(group)
    return np.hstack(values), flat_bounds
Horizontally stack a sequence of value bounds pairs. Parameters ---------- tup: sequence a sequence of value, ``Bound`` pairs Returns ------- value: ndarray a horizontally concatenated array1d bounds: a list of Bounds
def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
    "Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
    title = 'Input / Prediction / Target'
    axs = subplots(len(xs), 3, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
    for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
        # Column layout matches the title: 0 = input, 1 = prediction, 2 = target
        x.show(ax=axs[i,0], **kwargs)
        y.show(ax=axs[i,2], **kwargs)  # target in the right-hand column
        z.show(ax=axs[i,1], **kwargs)  # prediction in the middle column
Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
def find_locales(self) -> Dict[str, gettext.GNUTranslations]:
    """
    Load all compiled locales from path

    Scans every sub-directory of ``self.path`` for a compiled
    ``LC_MESSAGES/<domain>.mo`` catalogue; a matching ``.po`` without a
    compiled ``.mo`` is treated as an error.

    :return: dict with locales
    """
    translations = {}
    for name in os.listdir(self.path):
        locale_dir = os.path.join(self.path, name)
        if not os.path.isdir(locale_dir):
            continue  # stray files next to the locale directories
        mo_path = os.path.join(locale_dir, 'LC_MESSAGES', self.domain + '.mo')
        if os.path.exists(mo_path):
            with open(mo_path, 'rb') as fp:
                translations[name] = gettext.GNUTranslations(fp)
        elif os.path.exists(mo_path[:-2] + 'po'):
            raise RuntimeError(f"Found locale '{name} but this language is not compiled!")
    return translations
Load all compiled locales from path :return: dict with locales
def makedirs(directory):
    """
    Recursively create a named directory, including any missing parents.

    Delegates to :func:`os.makedirs`, which creates every missing ancestor
    and raises ``OSError`` if the leaf directory already exists — the same
    contract as the previous hand-rolled recursion, without reimplementing
    the standard library.
    """
    os.makedirs(directory)
Recursively create a named directory.
def init_model_based_tags(self, model):
    """Initializing the model based memory and NIC information tags.

    It should be called just after instantiating a RIBCL object.

        ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
                                      port, cacert=cacert)
        model = ribcl.get_product_name()
        ribcl.init_model_based_tags(model)

    Again, the model attribute is also set here on the RIBCL object.

    :param model: the model string
    """
    self.model = model
    is_gen7 = 'G7' in self.model
    if is_gen7:
        self.MEMORY_SIZE_TAG = "MEMORY_SIZE"
        self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed"
        # NOTE(review): "NIC_INFOMATION" looks misspelled but presumably
        # mirrors the tag the G7 firmware actually emits -- do not "fix" it.
        self.NIC_INFORMATION_TAG = "NIC_INFOMATION"
    else:
        self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE"
        self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A"
        self.NIC_INFORMATION_TAG = "NIC_INFORMATION"
Initializing the model based memory and NIC information tags. It should be called just after instantiating a RIBCL object. ribcl = ribcl.RIBCLOperations(host, login, password, timeout, port, cacert=cacert) model = ribcl.get_product_name() ribcl.init_model_based_tags(model) Again, model attribute is also set here on the RIBCL object. :param model: the model string
def _replace_constant_methods(self):
    """Replaces conventional distribution methods by its constant counterparts."""
    # Rebind each public distribution method to the degenerate
    # (constant-distribution) implementation.
    self.cumulative_distribution = self._constant_cumulative_distribution
    self.percent_point = self._constant_percent_point
    self.probability_density = self._constant_probability_density
    self.sample = self._constant_sample
Replaces conventional distribution methods by its constant counterparts.
async def deploy(
        self, *, user_data: typing.Union[bytes, str] = None,
        distro_series: str = None, hwe_kernel: str = None,
        comment: str = None, wait: bool = False, wait_interval: int = 5):
    """Deploy this machine.

    :param user_data: User-data to provide to the machine when booting. If
        provided as a byte string, it will be base-64 encoded prior to
        transmission. If provided as a Unicode string it will be assumed
        to be already base-64 encoded.
    :param distro_series: The OS to deploy.
    :param hwe_kernel: The HWE kernel to deploy. Probably only relevant
        when deploying Ubuntu.
    :param comment: A comment for the event log.
    :param wait: If specified, wait until the deploy is complete.
    :param wait_interval: How often to poll, defaults to 5 seconds
    :raises FailedDeployment: when ``wait`` is True and the machine ends up
        in the FAILED_DEPLOYMENT state.
    """
    params = {"system_id": self.system_id}
    if user_data is not None:
        if isinstance(user_data, bytes):
            # Raw bytes: encode for transmission
            params["user_data"] = base64.encodebytes(user_data)
        else:
            # Already base-64 encoded. Convert to a byte string in
            # preparation for multipart assembly.
            params["user_data"] = user_data.encode("ascii")
    if distro_series is not None:
        params["distro_series"] = distro_series
    if hwe_kernel is not None:
        params["hwe_kernel"] = hwe_kernel
    if comment is not None:
        params["comment"] = comment
    self._data = await self._handler.deploy(**params)
    if not wait:
        return self
    else:
        # Wait for the machine to be fully deployed, polling the handler
        # for fresh machine data on each iteration
        while self.status == NodeStatus.DEPLOYING:
            await asyncio.sleep(wait_interval)
            self._data = await self._handler.read(system_id=self.system_id)
        if self.status == NodeStatus.FAILED_DEPLOYMENT:
            msg = "{hostname} failed to deploy.".format(
                hostname=self.hostname
            )
            raise FailedDeployment(msg, self)
        return self
Deploy this machine. :param user_data: User-data to provide to the machine when booting. If provided as a byte string, it will be base-64 encoded prior to transmission. If provided as a Unicode string it will be assumed to be already base-64 encoded. :param distro_series: The OS to deploy. :param hwe_kernel: The HWE kernel to deploy. Probably only relevant when deploying Ubuntu. :param comment: A comment for the event log. :param wait: If specified, wait until the deploy is complete. :param wait_interval: How often to poll, defaults to 5 seconds
def view(self, **kwds):
    """
    Endpoint: /action/<id>/view.json

    Requests the full contents of the action.
    Updates the action object's fields with the response.
    """
    refreshed = self._client.action.view(self, **kwds)
    self._replace_fields(refreshed.get_fields())
    self._update_fields_with_objects()
Endpoint: /action/<id>/view.json Requests the full contents of the action. Updates the action object's fields with the response.
def center(self, axis=1):
    """
    Subtract the mean either within or across records.

    Parameters
    ----------
    axis : int, optional, default = 1
        Which axis to center along, within (1) or across (0) records.
    """
    if axis == 0:
        # across records: subtract the global mean from every record
        global_mean = self.mean().toarray()
        return self.map(lambda x: x - global_mean)
    if axis == 1:
        # within records: subtract each record's own mean
        return self.map(lambda x: x - mean(x))
    raise Exception('Axis must be 0 or 1')
Subtract the mean either within or across records. Parameters ---------- axis : int, optional, default = 1 Which axis to center along, within (1) or across (0) records.
def image(self):
    """An Image like array of ``self.data`` convenient for image processing tasks

    * 2D array for single band, grayscale image data
    * 3D array for three band, RGB image data

    Enables working with ``self.data`` as if it were a PIL image.

    See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
    how to open images to view them and make manipulations.
    """
    band_count = self.bands
    if band_count == 1:
        # grayscale: drop the length-1 band axis
        return self.data.squeeze()
    if band_count == 3:
        # RGB: stack the three band planes depth-wise (band-last layout)
        return numpy.dstack(self.data)
    # any other band count falls through and returns None
An Image like array of ``self.data`` convenient for image processing tasks * 2D array for single band, grayscale image data * 3D array for three band, RGB image data Enables working with ``self.data`` as if it were a PIL image. See https://planetaryimage.readthedocs.io/en/latest/usage.html to see how to open images to view them and make manipulations.
def createPolygon(self, points, strokewidth=1, stroke='black', fill='none'):
    """
    Creates a Polygon
    @type points: string in the form "x1,y1 x2,y2 x3,y3"
    @param points:  all points relevant to the polygon
    @type strokewidth: string or int
    @param strokewidth: width of the pen used to draw
    @type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
    @param stroke:  color with which to draw the outer limits
    @type fill: string (either css constants like "black" or numerical values like "#FFFFFF")
    @param fill:  color with which to fill the element (default: no filling)
    @return: a polygon object
    """
    style = StyleBuilder({
        'fill': fill,
        'stroke-width': strokewidth,
        'stroke': stroke,
    })
    polygon = Polygon(points=points)
    polygon.set_style(style.getStyle())
    return polygon
Creates a Polygon @type points: string in the form "x1,y1 x2,y2 x3,y3" @param points: all points relevant to the polygon @type strokewidth: string or int @param strokewidth: width of the pen used to draw @type stroke: string (either css constants like "black" or numerical values like "#FFFFFF") @param stroke: color with which to draw the outer limits @type fill: string (either css constants like "black" or numerical values like "#FFFFFF") @param fill: color with which to fill the element (default: no filling) @return: a polygon object
def soldOutForRole(self,role,includeTemporaryRegs=False):
    '''
    Accepts a DanceRole object and reports whether the number of
    registrations for that role has reached or exceeded the capacity for
    that role at this event.
    '''
    registered = self.numRegisteredForRole(
        role, includeTemporaryRegs=includeTemporaryRegs)
    # a missing capacity is treated as zero, i.e. immediately sold out
    capacity = self.capacityForRole(role) or 0
    return registered >= capacity
Accepts a DanceRole object and responds if the number of registrations for that role exceeds the capacity for that role at this event.
def add_ring(self, ring):
    """Append ``ring`` to ``_rings`` unless it is already present or is not
    a RingDing0 instance."""
    already_known = ring in self._rings
    if not already_known and isinstance(ring, RingDing0):
        self._rings.append(ring)
Adds a ring to _rings if not already existing