code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def log(*args, **kwargs):
    """Emit a message through the module-level logger.

    The keyword argument ``level`` (default ``logging.INFO``) selects the
    log level; every other argument is forwarded to ``logger.log``.
    """
    severity = kwargs.pop('level', logging.INFO)
    logger.log(severity, *args, **kwargs)
Log things with the global logger.
def _relative_score(self, start_eot, end_eot, active, passive): """Return the balance of perception between the two nodes. A positive score indicates the result is relatively better for active. """ active_start = self._score_eot_for_actor(start_eot, active) passive_start = self._score_eot_for_actor(start_eot, passive) active_end = self._score_eot_for_actor(end_eot, active) passive_end = self._score_eot_for_actor(end_eot, passive) return (active_end - passive_end) - (active_start - passive_start)
Return the balance of perception between the two nodes. A positive score indicates the result is relatively better for active.
def default_arguments(self):
    """Return this object's default arguments keyed by name.

    :rtype: OrderedDict mapping argument name to argument object,
        preserving the order of ``self._default_args``.
    """
    # Direct item assignment instead of the original d.update({...}),
    # which built a throwaway one-item dict per argument.  The original
    # docstring also repeated ":rtype dict" twice; fixed here.
    d = OrderedDict()
    for arg in self._default_args:
        d[arg.name] = arg
    return d
:rtype: dict
def fetch_bug_details(self, bug_ids):
    """Fetch bug metadata from Bugzilla.

    Returns the decoded list of bug dicts on success; returns None when
    the request fails, the response is an HTML page, or the payload has
    no 'bugs' key.
    """
    params = {
        'include_fields': 'product, component, priority, whiteboard, id',
        'id': bug_ids,
    }
    try:
        response = self.session.get(
            settings.BZ_API_URL + '/rest/bug',
            headers=self.session.headers,
            params=params,
            timeout=30,
        )
        response.raise_for_status()
    except RequestException as e:
        logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
        return None

    # Bugzilla can serve an HTML error page with a 200 status.
    if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
        return None

    data = response.json()
    return data['bugs'] if 'bugs' in data else None
Fetches bug metadata from bugzilla and returns an encoded dict if successful, otherwise returns None.
def _init_metadata(self):
    """Initialize metadata for both the text and the files form records.

    Each mixin's initializer is invoked explicitly before delegating up
    the MRO; the call order is deliberate — do not reorder these lines.
    """
    QuestionTextFormRecord._init_metadata(self)
    QuestionFilesFormRecord._init_metadata(self)
    super(QuestionTextAndFilesMixin, self)._init_metadata()
stub
def gc_velocity_update(particle, social, state):
    """Guaranteed convergence (GC) velocity update.

    Every particle except the swarm's global best uses the standard
    velocity update; the global best instead performs a randomized
    search around its current position.

    Args:
        particle: cipy.algorithms.pso.Particle: Particle to update the
            velocity for.
        social: cipy.algorithms.pso.Particle: The social best for the
            particle.
        state: cipy.algorithms.pso.State: The state of the PSO algorithm.

    Returns:
        numpy.ndarray: the calculated velocity, clamped to v_max.
    """
    gbest = state.swarm[gbest_idx(state.swarm)].position
    if not np.array_equal(gbest, particle.position):
        # Not the global best particle: plain PSO update applies.
        return std_velocity(particle, social, state)

    params = state.params
    inertia = params['inertia']
    rho = params['rho']
    r2 = state.rng.uniform(0.0, 1.0, particle.position.size)
    velocity = __gc_velocity_equation__(inertia, rho, r2, particle, gbest)
    return __clamp__(velocity, params['v_max'])
Guaranteed convergence velocity update. Args: particle: cipy.algorithms.pso.Particle: Particle to update the velocity for. social: cipy.algorithms.pso.Particle: The social best for the particle. state: cipy.algorithms.pso.State: The state of the PSO algorithm. Returns: numpy.ndarray: the calculated velocity.
def parse_cache_control(self, headers):
    """Parse cache control headers into a dict of directives.

    Directives carrying a value ('max-age=3600') map to that value
    (lower-cased); bare directives ('no-cache') map to 1.  Returns an
    empty dict when no cache control header is present.
    """
    cc_header = 'Cache-Control' if 'Cache-Control' in headers else 'cache-control'
    if cc_header not in headers:
        return {}

    directives = {}
    for part in headers[cc_header].split(','):
        if '=' in part:
            name, _, value = part.partition('=')
            directives[name.strip().lower()] = value.strip().lower()
        else:
            directives[part.strip().lower()] = 1
    return directives
Parse the cache control headers returning a dictionary with values for the different directives.
def new(localfile, jottapath, JFS):
    """Upload a new file from local disk (one that doesn't exist on
    JottaCloud yet).

    Returns the JottaFile object for the uploaded file.
    """
    # 'rb' rather than the original text mode: an upload must not go
    # through newline/encoding translation, which corrupts binary files
    # and raises on non-UTF-8 content.
    with open(localfile, 'rb') as lf:
        return JFS.up(jottapath, lf)
Upload a new file from local disk (doesn't exist on JottaCloud). Returns JottaFile object
def generate_secret(length=30):
    """Generate a random ASCII secret of ``length`` characters.

    Uses random.SystemRandom (a CSPRNG); based on oauthlib's
    common.generate_token function.
    """
    alphabet = string.ascii_letters + string.digits
    choose = random.SystemRandom().choice
    return ''.join(choose(alphabet) for _ in range(length))
Generate an ASCII secret using random.SystemRandom Based on oauthlib's common.generate_token function
def _check_jwt_claims(jwt_claims):
    """Check whether the JWT claims should be accepted.

    Validates the "exp" claim and, when present, the "nbf" claim against
    the current time: rejects the token if it has expired, or if the
    not-before time has not yet been reached.

    Args:
        jwt_claims: the JWT claims whose expiration is to be checked.

    Raises:
        UnauthenticatedException: when a claim is malformed, the token
            has already expired, or the "nbf" time is still in the future.
    """
    now = time.time()

    expiration = jwt_claims[u"exp"]
    if not isinstance(expiration, INT_TYPES):
        raise suppliers.UnauthenticatedException(u'Malformed claim: "exp" must be an integer')
    if now >= expiration:
        raise suppliers.UnauthenticatedException(u"The auth token has already expired")

    if u"nbf" not in jwt_claims:
        return

    not_before = jwt_claims[u"nbf"]
    if not isinstance(not_before, INT_TYPES):
        raise suppliers.UnauthenticatedException(u'Malformed claim: "nbf" must be an integer')
    if now < not_before:
        raise suppliers.UnauthenticatedException(u'Current time is less than the "nbf" time')
Checks whether the JWT claims should be accepted. Specifically, this method checks the "exp" claim and the "nbf" claim (if present), and raises UnauthenticatedException if 1) the current time is before the time identified by the "nbf" claim, or 2) the current time is equal to or after the time identified by the "exp" claim. Args: jwt_claims: the JWT claims whose expiration is to be checked. Raises: UnauthenticatedException: When the "exp" claim is malformed or the JWT has already expired.
def clean_ret_type(ret_type):
    """Clean an erroneously parsed return type string.

    Collapses the duplicated 'LRESULT LRESULT' parsing artifact and
    strips SAL / CRT annotation macros that leak into declarations.
    """
    ret_type = get_printable(ret_type).strip()
    if ret_type == 'LRESULT LRESULT':
        ret_type = 'LRESULT'

    noise_macros = (
        'DECLSPEC_NORETURN', 'NTSYSCALLAPI', '__kernel_entry',
        '__analysis_noreturn', '_Post_equals_last_error_',
        '_Maybe_raises_SEH_exception_', '_CRT_STDIO_INLINE', '_ACRTIMP',
    )
    for macro in noise_macros:
        if macro in ret_type:
            ret_type = ret_type.replace(macro, '').strip()
            logging.debug(_('cleaned %s'), macro)
    return ret_type
Clean the erroneous parsed return type.
def digest(instr, checksum='md5'):
    '''
    Return a checksum digest for a string

    instr
        A string
    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Valid options:
        md5, sha256, sha512.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest 'get salted'
    '''
    hash_func = {
        'md5': __salt__['hashutil.md5_digest'],
        'sha256': __salt__['hashutil.sha256_digest'],
        'sha512': __salt__['hashutil.sha512_digest'],
    }.get(checksum)
    if hash_func is None:
        raise salt.exceptions.CommandExecutionError(
            "Hash func '{0}' is not supported.".format(checksum))
    return hash_func(instr)
Return a checksum digest for a string instr A string checksum : ``md5`` The hashing algorithm to use to generate checksums. Valid options: md5, sha256, sha512. CLI Example: .. code-block:: bash salt '*' hashutil.digest 'get salted'
def render_to_response(self, context, indent=None):
    """Return a JSON response containing 'context' as payload."""
    payload = self.convert_context_to_json(context, indent=indent)
    return self.get_json_response(payload)
Returns a JSON response containing 'context' as payload
def deleteAllNetworkViews(self, networkId, verbose=None):
    """Delete all Network Views of the Network given by `networkId`.

    Cytoscape can hold multiple views per network model, although the
    GUI only exposes the first available view.

    :param networkId: SUID of the Network
    :param verbose: print more
    :returns: default: successful operation
    """
    views_url = self.___url + 'networks/' + str(networkId) + '/views'
    return api(url=views_url, method="DELETE", verbose=verbose)
Deletes all Network Views available in the Network specified by the `networkId` parameter. Cytoscape can have multiple views per network model, but this feature is not exposed in the Cytoscape GUI. GUI access is limited to the first available view only. :param networkId: SUID of the Network :param verbose: print more :returns: default: successful operation
def as_indexable(array):
    """Wrap `array` so it is always an ExplicitlyIndexed subclass,
    guaranteeing that vectorized indexing is possible on the result.
    """
    if isinstance(array, ExplicitlyIndexed):
        return array
    # Ordered dispatch: first matching wrapper wins.
    wrappers = [
        (np.ndarray, NumpyIndexingAdapter),
        (pd.Index, PandasIndexAdapter),
        (dask_array_type, DaskIndexingAdapter),
    ]
    for kinds, adapter in wrappers:
        if isinstance(array, kinds):
            return adapter(array)
    raise TypeError('Invalid array type: {}'.format(type(array)))
This function always returns a ExplicitlyIndexed subclass, so that the vectorized indexing is always possible with the returned object.
def remove_line(self, section, line):
    """Remove all instances of a line from a section.

    Returns:
        int: the number of lines removed; 0 when the section is missing.
    """
    try:
        target = self._get_section(section, create=False)
    except KeyError:
        # Section does not exist: nothing to remove.
        return 0
    return target.remove(line)
Remove all instances of a line. Returns: int: the number of lines removed
def vqa_attention_base():
    """VQA attention baseline hparams."""
    hparams = common_hparams.basic_params1()
    hparams.batch_size = 128
    # Bug fix: a trailing comma previously made this the tuple `(True,)`
    # rather than the boolean True.
    hparams.use_fixed_batch_size = True
    hparams.optimizer = "adam"
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.999
    hparams.optimizer_adam_epsilon = 1e-8
    hparams.weight_decay = 0.
    hparams.clip_grad_norm = 0.
    hparams.initializer = "xavier"
    hparams.learning_rate = 0.5
    hparams.learning_rate_schedule = "legacy"
    hparams.learning_rate_warmup_steps = 0
    hparams.learning_rate_decay_scheme = "exp"
    hparams.learning_rate_decay_rate = 0.5
    hparams.learning_rate_decay_steps = 50000
    hparams.dropout = 0.5
    hparams.summarize_grads = True
    hparams.summarize_vars = True

    # not used hparams
    hparams.label_smoothing = 0.
    hparams.multiply_embedding_mode = ""

    # add new hparams
    # preprocess
    hparams.add_hparam("resize_side", 512)
    hparams.add_hparam("height", 448)
    hparams.add_hparam("width", 448)
    hparams.add_hparam("distort", True)
    hparams.add_hparam("train_resnet", False)
    hparams.add_hparam("rnn_type", "lstm")
    hparams.add_hparam("num_rnn_layers", 1)
    hparams.add_hparam("max_question_length", 15)
    # lstm hidden size
    hparams.hidden_size = 512

    hparams.add_hparam("attn_dim", 512)
    hparams.add_hparam("num_glimps", 2)

    hparams.add_hparam("num_mlp_layers", 1)
    hparams.add_hparam("mlp_dim", 1024)

    hparams.add_hparam("image_input_type", "image")
    hparams.add_hparam("image_model_fn", "resnet_v1_152")
    hparams.add_hparam("image_feat_size", 0)

    # self attention parts
    hparams.norm_type = "layer"
    hparams.layer_preprocess_sequence = "n"
    hparams.layer_postprocess_sequence = "da"
    hparams.layer_prepostprocess_dropout = 0.3
    hparams.attention_dropout = 0.1
    hparams.relu_dropout = 0.1
    hparams.image_hidden_size = 2048
    hparams.add_hparam("num_encoder_layers", 1)
    # Attention-related flags.
    hparams.add_hparam("num_heads", 8)
    hparams.add_hparam("attention_key_channels", 0)
    hparams.add_hparam("attention_value_channels", 0)
    hparams.add_hparam("image_filter_size", 1024)
    hparams.add_hparam("self_attention_type", "dot_product")
    hparams.add_hparam("scale_dotproduct", True)

    return hparams
VQA attention baseline hparams.
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_tx_accepts(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload for the fcoe-get-interface RPC output's
    tx-accepts counter and hands it to the callback (default:
    ``self._callback``).

    Required kwargs: ``fcoe_intf_fcoe_port_id`` (list key) and
    ``fcoe_intf_tx_accepts`` (the counter value).
    """
    # The original created ET.Element("config") and immediately
    # overwrote the binding; the dead allocation is removed here.
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    fcoe_intf_tx_accepts = ET.SubElement(fcoe_intf_list, "fcoe-intf-tx-accepts")
    fcoe_intf_tx_accepts.text = kwargs.pop('fcoe_intf_tx_accepts')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def serialize_with_sampled_logs(self, logs_limit=-1):
    """Serialize this result with up to `logs_limit` logs.

    Passing -1 (the default) includes every log.
    """
    logs = [log.serialize for log in self.sampled_logs(logs_limit)]
    args = self.args.serialize if self.args is not None else []
    commands = [cmd.serialize for cmd in self.commands]
    snapshots = [cmd.serialize for cmd in self.snapshots]
    return {
        'id': self.id,
        'pathName': self.path_name,
        'name': self.name,
        'isUnregistered': self.is_unregistered,
        'logs': logs,
        'args': args,
        'commands': commands,
        'snapshots': snapshots,
        'logModifiedAt': self.log_modified_at.isoformat(),
    }
serialize a result with up to `logs_limit` logs. If `logs_limit` is -1, this function will return a result with all its logs.
def parse():
    """Parse command line options for Dynamic DynamoDB.

    Returns a dict containing only options whose value is not None
    (user-supplied flags plus argparse defaults).  When ``--version``
    is given, prints the version and exits.
    """
    parser = argparse.ArgumentParser(
        description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB')
    parser.add_argument(
        '-c', '--config',
        help='Read configuration from a configuration file')
    parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Run without making any changes to your DynamoDB table')
    parser.add_argument(
        '--run-once',
        action='store_true',
        help='Run once and then exit Dynamic DynamoDB, instead of looping')
    parser.add_argument(
        '--show-config',
        action='store_true',
        help='Parse config files, print parsed data and then exit Dynamic DynamoDB')
    parser.add_argument(
        '--check-interval',
        type=int,
        help="""How many seconds should we wait between
                the checks (default: 300)""")
    parser.add_argument(
        '--log-file',
        help='Send output to the given log file')
    parser.add_argument(
        '--log-level',
        choices=['debug', 'info', 'warning', 'error'],
        help='Log level to use (default: info)')
    parser.add_argument(
        '--log-config-file',
        help=(
            'Use a custom Python logging configuration file. Overrides both '
            '--log-level and --log-file.'))
    parser.add_argument(
        '--version',
        action='store_true',
        help='Print current version number')
    parser.add_argument(
        '--aws-access-key-id',
        help="Override Boto configuration with the following AWS access key")
    parser.add_argument(
        '--aws-secret-access-key',
        help="Override Boto configuration with the following AWS secret key")

    daemon_ag = parser.add_argument_group('Daemon options')
    daemon_ag.add_argument(
        '--daemon',
        help=(
            'Run Dynamic DynamoDB in daemon mode. Valid modes are '
            '[start|stop|restart|foreground]'))
    daemon_ag.add_argument(
        '--instance',
        default='default',
        help=(
            'Name of the Dynamic DynamoDB instance. '
            'Used to run multiple instances of Dynamic DynamoDB. '
            'Give each instance a unique name and control them separately '
            'with the --daemon flag. (default: default)'))
    daemon_ag.add_argument(
        '--pid-file-dir',
        default='/tmp',
        help='Directory where pid file is located in. Defaults to /tmp')

    dynamodb_ag = parser.add_argument_group('DynamoDB options')
    dynamodb_ag.add_argument(
        '-r', '--region',
        # Fixed: the help text was missing its closing parenthesis.
        help='AWS region to operate in (default: us-east-1)')
    dynamodb_ag.add_argument(
        '-t', '--table-name',
        help=(
            'Table(s) to target. '
            'The name is treated as a regular expression. '
            'E.g. "^my_table.*$" or "my_table"'))

    r_scaling_ag = parser.add_argument_group('Read units scaling properties')
    r_scaling_ag.add_argument(
        '--reads-upper-threshold',
        type=int,
        help="""Scale up the reads with --increase-reads-with if the
                currently consumed read units reaches this many
                percent (default: 90)""")
    r_scaling_ag.add_argument(
        '--throttled-reads-upper-threshold',
        type=int,
        help="""Scale up the reads with --increase-reads-with if the
                count of throttled read events exceeds this
                count (default: 0)""")
    r_scaling_ag.add_argument(
        '--reads-lower-threshold',
        type=int,
        help="""Scale down the reads with --decrease-reads-with if the
                currently consumed read units is as low as this
                percentage (default: 30)""")
    r_scaling_ag.add_argument(
        '--increase-reads-with',
        type=int,
        help="""How much should we increase the read units with?
                (default: 50, max: 100 if --increase-reads-unit = percent)""")
    r_scaling_ag.add_argument(
        '--decrease-reads-with',
        type=int,
        help="""How much should we decrease the read units with?
                (default: 50)""")
    r_scaling_ag.add_argument(
        '--increase-reads-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    r_scaling_ag.add_argument(
        '--decrease-reads-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    r_scaling_ag.add_argument(
        '--min-provisioned-reads',
        type=int,
        help="""Minimum number of provisioned reads""")
    r_scaling_ag.add_argument(
        '--max-provisioned-reads',
        type=int,
        help="""Maximum number of provisioned reads""")
    r_scaling_ag.add_argument(
        '--num-read-checks-before-scale-down',
        type=int,
        help="""Number of consecutive checks that must meet criteria
                before a scale down event occurs""")
    r_scaling_ag.add_argument(
        '--num-read-checks-reset-percent',
        type=int,
        help="""Percentage Value that will cause the num_read_checks_before
                scale_down var to reset back to 0""")

    w_scaling_ag = parser.add_argument_group('Write units scaling properties')
    w_scaling_ag.add_argument(
        '--writes-upper-threshold',
        type=int,
        help="""Scale up the writes with --increase-writes-with if the
                currently consumed write units reaches this many
                percent (default: 90)""")
    w_scaling_ag.add_argument(
        '--throttled-writes-upper-threshold',
        type=int,
        help="""Scale up the reads with --increase-writes-with if the
                count of throttled write events exceeds this
                count (default: 0)""")
    w_scaling_ag.add_argument(
        '--writes-lower-threshold',
        type=int,
        help="""Scale down the writes with --decrease-writes-with if the
                currently consumed write units is as low as this
                percentage (default: 30)""")
    w_scaling_ag.add_argument(
        '--increase-writes-with',
        type=int,
        help="""How much should we increase the write units with?
                (default: 50, max: 100 if --increase-writes-unit = 'percent')""")
    w_scaling_ag.add_argument(
        '--decrease-writes-with',
        type=int,
        help="""How much should we decrease the write units with?
                (default: 50)""")
    w_scaling_ag.add_argument(
        '--increase-writes-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    w_scaling_ag.add_argument(
        '--decrease-writes-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    w_scaling_ag.add_argument(
        '--min-provisioned-writes',
        type=int,
        help="""Minimum number of provisioned writes""")
    w_scaling_ag.add_argument(
        '--max-provisioned-writes',
        type=int,
        help="""Maximum number of provisioned writes""")
    w_scaling_ag.add_argument(
        '--num-write-checks-before-scale-down',
        type=int,
        help="""Number of consecutive checks that must meet criteria
                before a scale down event occurs""")
    w_scaling_ag.add_argument(
        '--num-write-checks-reset-percent',
        type=int,
        help="""Percentage Value that will cause the num_write_checks_before
                scale_down var to reset back to 0""")

    args = parser.parse_args()

    # Print the version and quit
    if args.version:
        # Read the dynamic-dynamodb.conf configuration file
        internal_config_file = ConfigParser.RawConfigParser()
        internal_config_file.optionxform = lambda option: option
        internal_config_file.read(
            os.path.abspath(
                os.path.join(
                    os.path.dirname(__file__),
                    '../dynamic-dynamodb.conf')))
        # Single-argument print() call: identical output under Python 2's
        # print statement and valid under Python 3 (was `print '...'`).
        print('Dynamic DynamoDB version: {0}'.format(
            internal_config_file.get('general', 'version')))
        sys.exit(0)

    # Replace any new values in the configuration
    configuration = {}
    for arg, value in vars(args).items():
        if value is not None:
            configuration[arg] = value

    return configuration
Parse command line options
def to_time(value, ctx):
    """Try to convert any supported value to a datetime.time."""
    # The isinstance branches are mutually exclusive, so check order
    # does not affect which branch a value takes.
    if isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone).time()
    if isinstance(value, datetime.time):
        return value
    if isinstance(value, str):
        parsed = ctx.get_date_parser().time(value)
        if parsed is not None:
            return parsed
    raise EvaluationError("Can't convert '%s' to a time" % str(value))
Tries conversion of any value to a time
def recursive_glob(base_directory, regex=''):
    """Find all files or folders matching the pattern, searching
    recursively from base_directory.

    Parameters
    ----------
    base_directory: str
    regex: str
        A glob pattern (despite the parameter name), e.g. '*.txt'.

    Returns
    -------
    files: list
    """
    matches = glob(op.join(base_directory, regex))
    for root, dir_names, _ in os.walk(base_directory):
        matches.extend(
            match
            for directory in dir_names
            for match in glob(op.join(root, directory, regex)))
    return matches
Uses glob to find all files or folders that match the regex starting from the base_directory. Parameters ---------- base_directory: str regex: str Returns ------- files: list
def _get_summary_struct(self):
    """
    Returns a structured description of the model, including (where
    relevant) the schema of the training data, description of the
    training data, training statistics, and model hyperparameters.

    Returns
    -------
    sections : list (of list of tuples)
        A list of summary sections.
        Each section is a list.
            Each item in a section list is a tuple of the form:
              ('<feature>','<field>')
    section_titles: list
        A list of section titles.
        The order matches that of the 'sections' object.
    """
    # NOTE(review): the function actually returns (fields, sections) —
    # data first, titles second — despite the docstring's ordering.
    sections = []
    fields = []

    # Pretty-printed feature lists for the "Model Fields" header section.
    _features = _precomputed_field(_internal_utils.pretty_print_list(self.features))
    _exclude = _precomputed_field(_internal_utils.pretty_print_list(self.excluded_features))
    header_fields = [("Features", "features"), ("Excluded Features", "excluded_features")]
    sections.append("Model Fields")
    fields.append(header_fields)

    if self.user_column_interpretations:
        sections.append("User Specified Interpretations")
        fields.append(list(sorted(self._get("user_column_interpretations").items())))

    column_interpretations = self._get("column_interpretations")
    features = self._get("features")

    if self._get("fitted") and features is not None:
        # Build a per-column table: name, input type, interpretation,
        # transform description and output type.
        n_rows = len(features)
        transform_info = [None]*n_rows
        for i, f in enumerate(features):
            interpretation = column_interpretations[f]
            input_type = self.input_types[f]
            description, output_type = _get_interpretation_description_and_output_type(
                interpretation, input_type)
            transform_info[i] = (f, input_type.__name__, interpretation, description, output_type.__name__)

        transform_table = _SFrame()
        transform_table["Column"] = [t[0] for t in transform_info]
        transform_table["Type"] = [t[1] for t in transform_info]
        transform_table["Interpretation"] = [t[2] for t in transform_info]
        transform_table["Transforms"] = [t[3] for t in transform_info]
        transform_table["Output Type"] = [t[4] for t in transform_info]
        # Append the table to the most recently added section's fields.
        fields[-1].append(transform_table)

    return fields, sections
Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<feature>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object.
def get(self, queue_get):
    """Collect states handed over via a multiprocessing queue.

    Extends ``self.result`` when ``queue_get`` is a tuple or list;
    any other payload is ignored.
    """
    if isinstance(queue_get, (tuple, list)):
        self.result += queue_get
to get states from multiprocessing.queue
def find_and_convert(self, attr_name: str, attr_value: S, desired_attr_type: Type[T],
                     logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
    """Convert ``attr_value`` into ``desired_attr_type``.

    Relies on get_all_conversion_chains to find candidate converters and
    applies them in order of decreasing specificity until one succeeds.

    :return: the converted value
    """
    # Already the right (non-collection) type: nothing to convert.
    if robust_isinstance(attr_value, desired_attr_type) and not is_collection(desired_attr_type):
        return attr_value

    generic, approx, exact = self.get_all_conversion_chains(type(attr_value), desired_attr_type)
    candidates = generic + approx + exact
    if len(candidates) == 0:
        # No conversion chain is available at all.
        raise NoConverterFoundForObjectType.create(self, attr_value, desired_attr_type)

    failures = dict()
    # The most specific chains sit at the end of the list; try them first.
    for chain in reversed(candidates):
        try:
            return chain.convert(desired_attr_type, attr_value, logger, options)
        except Exception as e:
            failures[chain] = e
    raise AttrConversionException.create(attr_name, attr_value, desired_attr_type, failures)
Utility method to convert some value into the desired type. It relies on get_all_conversion_chains to find the converters, and apply them in correct order :return:
def inference_q(self, next_action_arr):
    '''
    Infer Q-Values.

    Args:
        next_action_arr:    `np.ndarray` of action.

    Returns:
        `np.ndarray` of Q-Values.
    '''
    flattened = next_action_arr.reshape((next_action_arr.shape[0], -1))
    self.__q_arr_list.append(flattened)
    # Keep a window of exactly `seq_len` entries: drop the oldest,
    # then pad by repeating the newest if still short.
    while len(self.__q_arr_list) > self.__seq_len:
        self.__q_arr_list = self.__q_arr_list[1:]
    while len(self.__q_arr_list) < self.__seq_len:
        self.__q_arr_list.append(self.__q_arr_list[-1])

    # (seq, batch, feat) -> (batch, seq, feat) for the LSTM.
    seq_arr = np.array(self.__q_arr_list).transpose((1, 0, 2))
    inferred = self.__lstm_model.inference(seq_arr)
    # Last time step only, as a (batch, 1) column vector.
    return inferred[:, -1].reshape((inferred.shape[0], 1))
Inference Q-Value. Args: next_action_arr: `np.ndarray` of action. Returns: `np.ndarray` of Q-Values.
def do_load_modules(self, modules):
    """Wrapper for calling load_and_init method of modules_manager attribute

    :param modules: list of modules that should be loaded by the daemon
    :return: None
    """
    _ts = time.time()
    logger.info("Loading modules...")

    if self.modules_manager.load_and_init(modules):
        if self.modules_manager.instances:
            logger.info("I correctly loaded my modules: [%s]",
                        ','.join([inst.name for inst in self.modules_manager.instances]))
        else:
            logger.info("I do not have any module")
    else:  # pragma: no cover, not with unit tests...
        # Loading failed: surface every recorded configuration error.
        logger.error("Errors were encountered when checking and loading modules:")
        for msg in self.modules_manager.configuration_errors:
            logger.error(msg)

        if self.modules_manager.configuration_warnings:  # pragma: no cover, not tested
            for msg in self.modules_manager.configuration_warnings:
                logger.warning(msg)

    # Metrics are recorded whether or not loading succeeded.
    statsmgr.gauge('modules.count', len(modules))
    statsmgr.timer('modules.load-time', time.time() - _ts)
Wrapper for calling load_and_init method of modules_manager attribute :param modules: list of modules that should be loaded by the daemon :return: None
def skip(type_name, filename):
    """Build reporting statistics for a file that is being skipped."""
    message = 'Skipping {} file: {}'.format(type_name, filename)
    return ReportStats(filename, report=[message])
Provide reporting statistics for a skipped file.
def set_selection(self, selection, name="default", executor=None):
    """Set the selection object.

    :param selection: Selection object
    :param name: selection 'slot'
    :param executor:
    :return:
    """
    # The creator callback ignores the current selection and always
    # yields the new one.
    self._selection(lambda current: selection, name,
                    executor=executor, execute_fully=True)
Sets the selection object :param selection: Selection object :param name: selection 'slot' :param executor: :return:
def load(self):
    """Load this stream by querying River View for data."""
    # Single-argument print() calls: same output under the Python 2
    # print statement this file otherwise uses.
    print("Loading data for %s..." % self.getName())
    self._dataHandle = self._stream.data(
        since=self._since,
        until=self._until,
        limit=self._limit,
        aggregate=self._aggregate)
    self._data = self._dataHandle.data()
    self._headers = self._dataHandle.headers()
    print("Loaded %i rows." % len(self))
Loads this stream by calling River View for data.
def get_property(self):
    """Build the property object implementing this Property's dynamic
    behavior (validated getter, optional setter and deleter)."""
    scope = self

    def fget(instance):
        """Call the dynamic function, then validate its output."""
        value = scope.func(instance)
        if value is None or value is undefined:
            return None
        return scope.validate(instance, value)

    def fset(instance, value):
        """Validate the value, then forward to the setter if one exists."""
        if scope.set_func is None:
            raise AttributeError('cannot set attribute')
        scope.set_func(instance, scope.validate(instance, value))

    def fdel(instance):
        """Forward to the deleter if one exists."""
        if scope.del_func is None:
            raise AttributeError('cannot delete attribute')
        scope.del_func(instance)

    return property(fget=fget, fset=fset, fdel=fdel, doc=scope.sphinx())
Establishes the dynamic behavior of Property values
def _CreateNewSeasonDir(self, seasonNum):
    """Generate a new season directory name of the form 'Season <NUM>'.

    If skipUserInput is True the generated name is accepted by default;
    otherwise the user may accept it, choose the base show directory,
    skip the file, or type a different name.

    Parameters
    ----------
    seasonNum : int
        Season number.

    Returns
    ----------
    string or None
        The generated or user-supplied directory name; an empty string
        when the user chooses the base directory; None when the user
        chooses to skip at this input stage.
    """
    seasonDirName = "Season {0}".format(seasonNum)
    goodlogging.Log.Info("RENAMER", "Generated directory name: '{0}'".format(seasonDirName))

    # Deliberate `is False` check preserved from the original: only an
    # explicit False triggers the interactive prompt.
    if self._skipUserInput is False:
        response = goodlogging.Log.Input("RENAMER", "Enter 'y' to accept this directory, 'b' to use base show directory, 'x' to skip this file or enter a new directory name to use: ")
        response = util.CheckEmptyResponse(response)
    else:
        response = 'y'

    choice = response.lower()
    if choice == 'b':
        return ''
    if choice == 'y':
        return seasonDirName
    if choice == 'x':
        return None
    return response
Creates a new season directory name in the form 'Season <NUM>'. If skipUserInput is True this will be accepted by default otherwise the user can choose to accept this, use the base show directory or enter a different name. Parameters ---------- seasonNum : int Season number. Returns ---------- string or None If the user accepts the generated directory name or gives a new name this will be returned. If it the user chooses to use the base directory an empty string is returned. If the user chooses to skip at this input stage None is returned.
def _write_particle_information(gsd_file, structure, xyz, ref_distance,
                                ref_mass, ref_energy, rigid_bodies):
    """Write the particle section of the GSD file: positions, types,
    masses, charges and rigid-body tags, reduced to reference units.
    """
    gsd_file.particles.N = len(structure.atoms)
    gsd_file.particles.position = xyz / ref_distance

    # Fall back to the atom name when no explicit type is set.
    types = [atom.name if atom.type == '' else atom.type
             for atom in structure.atoms]
    unique_types = sorted(set(types), key=natural_sort)
    gsd_file.particles.types = unique_types
    gsd_file.particles.typeid = np.array([unique_types.index(t) for t in types])

    masses = np.array([atom.mass for atom in structure.atoms])
    # Replace zero masses with 1.0.
    masses[masses == 0] = 1.0
    gsd_file.particles.mass = masses / ref_mass

    charges = np.array([atom.charge for atom in structure.atoms])
    # Permittivity of free space = 2.39725e-4 e^2/((kcal/mol)(angstrom)),
    # where e is the elementary charge.
    e0 = 2.39725e-4
    charge_factor = (4.0 * np.pi * e0 * ref_distance * ref_energy) ** 0.5
    gsd_file.particles.charge = charges / charge_factor

    if rigid_bodies:
        # Free particles are tagged -1.
        gsd_file.particles.body = [-1 if body is None else body
                                   for body in rigid_bodies]
Write out the particle information.
def EnableEditingOnService(self, url, definition = None):
    """Enables editing capabilities on a feature service.

    Args:
        url (str): The URL of the feature service.
        definition (dict): A dictionary containing valid definition values. Defaults to ``None``.
    Returns:
        dict: The existing feature service definition capabilities.

    When ``definition`` is not provided (``None``), the following values are used by default:

    +------------------------------+------------------------------------------+
    |            Key               |                Value                     |
    +------------------------------+------------------------------------------+
    | hasStaticData                | ``False``                                |
    +------------------------------+------------------------------------------+
    | allowGeometryUpdates         | ``True``                                 |
    +------------------------------+------------------------------------------+
    | enableEditorTracking         | ``False``                                |
    +------------------------------+------------------------------------------+
    | enableOwnershipAccessControl | ``False``                                |
    +------------------------------+------------------------------------------+
    | allowOthersToUpdate          | ``True``                                 |
    +------------------------------+------------------------------------------+
    | allowOthersToDelete          | ``True``                                 |
    +------------------------------+------------------------------------------+
    | capabilities                 | ``"Query,Editing,Create,Update,Delete"`` |
    +------------------------------+------------------------------------------+

    """
    adminFS = AdminFeatureService(url=url, securityHandler=self._securityHandler)

    if definition is None:
        # Build the default editing-enabled definition described in the table above.
        definition = collections.OrderedDict()
        definition['hasStaticData'] = False
        definition['allowGeometryUpdates'] = True
        definition['editorTrackingInfo'] = {}
        definition['editorTrackingInfo']['enableEditorTracking'] = False
        definition['editorTrackingInfo']['enableOwnershipAccessControl'] = False
        definition['editorTrackingInfo']['allowOthersToUpdate'] = True
        definition['editorTrackingInfo']['allowOthersToDelete'] = True
        definition['capabilities'] = "Query,Editing,Create,Update,Delete"

    # Snapshot the service's current capabilities before applying the update,
    # so the caller can restore them later.
    existingDef = {}
    existingDef['capabilities'] = adminFS.capabilities
    existingDef['allowGeometryUpdates'] = adminFS.allowGeometryUpdates

    enableResults = adminFS.updateDefinition(json_dict=definition)
    if 'error' in enableResults:
        # NOTE(review): on failure this returns the raw error payload rather
        # than the snapshot -- confirm callers distinguish the two shapes.
        return enableResults['error']
    adminFS = None
    del adminFS
    # NOTE(review): debug-style output left in -- confirm this is intended.
    print (enableResults)
    return existingDef
Enables editing capabilities on a feature service. Args: url (str): The URL of the feature service. definition (dict): A dictionary containing valid definition values. Defaults to ``None``. Returns: dict: The existing feature service definition capabilities. When ``definition`` is not provided (``None``), the following values are used by default: +------------------------------+------------------------------------------+ | Key | Value | +------------------------------+------------------------------------------+ | hasStaticData | ``False`` | +------------------------------+------------------------------------------+ | allowGeometryUpdates | ``True`` | +------------------------------+------------------------------------------+ | enableEditorTracking | ``False`` | +------------------------------+------------------------------------------+ | enableOwnershipAccessControl | ``False`` | +------------------------------+------------------------------------------+ | allowOthersToUpdate | ``True`` | +------------------------------+------------------------------------------+ | allowOthersToDelete | ``True`` | +------------------------------+------------------------------------------+ | capabilities | ``"Query,Editing,Create,Update,Delete"`` | +------------------------------+------------------------------------------+
def QA_util_get_trade_datetime(dt=None):
    """交易的真实日期 (resolve the effective trading date for a moment in time).

    Args:
        dt (datetime.datetime, optional): The moment to resolve. Defaults to
            ``datetime.datetime.now()`` evaluated *at call time*. (The old
            signature used ``dt=datetime.datetime.now()`` as the default,
            which Python evaluates only once at import time, so every later
            call silently reused a stale timestamp.)

    Returns:
        str: 'YYYY-MM-DD' of the effective trading day.
    """
    if dt is None:
        dt = datetime.datetime.now()
    # Before the 15:00 close on a trading day, today's date applies;
    # otherwise roll forward to the next real trading date.
    if QA_util_if_trade(str(dt.date())) and dt.time() < datetime.time(15, 0, 0):
        return str(dt.date())
    else:
        return QA_util_get_real_date(str(dt.date()), trade_date_sse, 1)
交易的真实日期 Returns: [type] -- [description]
def load_mnist():
    '''Load the MNIST digits dataset.'''
    dataset = skdata.mnist.dataset.MNIST()
    dataset.meta  # trigger download if needed.

    def _flat(name, dtype):
        # Flatten each image to a row vector with the requested dtype.
        raw = dataset.arrays[name]
        return raw.reshape((len(raw), -1)).astype(dtype)

    # Scale pixels into roughly [-1, 1).
    images = _flat('train_images', np.float32) / 128 - 1
    labels = _flat('train_labels', np.uint8)

    train = (images[:50000], labels[:50000, 0])
    valid = (images[50000:], labels[50000:, 0])
    return train, valid
Load the MNIST digits dataset.
def output_eol_literal_marker(self, m):
    """Render an end-of-line literal marker through the renderer.

    A bare marker (no captured group) renders as ':'; otherwise the
    marker text is suppressed.
    """
    if m.group(1) is None:
        marker = ':'
    else:
        marker = ''
    return self.renderer.eol_literal_marker(marker)
Pass through rest link.
def from_edgelist(self, edges, strict=True):
    """
    Load transform data from an edge list into the current scene graph.

    Parameters
    -------------
    edgelist : (n,) tuples
        Each edge is (node_a, node_b, {key: value}) or (node_a, node_b)
    strict : bool
        If true, raise a ValueError when a malformed
        edge is passed in a tuple.
    """
    for edge in edges:
        arity = len(edge)
        if arity == 3:
            # edge carries an attribute dict
            self.update(edge[1], edge[0], **edge[2])
        elif arity == 2:
            # plain edge, no attributes
            self.update(edge[1], edge[0])
        elif strict:
            raise ValueError(
                'edge incorrect shape: {}'.format(str(edge)))
Load transform data from an edge list into the current scene graph. Parameters ------------- edgelist : (n,) tuples (node_a, node_b, {key: value}) strict : bool If true, raise a ValueError when a malformed edge is passed in a tuple.
def get_summary_str(self, sec2d_nt):
    """Get string describing counts of placed/unplaced GO IDs and count of sections."""
    summary = self.get_summary_data(sec2d_nt)
    return "{M} GO IDs placed into {N} sections; {U} unplaced GO IDs".format(
        N=len(summary['sections']),
        M=len(summary['grouped']),
        U=len(summary['ungrouped']))
Get string describing counts of placed/unplaced GO IDs and count of sections.
def add_line_data(self, line_data):
    """Add executed line data.

    `line_data` is { filename: { lineno: None, ... }, ...}
    """
    for filename, linenos in iitems(line_data):
        existing = self.lines.setdefault(filename, {})
        existing.update(linenos)
Add executed line data. `line_data` is { filename: { lineno: None, ... }, ...}
def collect_from_bundles(self, bundles: List[Bundle]) -> Dict[str, Any]:
    """
    Collect objects where :meth:`type_check` returns ``True`` from bundles.

    Names (keys) are expected to be unique across bundles, except for the
    app bundle, which can override anything from other bundles.
    """
    collected = {}        # every object discovered so far
    origin_of_key = {}    # key -> bundle that first registered it
    seen_keys = set()     # keys in `collected`, for uniqueness checks

    for bundle in bundles:
        bundle_objects = self.collect_from_bundle(bundle)
        if isinstance(bundle, AppBundle):
            # The app bundle comes last and may override anything.
            collected.update(bundle_objects)
            break

        new_keys = set(bundle_objects.keys())
        duplicates = seen_keys.intersection(new_keys)
        if duplicates:
            lines = [f'{self.name} from {bundle.name} conflict with '
                     f'previously registered {self.name}:']
            lines.extend(f'{key} from {origin_of_key[key].name}'
                         for key in duplicates)
            raise NameCollisionError('\n'.join(lines))

        collected.update(bundle_objects)
        seen_keys = seen_keys.union(new_keys)
        origin_of_key.update({key: bundle for key in new_keys})
    return collected
Collect objects where :meth:`type_check` returns ``True`` from bundles. Names (keys) are expected to be unique across bundles, except for the app bundle, which can override anything from other bundles.
def do_roles(self, service):
    """
    Role information

    Usage:
        > roles <servicename>   Display role information for service
        > roles all             Display all role information for cluster
    """
    # Nothing to show without a selected cluster or a service argument.
    if not self.has_cluster():
        return None
    if not service:
        return None
    if service == "all":
        # Populate the service cache on demand, then recurse per service.
        if not self.CACHED_SERVICES:
            self.services_autocomplete('', service, 0, 0)
        for s in self.CACHED_SERVICES:
            print("= " + s.upper() + " =")
            self.do_roles(s)
        return None
    try:
        # Resolve the service name to an API service object.
        service = api.get_cluster(self.cluster).get_service(service)
        headers = ["ROLE TYPE", "HOST", "ROLE NAME", "STATE", "HEALTH", "CONFIG"]
        align = ["ROLE TYPE", "ROLE NAME", "HOST"]
        rows = []
        # One row per role instance, grouped by role type.
        for roletype in service.get_role_types():
            for role in service.get_roles_by_type(roletype):
                if role.configStale:
                    config = "STALE"
                else:
                    config = "UP TO DATE"
                rows.append([role.type, role.hostRef.hostId, role.name, role.roleState, role.healthSummary, config])
        self.generate_output(headers, rows, align=align)
    except ApiException:
        # Lookup failed -- treat as unknown service rather than crashing.
        print("Service not found")
Role information Usage: > roles <servicename> Display role information for service > roles all Display all role information for cluster
def load_pyproject_toml(
    use_pep517,  # type: Optional[bool]
    pyproject_toml,  # type: str
    setup_py,  # type: str
    req_name  # type: str
):
    # type: (...) -> Optional[Tuple[List[str], str, List[str]]]
    """Load the pyproject.toml file.

    Parameters:
        use_pep517 - Has the user requested PEP 517 processing? None
            means the user hasn't explicitly specified.
        pyproject_toml - Location of the project's pyproject.toml file
        setup_py - Location of the project's setup.py file
        req_name - The name of the requirement we're processing (for
            error reporting)

    Returns:
        None if we should use the legacy code path, otherwise a tuple
        (
            requirements from pyproject.toml,
            name of PEP 517 backend,
            requirements we should check are installed after setting
                up the build environment
        )
    """
    has_pyproject = os.path.isfile(pyproject_toml)
    has_setup = os.path.isfile(setup_py)

    if has_pyproject:
        with io.open(pyproject_toml, encoding="utf-8") as f:
            pp_toml = pytoml.load(f)
        build_system = pp_toml.get("build-system")
    else:
        build_system = None

    # The following cases must use PEP 517
    # We check for use_pep517 being non-None and falsey because that means
    # the user explicitly requested --no-use-pep517.  The value 0 as
    # opposed to False can occur when the value is provided via an
    # environment variable or config file option (due to the quirk of
    # strtobool() returning an integer in pip's configuration code).
    if has_pyproject and not has_setup:
        if use_pep517 is not None and not use_pep517:
            raise InstallationError(
                "Disabling PEP 517 processing is invalid: "
                "project does not have a setup.py"
            )
        use_pep517 = True
    elif build_system and "build-backend" in build_system:
        if use_pep517 is not None and not use_pep517:
            raise InstallationError(
                "Disabling PEP 517 processing is invalid: "
                "project specifies a build backend of {} "
                "in pyproject.toml".format(
                    build_system["build-backend"]
                )
            )
        use_pep517 = True

    # If we haven't worked out whether to use PEP 517 yet,
    # and the user hasn't explicitly stated a preference,
    # we do so if the project has a pyproject.toml file.
    elif use_pep517 is None:
        use_pep517 = has_pyproject

    # At this point, we know whether we're going to use PEP 517.
    assert use_pep517 is not None

    # If we're using the legacy code path, there is nothing further
    # for us to do here.
    if not use_pep517:
        return None

    if build_system is None:
        # Either the user has a pyproject.toml with no build-system
        # section, or the user has no pyproject.toml, but has opted in
        # explicitly via --use-pep517.
        # In the absence of any explicit backend specification, we
        # assume the setuptools backend that most closely emulates the
        # traditional direct setup.py execution, and require wheel and
        # a version of setuptools that supports that backend.

        build_system = {
            "requires": ["setuptools>=40.8.0", "wheel"],
            "build-backend": "setuptools.build_meta:__legacy__",
        }

    # If we're using PEP 517, we have build system information (either
    # from pyproject.toml, or defaulted by the code above).
    # Note that at this point, we do not know if the user has actually
    # specified a backend, though.
    assert build_system is not None

    # Ensure that the build-system section in pyproject.toml conforms
    # to PEP 518.
    error_template = (
        "{package} has a pyproject.toml file that does not comply "
        "with PEP 518: {reason}"
    )

    # Specifying the build-system table but not the requires key is invalid
    if "requires" not in build_system:
        raise InstallationError(
            error_template.format(package=req_name, reason=(
                "it has a 'build-system' table but not "
                "'build-system.requires' which is mandatory in the table"
            ))
        )

    # Error out if requires is not a list of strings
    requires = build_system["requires"]
    if not _is_list_of_str(requires):
        raise InstallationError(error_template.format(
            package=req_name,
            reason="'build-system.requires' is not a list of strings.",
        ))

    backend = build_system.get("build-backend")
    check = []  # type: List[str]
    if backend is None:
        # If the user didn't specify a backend, we assume they want to use
        # the setuptools backend. But we can't be sure they have included
        # a version of setuptools which supplies the backend, or wheel
        # (which is needed by the backend) in their requirements. So we
        # make a note to check that those requirements are present once
        # we have set up the environment.
        # This is quite a lot of work to check for a very specific case. But
        # the problem is, that case is potentially quite common - projects that
        # adopted PEP 518 early for the ability to specify requirements to
        # execute setup.py, but never considered needing to mention the build
        # tools themselves. The original PEP 518 code had a similar check (but
        # implemented in a different way).
        backend = "setuptools.build_meta:__legacy__"
        check = ["setuptools>=40.8.0", "wheel"]

    return (requires, backend, check)
Load the pyproject.toml file. Parameters: use_pep517 - Has the user requested PEP 517 processing? None means the user hasn't explicitly specified. pyproject_toml - Location of the project's pyproject.toml file setup_py - Location of the project's setup.py file req_name - The name of the requirement we're processing (for error reporting) Returns: None if we should use the legacy code path, otherwise a tuple ( requirements from pyproject.toml, name of PEP 517 backend, requirements we should check are installed after setting up the build environment )
def set_source_nodes(self, source_nodes):
    r"""
    Set multiple source nodes and compute their t-weights.

    Parameters
    ----------
    source_nodes : sequence of integers
        Declare the source nodes via their ids.

    Raises
    ------
    ValueError
        If a passed node id does not refer to any node of the graph
        (i.e. it is either higher than the initially set number of
        nodes or lower than zero).

    Notes
    -----
    It does not get checked if one of the supplied source-nodes already has
    a weight assigned (e.g. by passing it to `set_sink_nodes`). This can
    occur when the foreground- and background-markers cover the same region.
    In this case the order of setting the terminal nodes can affect the
    graph and therefore the graph-cut result.
    """
    highest = max(source_nodes)
    lowest = min(source_nodes)
    if highest >= self.__nodes or lowest < 0:
        raise ValueError(
            'Invalid node id of {} or {}. Valid values are 0 to {}.'.format(
                highest, lowest, self.__nodes - 1))

    # set the source-to-node weights (t-weights)
    for node_id in source_nodes:
        self.__graph.add_tweights(int(node_id), self.MAX, 0)
r""" Set multiple source nodes and compute their t-weights. Parameters ---------- source_nodes : sequence of integers Declare the source nodes via their ids. Raises ------ ValueError If a passed node id does not refer to any node of the graph (i.e. it is either higher than the initially set number of nodes or lower than zero). Notes ----- It does not get checked if one of the supplied source-nodes already has a weight assigned (e.g. by passing it to `set_sink_nodes`). This can occur when the foreground- and background-markers cover the same region. In this case the order of setting the terminal nodes can affect the graph and therefore the graph-cut result.
def exists(self, path):
    """
    Does provided path exist on S3?
    """
    bucket, key = self._path_to_bucket_and_key(path)

    # Check cheapest-first, short-circuiting: the bucket root always
    # exists, then a concrete object, then a "directory" prefix.
    found = bool(self._is_root(key)
                 or self._exists(bucket, key)
                 or self.isdir(path))
    if not found:
        logger.debug('Path %s does not exist', path)
    return found
Does provided path exist on S3?
def read_preferences_file(self):
    """
    If json preferences file exists, read it in.
    """
    data_dir = find_pmag_dir.find_user_data_dir("thellier_gui")
    # No user data dir located, or it does not exist on disk yet.
    if not data_dir or not os.path.exists(data_dir):
        return {}
    pref_file = os.path.join(data_dir, "thellier_gui_preferences.json")
    if not os.path.exists(pref_file):
        return {}
    with open(pref_file, "r") as pfile:
        return json.load(pfile)
If json preferences file exists, read it in.
def load(self, filename):
    """Load proxies from file"""
    with open(filename, 'r') as fin:
        stored = json.load(fin)
    for protocol, entries in stored.items():
        for entry in entries:
            addr = entry['addr']
            self.proxies[protocol][addr] = Proxy(addr,
                                                 entry['protocol'],
                                                 entry['weight'],
                                                 entry['last_checked'])
            self.addr_list[protocol].append(addr)
Load proxies from file
def in_git_clone():
    """Returns `True` if the current directory is a git repository

    Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir`
    """
    gitdir = '.git'
    if not os.path.isdir(gitdir):
        return False
    # A valid .git directory contains objects/ and refs/ directories
    # plus a HEAD file.
    has_dirs = all(os.path.isdir(os.path.join(gitdir, sub))
                   for sub in ('objects', 'refs'))
    return has_dirs and os.path.exists(os.path.join(gitdir, 'HEAD'))
Returns `True` if the current directory is a git repository Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir`
def example_clinical_data(study_name, environment):
    """Test demonstrating building clinical data.

    Args:
        study_name: Study OID used for the ClinicalData element.
        environment: Environment name used for the ClinicalData element.

    Returns:
        ODM: the assembled example document.

    Note: the previous implementation ignored both parameters and always
    hard-coded the study as "Mediflex"/"DEV"; they are now honored.
    """
    odm = ODM("test system")(
        ClinicalData(study_name, environment)(
            SubjectData("MDSOL", "IJS TEST4", transaction_type="Insert")(
                StudyEventData("SUBJECT")(
                    FormData("EN", transaction_type="Update")(
                        # Although Signature is ODM1.3.1 RWS does not support it inbound currently
                        # RWSBuilders do support outbound generation of Signature at FormData level
                        # Signature()(
                        #    UserRef("isparks"),
                        #    LocationRef("MDSOL"),
                        #    SignatureRef("APPROVED"),
                        #    DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80))
                        # ),
                        ItemGroupData()(
                            ItemData("SUBJINIT", "AAA")(
                                AuditRecord(edit_point=AuditRecord.EDIT_DATA_MANAGEMENT,
                                            used_imputation_method=False,
                                            identifier='X2011',
                                            include_file_oid=False)(
                                    UserRef("isparks"),
                                    LocationRef("MDSOL"),
                                    ReasonForChange("Data Entry Error"),
                                    DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80))
                                ),
                                MdsolQuery(value="Subject initials should be 2 chars only.",
                                           recipient="Site from System",
                                           status=QueryStatusType.Open)
                            ),
                            ItemData("SUBJID", '001')
                        )
                    )
                )
            )
        )
    )
    return odm
Test demonstrating building clinical data
def select_code(self, code):
    """
    选择股票 (select rows for a single stock code)

    @2018/06/03 pandas 的索引问题导致
    https://github.com/pandas-dev/pandas/issues/21299

    因此先用set_index去重做一次index
    影响的有selects,select_time,select_month,get_bar

    @2018/06/04 当选择的时间越界/股票不存在,raise ValueError
    @2018/06/04 pandas索引问题已经解决 全部恢复
    """
    def _select_code(code):
        # index level 0 is the timestamp, level 1 the stock code
        return self.data.loc[(slice(None), code), :]
    try:
        return self.new(_select_code(code), self.type, self.if_fq)
    except Exception as exc:
        # The previous bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception only, and chain the original cause.
        raise ValueError(
            'QA CANNOT FIND THIS CODE {}'.format(code)) from exc
选择股票 @2018/06/03 pandas 的索引问题导致 https://github.com/pandas-dev/pandas/issues/21299 因此先用set_index去重做一次index 影响的有selects,select_time,select_month,get_bar @2018/06/04 当选择的时间越界/股票不存在,raise ValueError @2018/06/04 pandas索引问题已经解决 全部恢复
def normalize(Y, normalization_type='stats'):
    """Normalize the vector Y using statistics or its range.

    :param Y: Row or column vector that you want to normalize.
    :param normalization_type: String specifying the kind of normalization to
        use. Options are 'stats' to use mean and standard deviation, or
        'maxmin' to use the range of function values.
    :return Y_normalized: The normalized vector.
    """
    Y = np.asarray(Y, dtype=float)
    if np.max(Y.shape) != Y.size:
        raise NotImplementedError('Only 1-dimensional arrays are supported.')

    # Only divide when the spread is non-zero (a single data point, or a
    # constant vector, has both std == 0 and ptp == 0).
    if normalization_type == 'stats':
        result = Y - Y.mean()
        spread = Y.std()
        if spread > 0:
            result /= spread
    elif normalization_type == 'maxmin':
        result = Y - Y.min()
        spread = np.ptp(Y)
        if spread > 0:
            result /= spread
        # Shift into [-1, 1], which is more natural for a zero-mean GP.
        result = 2 * (result - 0.5)
    else:
        raise ValueError(
            'Unknown normalization type: {}'.format(normalization_type))
    return result
Normalize the vector Y using statistics or its range. :param Y: Row or column vector that you want to normalize. :param normalization_type: String specifying the kind of normalization to use. Options are 'stats' to use mean and standard deviation, or 'maxmin' to use the range of function values. :return Y_normalized: The normalized vector.
def set_network_connection(self, network):
    """
    Set the network connection for the remote device.

    Example of setting airplane mode::

        driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE)
    """
    if isinstance(network, self.ConnectionType):
        mode = network.mask
    else:
        mode = network
    response = self._driver.execute(
        Command.SET_NETWORK_CONNECTION,
        {'name': 'network_connection',
         'parameters': {'type': mode}})
    return self.ConnectionType(response['value'])
Set the network connection for the remote device. Example of setting airplane mode:: driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE)
def site(self, site):
    """
    Sets the site of this OauthTokenReference.

    :param site: The site of this OauthTokenReference.
    :type: str
    """
    # Guard clauses: None, then length limits, then format.
    if site is None:
        raise ValueError("Invalid value for `site`, must not be `None`")
    if len(site) > 255:
        raise ValueError("Invalid value for `site`, length must be less than or equal to `255`")
    if len(site) < 3:
        raise ValueError("Invalid value for `site`, length must be greater than or equal to `3`")
    pattern = '(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\\.)+[a-z]{2,}(?:@[a-z0-9](?:[-.](?=[a-z0-9])|[a-z0-9]){0,29})?'
    if not re.search(pattern, site):
        raise ValueError("Invalid value for `site`, must be a follow pattern or equal to `/(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\\.)+[a-z]{2,}(?:@[a-z0-9](?:[-.](?=[a-z0-9])|[a-z0-9]){0,29})?/`")

    self._site = site
Sets the site of this OauthTokenReference. :param site: The site of this OauthTokenReference. :type: str
def connect(self):
    """ Connects to the device and starts the read thread """
    self.serial = serial.Serial(port=self.port,
                                baudrate=self.baudrate,
                                timeout=self.timeout)
    # Spin up the background reader as a daemon thread so it never
    # blocks interpreter shutdown.
    self.alive = True
    self.rxThread = threading.Thread(target=self._readLoop)
    self.rxThread.daemon = True
    self.rxThread.start()
Connects to the device and starts the read thread
def get_games(ctx):
    """Prints out games owned by a Steam user."""
    username = ctx.obj['username']
    games = User(username).get_games_owned()

    # List the games alphabetically by title.
    for game in sorted(games.values(), key=itemgetter('title')):
        click.echo('%s [appid: %s]' % (game['title'], game['appid']))

    # Fixed user-facing typo: "gems" -> "games".
    click.secho('Total games owned by `%s`: %d' % (username, len(games)),
                fg='green')
Prints out games owned by a Steam user.
def format(self, record):
    # type: (logging.LogRecord) -> str
    """Format the log record with timestamps and level based colors.

    Args:
        record: The log record to format.

    Returns:
        The formatted log record.
    """
    # Pick a foreground color by severity, most severe first.
    if record.levelno >= logging.ERROR:
        color = colorama.Fore.RED
    elif record.levelno >= logging.WARNING:
        color = colorama.Fore.YELLOW
    elif record.levelno >= logging.INFO:
        color = colorama.Fore.RESET
    else:
        color = colorama.Fore.CYAN

    template = (
        '{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s')
    if sys.stdout.isatty():
        # Only emit ANSI escapes when attached to a terminal.
        self._fmt = template.format(
            colorama.Style.BRIGHT,
            color,
            colorama.Fore.RESET,
            colorama.Style.RESET_ALL
        )
    else:
        self._fmt = template.format('', '', '', '')
    if six.PY3:
        self._style._fmt = self._fmt  # pylint: disable=protected-access
    return super(_LogColorFormatter, self).format(record)
Format the log record with timestamps and level based colors. Args: record: The log record to format. Returns: The formatted log record.
def notify_peer_message(self, message, sender_id):
    """A new message was received from a peer"""
    serialized = message.SerializeToString()
    self._notify(
        "consensus_notifier_notify_peer_message",
        serialized,
        len(serialized),
        sender_id,
        len(sender_id))
A new message was received from a peer
def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
        oauth2client.Credentials
    """
    credentials = None
    if self._cache:
        # Fast path: try the cache first.
        # NOTE: local name `json` shadows the stdlib json module.
        json = self._cache.get(self._key_name)
        if json:
            credentials = client.Credentials.new_from_json(json)
    if credentials is None:
        # Cache miss (or no cache configured): fall back to the datastore.
        entity = self._get_entity()
        if entity is not None:
            credentials = getattr(entity, self._property_name)
            if self._cache:
                # Repopulate the cache for subsequent lookups.
                self._cache.set(self._key_name, credentials.to_json())
    if credentials and hasattr(credentials, 'set_store'):
        # Let the credentials write refreshed tokens back through this store.
        credentials.set_store(self)
    return credentials
Retrieve Credential from datastore. Returns: oauth2client.Credentials
def __configure_client(self, config):
    """ write the perforce client """
    self.logger.info("Configuring p4 client...")
    client_dict = config.to_dict()
    # Expand '~' in the configured root and run p4 from inside it.
    client_dict['root_path'] = os.path.expanduser(config.get('root_path'))
    os.chdir(client_dict['root_path'])
    client_dict['hostname'] = system.NODE
    # Interpolate the deploy-target context into the configured view spec.
    client_dict['p4view'] = config['p4view'] % self.environment.target.get_context_dict()
    # Indent depot lines so the generated client spec is valid for `p4 client -i`.
    client = re.sub('//depot', ' //depot', p4client_template % client_dict)
    self.logger.info(lib.call("%s client -i" % self.p4_command,
                              stdin=client,
                              env=self.p4environ,
                              cwd=client_dict['root_path']))
write the perforce client
def iteritems(self):
    """
    Iterates over all mappings

    Yields
    ------
    (int,Mapping)
        The next pair (index, mapping)
    """
    for mapping in self.mappings:
        index = self.indexes[mapping.clause][mapping.target]
        yield index, mapping
Iterates over all mappings Yields ------ (int,Mapping) The next pair (index, mapping)
def delay_for(
        self,
        wait: typing.Union[int, float],
        identifier: typing.Any,
) -> bool:
    """Defer the execution of a function for some number of seconds.

    Args:
        wait (typing.Union[int, float]): A numeric value that represents
            the number of seconds that must pass before the callback
            becomes available for execution. All given values must be
            positive.
        identifier (typing.Any): The identifier returned from a call to
            defer or defer_for.

    Returns:
        bool: True if the call is delayed. False if the identifier is
            invalid or if the deferred call is already executed.
    """
    # Abstract hook: concrete schedulers must override this method.
    raise NotImplementedError()
Defer the execution of a function for some number of seconds. Args: wait (typing.Union[int, float]): A numeric value that represents the number of seconds that must pass before the callback becomes available for execution. All given values must be positive. identifier (typing.Any): The identifier returned from a call to defer or defer_for. Returns: bool: True if the call is delayed. False if the identifier is invalid or if the deferred call is already executed.
def treat(request_body):
    """
    Treat a notification and guarantee its authenticity.

    :param request_body: The request body in plain text.
    :type request_body: string

    :return: A safe APIResource
    :rtype: APIResource
    """
    # Python 3+ support: decode raw bytes before parsing.
    if isinstance(request_body, six.binary_type):
        request_body = request_body.decode('utf-8')

    try:
        payload = json.loads(request_body)
    except ValueError:
        raise exceptions.UnknownAPIResource('Request body is malformed JSON.')

    unsafe_api_resource = APIResource.factory(payload)
    try:
        return unsafe_api_resource.get_consistent_resource()
    except AttributeError:
        raise exceptions.UnknownAPIResource('The API resource provided is invalid.')
Treat a notification and guarantee its authenticity. :param request_body: The request body in plain text. :type request_body: string :return: A safe APIResource :rtype: APIResource
def _select_features(example, feature_list=None): """Select a subset of features from the example dict.""" feature_list = feature_list or ["inputs", "targets"] return {f: example[f] for f in feature_list}
Select a subset of features from the example dict.
def parse_http_date(date):
    """
    Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Return an integer expressed in seconds since the epoch, in UTC.

    Implementation copied from Django.
    https://github.com/django/django/blob/master/django/utils/http.py#L157
    License: BSD 3-clause
    """
    MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
    day = r'(?P<day>\d{2})'
    day2 = r'(?P<day>[ \d]\d)'
    mon = r'(?P<mon>\w{3})'
    year4 = r'(?P<year>\d{4})'
    year2 = r'(?P<year>\d{2})'
    clock = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'

    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
    # RFC7231 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    patterns = (
        re.compile(r'^\w{3}, %s %s %s %s GMT$' % (day, mon, year4, clock)),
        re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (day, mon, year2, clock)),
        re.compile(r'^\w{3} %s %s %s %s$' % (mon, day2, clock, year4)),
    )
    for pattern in patterns:
        m = pattern.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)

    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC850 two-digit years: 00-69 -> 20xx, 70-99 -> 19xx.
            year += 2000 if year < 70 else 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        parsed = datetime.datetime(year, month, int(m.group('day')),
                                   int(m.group('hour')), int(m.group('min')),
                                   int(m.group('sec')))
        return calendar.timegm(parsed.utctimetuple())
    except Exception as exc:
        raise ValueError("%r is not a valid date" % date) from exc
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1. The three formats allowed by the RFC are accepted, even if only the first one is still in widespread use. Return an integer expressed in seconds since the epoch, in UTC. Implementation copied from Django. https://github.com/django/django/blob/master/django/utils/http.py#L157 License: BSD 3-clause
def dtdQAttrDesc(self, elem, name, prefix):
    """Search the DTD for the description of this qualified attribute on
       this element. """
    ret = libxml2mod.xmlGetDtdQAttrDesc(self._o, elem, name, prefix)
    if ret is None:
        raise treeError('xmlGetDtdQAttrDesc() failed')
    return xmlAttribute(_obj=ret)
Search the DTD for the description of this qualified attribute on this element.
def set_hyperparams(self, new_params):
    """Sets the free hyperparameters to the new parameter values in new_params.

    Parameters
    ----------
    new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),)
        New parameter values, ordered as dictated by the docstring for the
        class.
    """
    # NOTE(review): scipy.asarray is a legacy numpy re-export removed in
    # newer SciPy releases -- confirm the project's scipy pin, or switch
    # to numpy.asarray.
    new_params = scipy.asarray(new_params, dtype=float)

    if len(new_params) == len(self.free_params):
        if self.enforce_bounds:
            # Clip each incoming value into its (lower, upper) bound;
            # a bound of None means unbounded on that side.
            for idx, new_param, bound in zip(range(0, len(new_params)), new_params, self.free_param_bounds):
                if bound[0] is not None and new_param < bound[0]:
                    new_params[idx] = bound[0]
                elif bound[1] is not None and new_param > bound[1]:
                    new_params[idx] = bound[1]
        # Write the (possibly clipped) values only into the free slots.
        self.params[~self.fixed_params] = new_params
    else:
        raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
Sets the free hyperparameters to the new parameter values in new_params. Parameters ---------- new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),) New parameter values, ordered as dictated by the docstring for the class.
def fix_multiple_files(filenames, options, output=None):
    """Fix list of files.

    Optionally fix files recursively.
    """
    filenames = find_files(filenames, options.recursive, options.exclude)
    if options.jobs > 1:
        import multiprocessing
        pool = multiprocessing.Pool(options.jobs)
        try:
            # NOTE: the parallel path cannot share `output`, so workers
            # receive only (name, options), mirroring the original call.
            pool.map(_fix_file,
                     [(name, options) for name in filenames])
        finally:
            # The pool was previously never closed/joined, leaking worker
            # processes on every invocation.
            pool.close()
            pool.join()
    else:
        for name in filenames:
            _fix_file((name, options, output))
Fix list of files. Optionally fix files recursively.
def edit_ipv6(self, ip6, descricao, id_ip):
    """Edit a IP6.

    :param ip6: An IP6 available to save in format
        xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx.
    :param descricao: IP description.
    :param id_ip: Ipv6 identifier. Integer value and greater than zero.

    :return: None
    """
    # Validate the inputs before calling the API.
    if not is_valid_int_param(id_ip):
        raise InvalidParameterError(
            u'Ipv6 identifier is invalid or was not informed.')
    if ip6 is None or ip6 == "":
        raise InvalidParameterError(u'IP6 is invalid or was not informed.')

    payload = {
        'descricao': descricao,
        'ip6': ip6,
        'id_ip': id_ip,
    }

    code, xml = self.submit({'ip_map': payload}, 'POST', "ipv6/edit/")
    return self.response(code, xml)
Edit a IP6 :param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx. :param descricao: IP description. :param id_ip: Ipv6 identifier. Integer value and greater than zero. :return: None
def crop(self, start_timestamp, end_timestamp):
    """Return a new TimeSeries containing every point inside the range.

    :param int start_timestamp: the start timestamp value
    :param int end_timestamp: the end timestamp value
    :return: :class:`TimeSeries` object.
    :raises ValueError: if no point falls inside the requested range.
    """
    selected = {ts: val for ts, val in self.items()
                if start_timestamp <= ts <= end_timestamp}
    if not selected:
        raise ValueError('TimeSeries data was empty or invalid.')
    return TimeSeries(selected)
Return a new TimeSeries object containing all the timestamps and values within the specified range. :param int start_timestamp: the start timestamp value :param int end_timestamp: the end timestamp value :return: :class:`TimeSeries` object.
def simplex_grid(m, n):
    r"""
    Construct an array consisting of the integer points in the
    (m-1)-dimensional simplex :math:`\{x \mid x_0 + \cdots + x_{m-1} = n
    \}`, or equivalently, the m-part compositions of n, which are listed
    in lexicographic order. The total number of the points (hence the
    length of the output array) is L = (n+m-1)!/(n!*(m-1)!) (i.e.,
    (n+m-1) choose (m-1)).

    Parameters
    ----------
    m : scalar(int)
        Dimension of each point. Must be a positive integer.

    n : scalar(int)
        Number which the coordinates of each point sum to. Must be a
        nonnegative integer.

    Returns
    -------
    out : ndarray(int, ndim=2)
        Array of shape (L, m) containing the integer points in the
        simplex, aligned in lexicographic order.

    Notes
    -----
    A grid of the (m-1)-dimensional *unit* simplex with n subdivisions
    along each dimension can be obtained by `simplex_grid(m, n) / n`.

    Examples
    --------
    >>> simplex_grid(3, 4)
    array([[0, 0, 4],
           [0, 1, 3],
           [0, 2, 2],
           [0, 3, 1],
           [0, 4, 0],
           [1, 0, 3],
           [1, 1, 2],
           [1, 2, 1],
           [1, 3, 0],
           [2, 0, 2],
           [2, 1, 1],
           [2, 2, 0],
           [3, 0, 1],
           [3, 1, 0],
           [4, 0, 0]])

    References
    ----------
    A. Nijenhuis and H. S. Wilf, Combinatorial Algorithms, Chapter 5,
    Academic Press, 1978.

    """
    # Number of compositions; the helper signals overflow by returning 0.
    L = num_compositions_jit(m, n)
    if L == 0:  # Overflow occured
        raise ValueError(_msg_max_size_exceeded)

    out = np.empty((L, m), dtype=np.int_)

    # Start from the lexicographically smallest point (0, ..., 0, n).
    x = np.zeros(m, dtype=np.int_)
    x[m-1] = n

    for j in range(m):
        out[0, j] = x[j]

    # h tracks the position being decremented (NEXCOM successor rule from
    # Nijenhuis & Wilf, Chapter 5).
    h = m

    for i in range(1, L):
        h -= 1

        # Move one unit from position h to position h-1 and dump the
        # remainder into the last coordinate.
        val = x[h]
        x[h] = 0
        x[m-1] = val - 1
        x[h-1] += 1

        for j in range(m):
            out[i, j] = x[j]

        # Reset the scan position when more than one unit was moved.
        if val != 1:
            h = m

    return out
r""" Construct an array consisting of the integer points in the (m-1)-dimensional simplex :math:`\{x \mid x_0 + \cdots + x_{m-1} = n \}`, or equivalently, the m-part compositions of n, which are listed in lexicographic order. The total number of the points (hence the length of the output array) is L = (n+m-1)!/(n!*(m-1)!) (i.e., (n+m-1) choose (m-1)). Parameters ---------- m : scalar(int) Dimension of each point. Must be a positive integer. n : scalar(int) Number which the coordinates of each point sum to. Must be a nonnegative integer. Returns ------- out : ndarray(int, ndim=2) Array of shape (L, m) containing the integer points in the simplex, aligned in lexicographic order. Notes ----- A grid of the (m-1)-dimensional *unit* simplex with n subdivisions along each dimension can be obtained by `simplex_grid(m, n) / n`. Examples -------- >>> simplex_grid(3, 4) array([[0, 0, 4], [0, 1, 3], [0, 2, 2], [0, 3, 1], [0, 4, 0], [1, 0, 3], [1, 1, 2], [1, 2, 1], [1, 3, 0], [2, 0, 2], [2, 1, 1], [2, 2, 0], [3, 0, 1], [3, 1, 0], [4, 0, 0]]) >>> simplex_grid(3, 4) / 4 array([[ 0. , 0. , 1. ], [ 0. , 0.25, 0.75], [ 0. , 0.5 , 0.5 ], [ 0. , 0.75, 0.25], [ 0. , 1. , 0. ], [ 0.25, 0. , 0.75], [ 0.25, 0.25, 0.5 ], [ 0.25, 0.5 , 0.25], [ 0.25, 0.75, 0. ], [ 0.5 , 0. , 0.5 ], [ 0.5 , 0.25, 0.25], [ 0.5 , 0.5 , 0. ], [ 0.75, 0. , 0.25], [ 0.75, 0.25, 0. ], [ 1. , 0. , 0. ]]) References ---------- A. Nijenhuis and H. S. Wilf, Combinatorial Algorithms, Chapter 5, Academic Press, 1978.
def asRemoteException(ErrorType):
    '''Return the remote exception version of the error above.

    You can catch errors as usual:
    >>> try: raise asRemoteException(ValueError)
    except ValueError: pass

    or you can catch the remote Exception
    >>> try: raise asRemoteException(ReferenceError)(ReferenceError(),'')
    except asRemoteException(ReferenceError): pass
    '''
    RemoteException = _remoteExceptionCache.get(ErrorType)
    if RemoteException is None:
        RemoteException = _newRemoteException(ErrorType)
        # setdefault + re-read keeps a single canonical class even when two
        # threads build one concurrently: the first writer wins.
        _remoteExceptionCache.setdefault(ErrorType, RemoteException)
        _remoteExceptionCache.setdefault(RemoteException, RemoteException)
        # NOTE: the original had a second, unreachable `return` after this
        # line; it has been removed.
        return _remoteExceptionCache.get(ErrorType)
    return RemoteException
return the remote exception version of the error above you can catch errors as usual: >>> try: raise asRemoteException(ValueError) except ValueError: pass or you can catch the remote Exception >>> try: raise asRemoteException(ReferenceError)(ReferenceError(),'') except asRemoteException(ReferenceError): pass
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """Yield every descendant node, excluding this node itself."""
    for node in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
        if node is self:
            continue
        yield node
Returns an iterator over all descendant nodes.
def execute(self, args):
    """ Executes the command invocation

    :param args: The command arguments for this invocation
    :type args: list
    :return: The command result
    :rtype: knack.util.CommandResultItem
    """
    import colorama

    # Build the command table and resolve which command is being invoked,
    # firing lifecycle events around each step so extensions can hook in.
    self.cli_ctx.raise_event(EVENT_INVOKER_PRE_CMD_TBL_CREATE, args=args)
    cmd_tbl = self.commands_loader.load_command_table(args)
    command = self._rudimentary_get_command(args)
    self.cli_ctx.invocation.data['command_string'] = command
    self.commands_loader.load_arguments(command)
    self.cli_ctx.raise_event(EVENT_INVOKER_POST_CMD_TBL_CREATE, cmd_tbl=cmd_tbl)
    self.parser.load_command_table(self.commands_loader)
    self.cli_ctx.raise_event(EVENT_INVOKER_CMD_TBL_LOADED, parser=self.parser)

    # No real arguments (only verbosity flags): show the welcome screen.
    arg_check = [a for a in args if a not in ['--verbose', '--debug']]
    if not arg_check:
        self.cli_ctx.completion.enable_autocomplete(self.parser)
        subparser = self.parser.subparsers[tuple()]
        self.help.show_welcome(subparser)
        return CommandResultItem(None, exit_code=0)

    # Treat a leading 'help' token the same as '--help'.
    if args[0].lower() == 'help':
        args[0] = '--help'

    self.cli_ctx.completion.enable_autocomplete(self.parser)

    self.cli_ctx.raise_event(EVENT_INVOKER_PRE_PARSE_ARGS, args=args)
    parsed_args = self.parser.parse_args(args)
    self.cli_ctx.raise_event(EVENT_INVOKER_POST_PARSE_ARGS, command=parsed_args.command, args=parsed_args)

    self._validation(parsed_args)

    # save the command name (leaf in the tree)
    self.data['command'] = parsed_args.command

    cmd = parsed_args.func
    if hasattr(parsed_args, 'cmd'):
        parsed_args.cmd = cmd

    # Collect explicit deprecations declared on arguments and the command.
    deprecations = getattr(parsed_args, '_argument_deprecations', [])
    if cmd.deprecate_info:
        deprecations.append(cmd.deprecate_info)

    params = self._filter_params(parsed_args)

    # search for implicit deprecation: walk up the command group hierarchy
    # until a deprecated ancestor (if any) is found.
    path_comps = cmd.name.split()[:-1]
    implicit_deprecate_info = None
    while path_comps and not implicit_deprecate_info:
        implicit_deprecate_info = resolve_deprecate_info(self.cli_ctx, ' '.join(path_comps))
        del path_comps[-1]

    if implicit_deprecate_info:
        deprecate_kwargs = implicit_deprecate_info.__dict__.copy()
        deprecate_kwargs['object_type'] = 'command'
        # Drop the bound helper callables before re-instantiating.
        del deprecate_kwargs['_get_tag']
        del deprecate_kwargs['_get_message']
        deprecations.append(ImplicitDeprecated(**deprecate_kwargs))

    # Emit all deprecation messages on stderr with color support.
    colorama.init()
    for d in deprecations:
        print(d.message, file=sys.stderr)
    colorama.deinit()

    # Run the actual command handler and post-process its result.
    cmd_result = parsed_args.func(params)
    cmd_result = todict(cmd_result)

    event_data = {'result': cmd_result}
    self.cli_ctx.raise_event(EVENT_INVOKER_TRANSFORM_RESULT, event_data=event_data)
    self.cli_ctx.raise_event(EVENT_INVOKER_FILTER_RESULT, event_data=event_data)

    return CommandResultItem(event_data['result'],
                             exit_code=0,
                             table_transformer=cmd_tbl[parsed_args.command].table_transformer,
                             is_query_active=self.data['query_active'])
Executes the command invocation :param args: The command arguments for this invocation :type args: list :return: The command result :rtype: knack.util.CommandResultItem
def get_schema(self, filename):
    """Guess a tabular schema for `filename` using messytables.

    :param filename: path of the file to inspect.
    :return: list of ``[header, type_name, sample_value]`` triples, or an
        empty list when the file cannot be read.
    """
    table_set = self.read_file(filename)

    # Could not read the file at all.
    if table_set is None:
        return []

    # Work on the first table only.
    row_set = table_set.tables[0]
    offset, headers = headers_guess(row_set.sample)
    row_set.register_processor(headers_processor(headers))
    row_set.register_processor(offset_processor(offset + 1))
    types = type_guess(row_set.sample, strict=True)

    # Grab one representative data row for the sample column.
    sample = next(row_set.sample)

    def clean(value):
        # Coerce a cell value to str for display.
        return str(value) if not isinstance(value, str) else value

    # PEP 8: use a def and a comprehension instead of an assigned lambda
    # and a manual append loop.
    return [[h, str(types[i]), clean(sample[i].value)]
            for i, h in enumerate(headers)]
Guess schema using messytables
def set_events_callback(self, call_back):
    """Sets the user callback that the Server object has to call when an
    event is created.

    :param call_back: Python callable invoked with the snap7 event
        structure each time the server reports an event.
    :return: the result code of Srv_SetEventsCallback.
    """
    logger.info("setting event callback")
    # ctypes prototype of the C callback:
    # void (*)(void *usrptr, SrvEvent *pevent, int size)
    callback_wrap = ctypes.CFUNCTYPE(None, ctypes.c_void_p,
                                     ctypes.POINTER(snap7.snap7types.SrvEvent),
                                     ctypes.c_int)

    def wrapper(usrptr, pevent, size):
        """ Wraps python function into a ctypes function

        :param usrptr: not used
        :param pevent: pointer to snap7 event struct
        :param size:
        :returns: should return an int
        """
        logger.info("callback event: " + self.event_text(pevent.contents))
        call_back(pevent.contents)
        return 0

    # Keep a reference on self so the ctypes thunk is not garbage
    # collected while the C library still holds a pointer to it.
    self._callback = callback_wrap(wrapper)
    usrPtr = ctypes.c_void_p()
    return self.library.Srv_SetEventsCallback(self.pointer, self._callback, usrPtr)
Sets the user callback that the Server object has to call when an event is created.
def _del_thread(self, dwThreadId): """ Private method to remove a thread object from the snapshot. @type dwThreadId: int @param dwThreadId: Global thread ID. """ try: aThread = self.__threadDict[dwThreadId] del self.__threadDict[dwThreadId] except KeyError: aThread = None msg = "Unknown thread ID %d" % dwThreadId warnings.warn(msg, RuntimeWarning) if aThread: aThread.clear()
Private method to remove a thread object from the snapshot. @type dwThreadId: int @param dwThreadId: Global thread ID.
def macs_filtered_reads_plot(self):
    """ Plot of filtered reads for control and treatment samples """
    required = ['control_fragments_total', 'control_fragments_after_filtering',
                'treatment_fragments_total', 'treatment_fragments_after_filtering']
    data = dict()
    for s_name, d in self.macs_data.items():
        # Only samples reporting all four fragment counts can be plotted.
        if not all(c in d for c in required):
            continue
        for group in ('Control', 'Treatment'):
            prefix = group.lower()
            total = d['{}_fragments_total'.format(prefix)]
            kept = d['{}_fragments_after_filtering'.format(prefix)]
            data['{}: {}'.format(s_name, group)] = {
                'fragments_filtered': total - kept,
                'fragments_not_filtered': kept,
            }

    # Check that we have something to plot
    if len(data) == 0:
        return

    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['fragments_not_filtered'] = {'color': '#437BB1', 'name': 'Remaining fragments'}
    keys['fragments_filtered'] = {'color': '#B1084C', 'name': 'Filtered fragments'}

    # Config for the plot
    pconfig = {
        'id': 'macs2_filtered',
        'title': 'MACS2: Filtered Fragments',
        'ylab': '# Fragments',
        'cpswitch_counts_label': 'Number of Fragments',
        'hide_zero_cats': False
    }

    self.add_section(plot=bargraph.plot(data, keys, pconfig))
Plot of filtered reads for control and treatment samples
def validate_arguments(args):
    """Makes sure arguments are valid, specified files exist, etc.

    Mutates `args` in place (lower-cases test names, resolves paths,
    coerces the threshold to float) and returns the same namespace.
    Raises VFClustException on any invalid combination.
    """
    #check arguments
    print
    print "Checking input...",

    # Supported test names for each clustering mode.
    semantic_tests = ["animals", "custom"]
    phonemic_tests = ["a", "p", "s", "f"]

    if args.similarity_file:
        print
        print "Custom similarity file was specified..."
        # A custom similarity file implies the "custom" semantic test.
        args.semantic = "custom"
        if args.threshold:
            try:
                args.threshold = float(args.threshold)
            except ValueError:
                raise VFClustException('Custom threshold (--threshold argument) must be a number.')

    # Input must be a .csv or .TextGrid file that actually exists.
    if not (args.source_file_path.lower().endswith('csv') or args.source_file_path.lower().endswith('textgrid')):
        raise VFClustException('The input must be either a .TextGrid or .csv file!\nYou provided ' + args.source_file_path.lower())
    if not os.path.isfile(args.source_file_path):
        raise VFClustException('The input file path you provided does not exist on your system!')

    #if no output path provided, write to source file path
    if args.output_path == None:
        args.output_path = args.source_path
    #if output_path is False, don't output anything
    elif args.output_path == False:
        pass
    else:
        #verify/make folders for output
        if len(args.output_path) == 0:
            args.output_path = os.path.abspath(os.path.dirname(args.source_file_path))
        try:
            if not os.path.isdir(args.output_path):
                os.mkdir(args.output_path)
        except:
            # NOTE(review): best-effort — a failed mkdir only prints a
            # warning; output falls back to the input directory.
            print "Error creating folder for program output. " \
                  "Make sure you have write permissions to the folder you provided. " \
                  "You can change the folder with the -o option." \
                  "The output directory will be the same as the input directory."

    #make phonemic and semantic args lower case
    if (args.semantic):
        args.semantic = args.semantic.lower()
    if (args.phonemic):
        args.phonemic = args.phonemic.lower()

    #must choose either semantic or phonemic
    if not (args.semantic or args.phonemic):
        print "DEBUG", args.semantic, args.similarity_file
        raise VFClustException(
            '''You must specify at least one phonemic or semantic test to run using -p or -s, followed by the test type.
            Alternatively, provide a custom similarity file using the --similarity-file and --threshold options.''')

    #make sure semantic arguments are legit
    if args.semantic and args.semantic not in semantic_tests:
        raise VFClustException("Currently only " + ",".join(semantic_tests) + " are supported for semantic testing. " \
                               "You provided " + args.semantic)

    if args.phonemic and args.phonemic not in phonemic_tests:
        raise VFClustException("Currently only " + ",".join(phonemic_tests) + " are supported for phonemic testing. " \
                               "You provided " + args.phonemic)

    # The two clustering modes are mutually exclusive.
    if (args.phonemic and args.semantic):
        raise VFClustException("You must choose EITHER semantic OR phonemic clustering.")

    #make paths absolute
    args.source_file_path = os.path.abspath(args.source_file_path)
    if args.output_path:
        args.output_path = os.path.abspath(args.output_path)

    #using custom similarity file
    if args.similarity_file:  #if it's not None
        if not os.path.isfile(args.similarity_file):
            raise VFClustException('The custom similarity file path you provided does not exist on your system!')
        if not args.threshold:
            raise VFClustException('You must specify a clustering threshold when using a custom similarity file. Use --threshold X, where X is the threshold number.')
        try:
            args.threshold = float(args.threshold)
        except:
            raise VFClustException('Error reading the custom threshold you provided. It must be a number, e.g. --threshold 6.7 or --threshold 10')
        args.similarity_file = os.path.abspath(args.similarity_file)

    print "OK!"
    print
    print "Parsed arguments:"
    print_table([(k, str(vars(args)[k])) for k in vars(args)])

    return args
Makes sure arguments are valid, specified files exist, etc.
def read(self, n):
    """
    Consume `n` characters from the stream.
    """
    # Keep pulling from the socket until the buffer holds n bytes.
    while len(self.buf) < n:
        chunk = self.f.recv(4096)
        if not chunk:
            raise EndOfStreamError()
        self.buf += chunk
    result = self.buf[:n]
    self.buf = self.buf[n:]
    return result
Consume `n` characters from the stream.
def _get_data_dtype(self):
    """Get the dtype of the file based on the actual available channels"""
    # Packet header preceding every line record.
    pkhrec = [
        ('GP_PK_HEADER', GSDTRecords.gp_pk_header),
        ('GP_PK_SH1', GSDTRecords.gp_pk_sh1)
    ]
    pk_head_dtype = np.dtype(pkhrec)

    def get_lrec(cols):
        # One line record: packet header, line metadata, then `cols`
        # bytes of packed pixel data.
        lrec = [
            ("gp_pk", pk_head_dtype),
            ("version", np.uint8),
            ("satid", np.uint16),
            ("time", (np.uint16, 5)),
            ("lineno", np.uint32),
            ("chan_id", np.uint8),
            ("acq_time", (np.uint16, 3)),
            ("line_validity", np.uint8),
            ("line_rquality", np.uint8),
            ("line_gquality", np.uint8),
            ("line_data", (np.uint8, cols))
        ]

        return lrec

    # each pixel is 10-bits -> one line of data has 25% more bytes
    # than the number of columns suggest (10/8 = 1.25)
    visir_rec = get_lrec(int(self.mda['number_of_columns'] * 1.25))

    # One visir line record per non-HRV channel in the file.
    number_of_visir_channels = len(
        [s for s in self.mda['channel_list'] if not s == 'HRV'])
    drec = [('visir', (visir_rec, number_of_visir_channels))]

    # HRV, when present, contributes three lines per repeat cycle.
    if self.mda['available_channels']['HRV']:
        hrv_rec = get_lrec(int(self.mda['hrv_number_of_columns'] * 1.25))
        drec.append(('hrv', (hrv_rec, 3)))

    return np.dtype(drec)
Get the dtype of the file based on the actual available channels
def clone(self) -> 'ImageBBox':
    "Mimic the behavior of torch.clone for `Image` objects."
    # Deep-copy the underlying flow tensor, then rebuild the bbox around it.
    copied_flow = FlowField(self.size, self.flow.flow.clone())
    return self.__class__(copied_flow, scale=False, y_first=False,
                          labels=self.labels, pad_idx=self.pad_idx)
Mimic the behavior of torch.clone for `Image` objects.
def update_dashboard(self, id, **kwargs):  # noqa: E501
    """Update a specific dashboard  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_dashboard(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param Dashboard body: the dashboard definition (name, id, url,
        description and sections) to store under `id`.
    :return: ResponseContainerDashboard
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate already returns the request thread when async_req=True
    # and the deserialized data otherwise, so a single call covers both.
    return self.update_dashboard_with_http_info(id, **kwargs)  # noqa: E501
Update a specific dashboard # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_dashboard(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param Dashboard body: Example Body: <pre>{ \"name\": \"Dashboard API example\", \"id\": \"api-example\", \"url\": \"api-example\", \"description\": \"Dashboard Description\", \"sections\": [ { \"name\": \"Section 1\", \"rows\": [ { \"charts\": [ { \"name\": \"Chart 1\", \"description\": \"description1\", \"sources\": [ { \"name\": \"Source1\", \"query\": \"ts()\" } ] } ] } ] } ] }</pre> :return: ResponseContainerDashboard If the method is called asynchronously, returns the request thread.
def summary(self, sortOn=None):
    """
    Summarize all the alignments for this title.

    @param sortOn: A C{str} attribute to sort titles on. One of 'length',
        'maxScore', 'medianScore', 'readCount', or 'title'.
    @raise ValueError: If an unknown C{sortOn} value is given.
    @return: A generator that yields C{dict} instances as produced by
        C{TitleAlignments} (see class earlier in this file), sorted by
        C{sortOn}.
    """
    # With no sort key, iterate the container in its natural order.
    ordered = self if sortOn is None else self.sortTitles(sortOn)
    for title in ordered:
        yield self[title].summary()
Summarize all the alignments for this title. @param sortOn: A C{str} attribute to sort titles on. One of 'length', 'maxScore', 'medianScore', 'readCount', or 'title'. @raise ValueError: If an unknown C{sortOn} value is given. @return: A generator that yields C{dict} instances as produced by C{TitleAlignments} (see class earlier in this file), sorted by C{sortOn}.
def store_result(self, message, result: Result, ttl: int) -> None:
    """Store a result in the backend.

    Parameters:
      message(Message)
      result(object): Must be serializable.
      ttl(int): The maximum amount of time the result may be stored in
        the backend for.
    """
    key = self.build_message_key(message)
    return self._store(key, result, ttl)
Store a result in the backend. Parameters: message(Message) result(object): Must be serializable. ttl(int): The maximum amount of time the result may be stored in the backend for.
def radiation_values(self, location, timestep=1):
    """Lists of direct normal, diffuse horiz, and global horiz rad at each timestep.
    """
    # Solar altitude at every timestep of the design day.
    sun_path = Sunpath.from_location(location)
    datetimes = self._get_datetimes(timestep)
    altitudes = [sun_path.calculate_sun_from_date_time(t_date).altitude
                 for t_date in datetimes]

    dir_norm, diff_horiz = ashrae_clear_sky(
        altitudes, self._month, self._clearness)

    # Global horizontal = diffuse + direct projected onto the horizontal.
    glob_horiz = [dhr + dnr * math.sin(math.radians(alt))
                  for alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]

    return dir_norm, diff_horiz, glob_horiz
Lists of direct normal, diffuse horiz, and global horiz rad at each timestep.
def list_semod():
    '''
    Return a structure listing all of the selinux modules on the system and
    what state they are in

    CLI Example:

    .. code-block:: bash

        salt '*' selinux.list_semod

    .. versionadded:: 2016.3.0
    '''
    # Newer policy tooling advertises a 'full' listing flag in its help
    # text; use that to pick the right listing command and parser.
    help_lines = __salt__['cmd.run']('semodule -h').splitlines()
    new_style = any(line.strip().startswith('full') for line in help_lines)

    ret = {}
    if new_style:
        for line in __salt__['cmd.run']('semodule -lfull').splitlines():
            if not line.strip():
                continue
            comps = line.split()
            # Four columns mark a disabled module in the -lfull output.
            ret[comps[1]] = {'Enabled': len(comps) != 4, 'Version': None}
    else:
        for line in __salt__['cmd.run']('semodule -l').splitlines():
            if not line.strip():
                continue
            comps = line.split()
            # Three columns mark a disabled module in the legacy output.
            ret[comps[0]] = {'Enabled': len(comps) != 3, 'Version': comps[1]}
    return ret
Return a structure listing all of the selinux modules on the system and what state they are in CLI Example: .. code-block:: bash salt '*' selinux.list_semod .. versionadded:: 2016.3.0
def pp_hex(raw, reverse=True):
    """Return a pretty-printed (hex style) version of a binary string.

    Args:
        raw (bytes): any sequence of bytes
        reverse (bool): True if output should be in reverse order.

    Returns:
        Hex string corresponding to input byte sequence.
    """
    pairs = ['{:02x}'.format(byte) for byte in bytearray(raw)]
    if reverse:
        pairs.reverse()
    return ''.join(pairs)
Return a pretty-printed (hex style) version of a binary string. Args: raw (bytes): any sequence of bytes reverse (bool): True if output should be in reverse order. Returns: Hex string corresponding to input byte sequence.
def default_branch(self, file):
    """ Decide the name of the default branch given the file and the configuration

    :param file: File with information about it
    :return: Branch Name
    """
    configured = self.__default_branch__
    # A plain string is used verbatim as the branch name.
    if isinstance(configured, str):
        return configured
    if configured == GithubProxy.DEFAULT_BRANCH.NO:
        return self.master_upstream
    # Otherwise derive a short branch name from the file's sha.
    return file.sha[:8]
Decide the name of the default branch given the file and the configuration :param file: File with informations about it :return: Branch Name
def iterstraight(self, raw):
    """Iterator that undoes the effect of filtering, and yields each row
    in serialised format (as a sequence of bytes).  Assumes input is
    straightlaced.  `raw` should be an iterable that yields the raw bytes
    in chunks of arbitrary size.
    """
    row_bytes = self.row_bytes
    pending = array('B')
    # Previously reconstructed scanline; None for the first row of the image.
    previous = None
    for chunk in raw:
        pending.extend(chunk)
        # Each serialised row is one filter-type byte plus row_bytes of data.
        while len(pending) >= row_bytes + 1:
            filter_type = pending[0]
            scanline = pending[1:row_bytes + 1]
            del pending[:row_bytes + 1]
            previous = self.undo_filter(filter_type, scanline, previous)
            yield previous
    if len(pending) != 0:
        # Leftover bytes mean the decompressed data did not pack into
        # exact rows: a file format error.
        raise FormatError('Wrong size for decompressed IDAT chunk.')
Iterator that undoes the effect of filtering, and yields each row in serialised format (as a sequence of bytes). Assumes input is straightlaced. `raw` should be an iterable that yields the raw bytes in chunks of arbitrary size.
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_last_counters_cleared(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_last_counters_cleared = ET.SubElement(fcoe_intf_list, "fcoe-intf-last-counters-cleared") fcoe_intf_last_counters_cleared.text = kwargs.pop('fcoe_intf_last_counters_cleared') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def chunk_count(self):
    """Return a count of the chunks in this world folder.

    Sums the chunk counts of every region in the folder.
    """
    # sum() over a generator replaces the hand-rolled accumulator loop.
    return sum(region.chunk_count() for region in self.iter_regions())
Return a count of the chunks in this world folder.
def train_evaluate_model_from_config(config: Union[str, Path, dict],
                                     iterator: Union[DataLearningIterator, DataFittingIterator] = None, *,
                                     to_train: bool = True,
                                     evaluation_targets: Optional[Iterable[str]] = None,
                                     to_validate: Optional[bool] = None,
                                     download: bool = False,
                                     start_epoch_num: Optional[int] = None,
                                     recursive: bool = False) -> Dict[str, Dict[str, float]]:
    """Make training and evaluation of the model described in corresponding configuration file.

    Returns a mapping from evaluation target name (e.g. 'valid', 'test')
    to its metrics dict; empty when no iterator could be built.
    """
    config = parse_config(config)

    if download:
        deep_download(config)

    # Recursively train every nested sub-config referenced by the chainer
    # before training the top-level model.
    if to_train and recursive:
        for subconfig in get_all_elems_from_json(config['chainer'], 'config_path'):
            log.info(f'Training "{subconfig}"')
            train_evaluate_model_from_config(subconfig, download=False, recursive=True)

    import_packages(config.get('metadata', {}).get('imports', []))

    if iterator is None:
        try:
            data = read_data_by_config(config)
        except ConfigError as e:
            # Without data there is nothing to train on; evaluation is
            # skipped further below because `iterator` stays None.
            to_train = False
            log.warning(f'Skipping training. {e.message}')
        else:
            iterator = get_iterator_from_config(config, data)

    if 'train' not in config:
        log.warning('Train config is missing. Populating with default values')
    train_config = config.get('train')

    if start_epoch_num is not None:
        train_config['start_epoch_num'] = start_epoch_num

    # Back-compat: translate deprecated validate_best/test_best flags into
    # the evaluation_targets list.
    if 'evaluation_targets' not in train_config and ('validate_best' in train_config
                                                     or 'test_best' in train_config):
        log.warning('"validate_best" and "test_best" parameters are deprecated.'
                    ' Please, use "evaluation_targets" list instead')

        train_config['evaluation_targets'] = []
        if train_config.pop('validate_best', True):
            train_config['evaluation_targets'].append('valid')
        if train_config.pop('test_best', True):
            train_config['evaluation_targets'].append('test')

    trainer_class = get_model(train_config.pop('class_name', 'nn_trainer'))

    trainer = trainer_class(config['chainer'], **train_config)

    if to_train:
        trainer.train(iterator)

    res = {}

    if iterator is not None:
        # Back-compat: translate the deprecated to_validate flag into
        # evaluation_targets, unless both were given.
        if to_validate is not None:
            if evaluation_targets is None:
                log.warning('"to_validate" parameter is deprecated and will be removed in future versions.'
                            ' Please, use "evaluation_targets" list instead')
                evaluation_targets = ['test']
                if to_validate:
                    evaluation_targets.append('valid')
            else:
                log.warn('Both "evaluation_targets" and "to_validate" parameters are specified.'
                         ' "to_validate" is deprecated and will be ignored')

        res = trainer.evaluate(iterator, evaluation_targets, print_reports=True)
        trainer.get_chainer().destroy()

    # Strip everything but the metrics from each evaluation report.
    res = {k: v['metrics'] for k, v in res.items()}

    return res
Make training and evaluation of the model described in corresponding configuration file.
def get_key_pair(self, alias_name):
    """
    Retrieves the public and private key pair associated with the
    specified alias name.

    Args:
        alias_name: Key pair associated with the RabbitMQ

    Returns:
        dict: RabbitMQ certificate
    """
    endpoint = ''.join([self.URI, "/keypair/", alias_name])
    return self._client.get(endpoint)
Retrieves the public and private key pair associated with the specified alias name. Args: alias_name: Key pair associated with the RabbitMQ Returns: dict: RabbitMQ certificate
def delete_by_ids(self, ids):
    """Delete objects by ids.

    :param ids: list of objects ids to delete.
    :return: True if at least one object was deleted. Otherwise, return
        False when no matching objects were found.
    """
    # QuerySet.delete() never raises DoesNotExist — deleting an empty
    # queryset is a no-op — so the original try/except always returned
    # True.  delete() returns (total_deleted, per_model_counts); use the
    # count so the documented "False when nothing was found" contract
    # actually holds.
    deleted_count, _ = self.filter(id__in=ids).delete()
    return deleted_count > 0
Delete objects by ids. :param ids: list of objects ids to delete. :return: True if objects were deleted. Otherwise, return False if no objects were found or the delete was not successful.
def check_name(name):
    """
    Verify the name is well-formed

    >>> check_name(123)
    False
    >>> check_name('')
    False
    >>> check_name('abc')
    False
    >>> check_name('abc.def')
    True
    >>> check_name('abc.def.ghi')
    False
    >>> check_name('abc.d-ef')
    True
    >>> check_name('abc.d+ef')
    False
    >>> check_name('.abc')
    False
    >>> check_name('abc.')
    False
    >>> check_name('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.abcd')
    False
    >>> check_name('abcdabcdabcdabcdabcdabcdabcdabcdabc.d')
    True
    """
    # Only (byte or unicode) strings can be names.
    # NOTE: `unicode` exists on Python 2 only — this module targets py2.
    if type(name) not in (str, unicode):
        return False
    # Delegate the structural rules to the shared validator.
    return True if is_name_valid(name) else False
Verify the name is well-formed >>> check_name(123) False >>> check_name('') False >>> check_name('abc') False >>> check_name('abc.def') True >>> check_name('abc.def.ghi') False >>> check_name('abc.d-ef') True >>> check_name('abc.d+ef') False >>> check_name('.abc') False >>> check_name('abc.') False >>> check_name('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.abcd') False >>> check_name('abcdabcdabcdabcdabcdabcdabcdabcdabc.d') True
def modify_snapshot(snapshot_id=None, description=None, userdata=None, cleanup=None, config="root"):
    '''
    Modify attributes of an existing snapshot.

    config
        Configuration name. (Default: root)

    snapshot_id
        ID of the snapshot to be modified.

    cleanup
        Change the cleanup method of the snapshot. (str)

    description
        Change the description of the snapshot. (str)

    userdata
        Change the userdata dictionary of the snapshot. (dict)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.modify_snapshot 54 description="my snapshot description"
        salt '*' snapper.modify_snapshot 54 description="my snapshot description"
        salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
        salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
    '''
    # A snapshot ID is mandatory -- fail early with an explicit error.
    if not snapshot_id:
        raise CommandExecutionError('Error: No snapshot ID has been provided')

    # Fetch the snapshot's current attributes so that any attribute the
    # caller did not pass keeps its existing value.
    snapshot = get_snapshot(config=config, number=snapshot_id)
    try:
        # Updating only the explicitly provided attributes by the user
        updated_opts = {
            'description': description if description is not None else snapshot['description'],
            'cleanup': cleanup if cleanup is not None else snapshot['cleanup'],
            'userdata': userdata if userdata is not None else snapshot['userdata'],
        }
        snapper.SetSnapshot(config, snapshot_id,
                            updated_opts['description'],
                            updated_opts['cleanup'],
                            updated_opts['userdata'])
        # Re-read the snapshot so the caller gets its post-update state.
        return get_snapshot(config=config, number=snapshot_id)
    except dbus.DBusException as exc:
        # locals() is passed so the error helper can reference the call's
        # arguments when formatting the failure reason -- do not rename
        # locals in this function without checking that helper.
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
Modify attributes of an existing snapshot. config Configuration name. (Default: root) snapshot_id ID of the snapshot to be modified. cleanup Change the cleanup method of the snapshot. (str) description Change the description of the snapshot. (str) userdata Change the userdata dictionary of the snapshot. (dict) CLI example: .. code-block:: bash salt '*' snapper.modify_snapshot 54 description="my snapshot description" salt '*' snapper.modify_snapshot 54 description="my snapshot description" salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}' salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
def router_connections(self):
    """Return a list of MongoClients, one for each mongos."""
    # Connect only to routers that are currently alive; the liveness check
    # runs before each connection attempt, mirroring registration order.
    return [
        self.create_connection(Servers().hostname(router))
        for router in self._routers
        if Servers().is_alive(router)
    ]
Return a list of MongoClients, one for each mongos.
def get_column_info(connection, table_name):
    """
    Return an in order list of (name, type) tuples describing the
    columns in the given table.
    """
    # Pull the original CREATE TABLE statement out of sqlite's catalog.
    cursor = connection.cursor()
    cursor.execute("SELECT sql FROM sqlite_master WHERE type == 'table' AND name == ?", (table_name,))
    statement, = cursor.fetchone()

    # Extract the parenthesised column-definition list from the statement.
    coldefs = re.match(_sql_create_table_pattern, statement).groupdict()["coldefs"]

    # Walk each column definition in order, skipping table-level
    # constraints (PRIMARY KEY / UNIQUE / CHECK clauses).
    columns = []
    for match in re.finditer(_sql_coldef_pattern, coldefs):
        info = match.groupdict()
        if info["name"].upper() in ("PRIMARY", "UNIQUE", "CHECK"):
            continue
        columns.append((info["name"], info["type"]))
    return columns
Return an in order list of (name, type) tuples describing the columns in the given table.
def get_ips(self, interface=None, family=None, scope=None, timeout=0):
    """
    Get a tuple of IPs for the container.
    """
    # Forward only the filter arguments the caller actually supplied.
    filters = {}
    for key, value in (("interface", interface),
                       ("family", family),
                       ("scope", scope)):
        if value:
            filters[key] = value

    # The LXC_GETIP_TIMEOUT environment variable overrides the timeout
    # argument when set.  Poll once per second until an address shows up
    # or the countdown runs out (timeout == 0 means a single attempt).
    remaining = int(os.environ.get('LXC_GETIP_TIMEOUT', timeout))
    ips = None
    while not ips:
        ips = _lxc.Container.get_ips(self, **filters)
        if remaining == 0:
            break
        remaining -= 1
        time.sleep(1)
    return ips
Get a tuple of IPs for the container.
def _disjoint_qubits(op1: ops.Operation, op2: ops.Operation) -> bool: """Returns true only if the operations have qubits in common.""" return not set(op1.qubits) & set(op2.qubits)
Returns True only if the operations have no qubits in common.