def strftime(self, fmt):
    """Format using strftime(). The date part of the timestamp passed
    to underlying strftime should not be used.
    """
    # The year must be >= 1000 else Python's strftime implementation
    # can raise a bogus exception.
    timetuple = (1900, 1, 1,
                 self._hour, self._minute, self._second,
                 0, 1, -1)
    return _wrap_strftime(self, fmt, timetuple)
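# Usage sketch (an assumption for illustration): the standard library's
# datetime.time.strftime behaves the same way -- only the time fields of the
# dummy timetuple are meaningful, so date directives fall back to the
# placeholder year 1900.
from datetime import time

t = time(14, 30, 15)
print(t.strftime("%H:%M:%S"))  # 14:30:15
print(t.strftime("%Y-%m-%d"))  # 1900-01-01 -- the dummy date part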
def addMonitor(self, monitorFriendlyName, monitorURL, monitorAlertContacts=''):
    """
    Returns True if Monitor was added, otherwise False.
    """
    # monitorAlertContacts was referenced below but never defined in the
    # original; accept it as an optional parameter so the URL builds.
    url = self.baseUrl
    url += "newMonitor?apiKey=%s" % self.apiKey
    url += "&monitorFriendlyName=%s" % monitorFriendlyName
    url += "&monitorURL=%s&monitorType=1" % monitorURL
    url += "&monitorAlertContacts=%s" % monitorAlertContacts
    url += "&noJsonCallback=1&format=json"
    success, response = self.requestApi(url)
    return bool(success)
def _wait_for_handles(handles, timeout=-1):
    """
    Waits for multiple handles. (Similar to 'select') Returns
    the handle which is ready. Returns `None` on timeout.

    http://msdn.microsoft.com/en-us/library/windows/desktop/ms687025(v=vs.85).aspx
    """
    arrtype = HANDLE * len(handles)
    handle_array = arrtype(*handles)

    ret = windll.kernel32.WaitForMultipleObjects(
        len(handle_array), handle_array, BOOL(False), DWORD(timeout))

    if ret == WAIT_TIMEOUT:
        return None
    else:
        h = handle_array[ret]
        return h
def add_row(self, row):
    """Add a row to the table

    Arguments:

    row - row of data, should be a list with as many elements as the
    table has fields"""
    if self._field_names and len(row) != len(self._field_names):
        raise Exception(
            "Row has incorrect number of values, (actual) %d != %d (expected)"
            % (len(row), len(self._field_names)))
    self._rows.append(list(row))
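# Usage sketch: this matches prettytable's add_row API, where a row with the
# wrong number of values raises. (Assumes the prettytable package.)
from prettytable import PrettyTable

table = PrettyTable()
table.field_names = ["City", "Population"]
table.add_row(["Paris", 2100000])    # OK: two values for two fields
try:
    table.add_row(["Tokyo"])         # raises: one value for two fields
except Exception as exc:
    print(exc)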
def callback(self, callback, *args, **kwds):
    """
    Registers an arbitrary callback and arguments. Cannot
    suppress exceptions.
    """
    return self << _CloseDummy(callback, args, kwds)
def _fix_syscall_ip(state):
    """
    Resolve syscall information from the state, get the IP address of
    the syscall SimProcedure, and set the IP of the state accordingly.
    Don't do anything if the resolution fails.

    :param SimState state: the program state.
    :return: None
    """
    try:
        bypass = o.BYPASS_UNSUPPORTED_SYSCALL in state.options
        stub = state.project.simos.syscall(state, allow_unsupported=bypass)
        if stub:  # can be None if simos is not a subclass of SimUserspace
            state.ip = stub.addr  # fix the IP
    except AngrUnsupportedSyscallError:
        pass
def enforce_versioning(force=False):
    """Install versioning on the db."""
    connect_str, repo_url = get_version_data()
    LOG.warning("Your database uses an unversioned benchbuild schema.")
    if not force and not ui.ask(
            "Should I enforce version control on your schema?"):
        LOG.error("User declined schema versioning.")
        return None
    repo_version = migrate.version(repo_url, url=connect_str)
    migrate.version_control(connect_str, repo_url, version=repo_version)
    return repo_version
def configureLastWill(self, topic, payload, QoS):
    """
    **Description**

    Used to configure the last will topic, payload and QoS of the client.
    Should be called before connect. This is a public facing API inherited
    by application level public clients.

    **Syntax**

    .. code:: python

      myShadowClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)
      myJobsClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)

    **Parameters**

    *topic* - Topic name that last will publishes to.

    *payload* - Payload to publish for last will.

    *QoS* - Quality of Service. Could be 0 or 1.

    **Returns**

    None

    """
    # AWSIoTMQTTClient.configureLastWill(srcTopic, srcPayload, srcQos)
    self._AWSIoTMQTTClient.configureLastWill(topic, payload, QoS)
def validate_member_id_params_for_group_type(group_type, params,
                                             member_group_ids,
                                             member_entity_ids):
    """Determine whether member ID parameters can be sent with a group
    create / update request.

    These parameters are only allowed for the internal group type. If
    they're set for an external group type, Vault returns an "error"
    response.

    :param group_type: Type of the group, internal or external
    :type group_type: str | unicode
    :param params: Params dict to conditionally add the member entity/group
        ID's to.
    :type params: dict
    :param member_group_ids: Group IDs to be assigned as group members.
    :type member_group_ids: str | unicode
    :param member_entity_ids: Entity IDs to be assigned as group members.
    :type member_entity_ids: str | unicode
    :return: Params dict with conditionally added member entity/group ID's.
    :rtype: dict
    """
    # Only internal groups may carry member IDs; external groups just warn.
    if group_type == 'external':
        if member_entity_ids is not None:
            logger.warning(
                "InvalidRequest: member entities can't be set manually for "
                "external groups; ignoring member_entity_ids argument.")
    else:
        params['member_entity_ids'] = member_entity_ids

    if group_type == 'external':
        if member_group_ids is not None:
            logger.warning(
                "InvalidRequest: member groups can't be set for external "
                "groups; ignoring member_group_ids argument.")
    else:
        params['member_group_ids'] = member_group_ids

    return params
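# Illustration of the intended behavior (a sketch; assumes a configured
# `logger` and the function above):
params = {}
validate_member_id_params_for_group_type(
    'internal', params,
    member_group_ids=['g1'], member_entity_ids=['e1'])
print(params)  # {'member_entity_ids': ['e1'], 'member_group_ids': ['g1']}

params = {}
validate_member_id_params_for_group_type(
    'external', params,
    member_group_ids=['g1'], member_entity_ids=['e1'])
print(params)  # {} -- for external groups the IDs are ignored with a warning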
def copy_logstore(from_client, from_project, from_logstore, to_logstore,
                  to_project=None, to_client=None):
    """
    copy logstore, index, logtail config to target logstore, machine group
    are not included yet. the target logstore will be created if not existing

    :type from_client: LogClient
    :param from_client: logclient instance

    :type from_project: string
    :param from_project: project name

    :type from_logstore: string
    :param from_logstore: logstore name

    :type to_logstore: string
    :param to_logstore: target logstore name

    :type to_project: string
    :param to_project: project name, copy to same project if not being
        specified, will try to create it if being specified

    :type to_client: LogClient
    :param to_client: logclient instance, use it to operate on the
        "to_project" if being specified

    :return:
    """
    # check client
    if to_project is not None:
        # copy to a different project in different client
        to_client = to_client or from_client

        # check if target project exists or not
        ret = from_client.get_project(from_project)
        try:
            ret = to_client.create_project(to_project, ret.get_description())
        except LogException as ex:
            if ex.get_error_code() == 'ProjectAlreadyExist':
                # don't create the project as it already exists
                pass
            else:
                raise

    to_project = to_project or from_project
    to_client = to_client or from_client

    # return if logstore are the same one
    if from_client is to_client and from_project == to_project \
            and from_logstore == to_logstore:
        return

    # copy logstore
    ret = from_client.get_logstore(from_project, from_logstore)
    res_shard = from_client.list_shards(from_project, from_logstore)
    expected_rwshard_count = len([
        shard for shard in res_shard.shards
        if shard['status'].lower() == 'readwrite'])
    try:
        ret = to_client.create_logstore(
            to_project, to_logstore, ttl=ret.get_ttl(),
            shard_count=min(expected_rwshard_count, MAX_INIT_SHARD_COUNT),
            enable_tracking=ret.get_enable_tracking(),
            append_meta=ret.append_meta,
            auto_split=ret.auto_split,
            max_split_shard=ret.max_split_shard,
            preserve_storage=ret.preserve_storage)
    except LogException as ex:
        if ex.get_error_code() == 'LogStoreAlreadyExist':
            # update logstore's settings
            ret = to_client.update_logstore(
                to_project, to_logstore, ttl=ret.get_ttl(),
                enable_tracking=ret.get_enable_tracking(),
                append_meta=ret.append_meta,
                auto_split=ret.auto_split,
                max_split_shard=ret.max_split_shard,
                preserve_storage=ret.preserve_storage)
            # arrange shard to expected count
            res = arrange_shard(to_client, to_project, to_logstore,
                                min(expected_rwshard_count,
                                    MAX_INIT_SHARD_COUNT))
        else:
            raise

    # copy index
    try:
        ret = from_client.get_index_config(from_project, from_logstore)
        ret = to_client.create_index(to_project, to_logstore,
                                     ret.get_index_config())
    except LogException as ex:
        if ex.get_error_code() == 'IndexConfigNotExist':
            # source has no index
            pass
        elif ex.get_error_code() == 'IndexAlreadyExist':
            # target already has index, overwrite it
            ret = to_client.update_index(to_project, to_logstore,
                                         ret.get_index_config())
        else:
            raise

    # list logtail config linked to the logstore and copy them
    default_fetch_size = 100
    offset, size = 0, default_fetch_size
    while True:
        ret = from_client.list_logtail_config(from_project, offset=offset,
                                              size=size)
        count = ret.get_configs_count()
        total = ret.get_configs_total()

        for config_name in ret.get_configs():
            ret = from_client.get_logtail_config(from_project, config_name)
            config = ret.logtail_config
            if config.logstore_name != from_logstore:
                continue
            config.config_name = to_logstore + '_' + config_name
            config.logstore_name = to_logstore
            ret = to_client.create_logtail_config(to_project, config)

        offset += count
        if count < size or offset >= total:
            break
def point_translate(point_in, vector_in):
    """
    Translates the input points using the input vector.

    :param point_in: input point
    :type point_in: list, tuple
    :param vector_in: input vector
    :type vector_in: list, tuple
    :return: translated point
    :rtype: list
    """
    try:
        if point_in is None or len(point_in) == 0 \
                or vector_in is None or len(vector_in) == 0:
            raise ValueError("Input arguments cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Translate the point using the input vector
    point_out = [coord + comp for coord, comp in zip(point_in, vector_in)]

    return point_out
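# A one-line usage example:
print(point_translate([1.0, 2.0, 3.0], [0.5, 0.0, -1.0]))  # [1.5, 2.0, 2.0]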
def create_index(self, index):
    """Creates and opens index folder for given index.

    If the index already exists, it just opens it, otherwise it creates
    it first.
    """
    index._path = os.path.join(self.indexes_path, index._name)

    if whoosh.index.exists_in(index._path):
        _whoosh = whoosh.index.open_dir(index._path)
    else:
        # The directory may exist without containing a whoosh index; the
        # original `elif not os.path.exists(...)` left `_whoosh` unbound
        # in that case.
        if not os.path.exists(index._path):
            os.makedirs(index._path)
        _whoosh = whoosh.index.create_in(index._path, index._schema)

    index._whoosh = _whoosh
def relabel(self, qubits: Qubits) -> 'State':
    """Return a copy of this state with new qubits"""
    return State(self.vec.tensor, qubits, self._memory)
def get_variant_genotypes(self, variant):
    """Get the genotypes from a well formed variant instance.

    Args:
        variant (Variant): A Variant instance.

    Returns:
        A list of Genotypes instances containing a pointer to the variant
        as well as a vector of encoded genotypes.

    """
    if not self.has_index:
        raise NotImplementedError("Not implemented when IMPUTE2 file is "
                                  "not indexed (see genipe)")

    # Find the variant in the index
    try:
        impute2_chrom = CHROM_STR_TO_INT[variant.chrom.name]
    except KeyError:
        raise ValueError(
            "Invalid chromosome ('{}') for IMPUTE2.".format(variant.chrom)
        )

    variant_info = self._impute2_index[
        (self._impute2_index.chrom == impute2_chrom) &
        (self._impute2_index.pos == variant.pos)
    ]

    if variant_info.shape[0] == 0:
        logging.variant_not_found(variant)
        return []

    elif variant_info.shape[0] == 1:
        return self._get_biallelic_variant(variant, variant_info)

    else:
        return self._get_multialleic_variant(variant, variant_info)
def _residual(self, x, in_filter, out_filter, stride,
              activate_before_residual=False):
    """Residual unit with 2 sub layers."""
    if activate_before_residual:
        with tf.variable_scope('shared_activation'):
            x = self._layer_norm('init_bn', x)
            x = self._relu(x, self.hps.relu_leakiness)
            orig_x = x
    else:
        with tf.variable_scope('residual_only_activation'):
            orig_x = x
            x = self._layer_norm('init_bn', x)
            x = self._relu(x, self.hps.relu_leakiness)

    with tf.variable_scope('sub1'):
        x = self._conv('conv1', x, 3, in_filter, out_filter, stride)

    with tf.variable_scope('sub2'):
        x = self._layer_norm('bn2', x)
        x = self._relu(x, self.hps.relu_leakiness)
        x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])

    with tf.variable_scope('sub_add'):
        if in_filter != out_filter:
            orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
            orig_x = tf.pad(
                orig_x, [[0, 0], [0, 0], [0, 0],
                         [(out_filter - in_filter) // 2,
                          (out_filter - in_filter) // 2]])
        x += orig_x

    return x
def get_file(fn):
    """Returns file contents in unicode as list."""
    fn = os.path.join(os.path.dirname(__file__), 'data', fn)
    # Use a context manager so the file handle is always closed.
    with open(fn, 'rb') as f:
        lines = [line.decode('utf-8').strip() for line in f.readlines()]
    return lines
def catalogAdd(type, orig, replace):
    """Add an entry in the catalog, it may overwrite existing but
    different entries. If called before any other catalog routine,
    allows overriding the default shared catalog put in place by
    xmlInitializeCatalog();"""
    ret = libxml2mod.xmlCatalogAdd(type, orig, replace)
    return ret
def radial_density(im, bins=10, voxel_size=1):
    r"""
    Computes radial density function by analyzing the histogram of voxel
    values in the distance transform.  This function is defined by
    Torquato [1] as:

        .. math::

            \int_0^\infty P(r)dr = 1.0

    where *P(r)dr* is the probability of finding a voxel lying at a radial
    distance between *r* and *dr* from the solid interface.  This is
    equivalent to a probability density function (*pdf*).

    The cumulative distribution is defined as:

        .. math::

            F(r) = \int_r^\infty P(r)dr

    which gives the fraction of pore-space with a radius larger than *r*.
    This is equivalent to the cumulative distribution function (*cdf*).

    Parameters
    ----------
    im : ND-array
        Either a binary image of the pore space with ``True`` indicating the
        pore phase (or phase of interest), or a pre-calculated distance
        transform which can save time.

    bins : int or array_like
        This number of bins (if int) or the location of the bins (if array).
        This argument is passed directly to Scipy's ``histogram`` function so
        see that docstring for more information.  The default is 10 bins,
        which produces a relatively smooth distribution.

    voxel_size : scalar
        The size of a voxel side in preferred units.  The default is 1, so
        the user can apply the scaling to the returned results after the
        fact.

    Returns
    -------
    result : named_tuple
        A named-tuple containing several 1D arrays:

        *R* - radius, equivalent to ``bin_centers``

        *pdf* - probability density function

        *cdf* - cumulative density function

        *bin_centers* - the center point of each bin

        *bin_edges* - locations of bin divisions, including 1 more value than
        the number of bins

        *bin_widths* - useful for passing to the ``width`` argument of
        ``matplotlib.pyplot.bar``

    Notes
    -----
    This function should not be taken as a pore size distribution in the
    explicit sense, but rather an indicator of the sizes in the image.  The
    distance transform contains a very skewed number of voxels with small
    values near the solid walls.  Nonetheless, it does provide a useful
    indicator and its mathematical formalism is handy.

    Torquato refers to this as the *pore-size density function*, and mentions
    that it is also known as the *pore-size distribution function*.  These
    terms are avoided here since they have specific connotations in porous
    media analysis.

    References
    ----------
    [1] Torquato, S. Random Heterogeneous Materials: Microstructure and
    Macroscopic Properties. Springer, New York (2002) - See page 48 & 292

    """
    if im.dtype == bool:
        im = spim.distance_transform_edt(im)
    mask = find_dt_artifacts(im) == 0
    im[mask] = 0
    x = im[im > 0].flatten()
    h = sp.histogram(x, bins=bins, density=True)
    h = _parse_histogram(h=h, voxel_size=voxel_size)
    rdf = namedtuple('radial_density_function',
                     ('R', 'pdf', 'cdf', 'bin_centers', 'bin_edges',
                      'bin_widths'))
    return rdf(h.bin_centers, h.pdf, h.cdf, h.bin_centers, h.bin_edges,
               h.bin_widths)
r""" Computes radial density function by analyzing the histogram of voxel values in the distance transform. This function is defined by Torquato [1] as: .. math:: \int_0^\infty P(r)dr = 1.0 where *P(r)dr* is the probability of finding a voxel at a lying at a radial distance between *r* and *dr* from the solid interface. This is equivalent to a probability density function (*pdf*) The cumulative distribution is defined as: .. math:: F(r) = \int_r^\infty P(r)dr which gives the fraction of pore-space with a radius larger than *r*. This is equivalent to the cumulative distribution function (*cdf*). Parameters ---------- im : ND-array Either a binary image of the pore space with ``True`` indicating the pore phase (or phase of interest), or a pre-calculated distance transform which can save time. bins : int or array_like This number of bins (if int) or the location of the bins (if array). This argument is passed directly to Scipy's ``histogram`` function so see that docstring for more information. The default is 10 bins, which reduces produces a relatively smooth distribution. voxel_size : scalar The size of a voxel side in preferred units. The default is 1, so the user can apply the scaling to the returned results after the fact. Returns ------- result : named_tuple A named-tuple containing several 1D arrays: *R* - radius, equivalent to ``bin_centers`` *pdf* - probability density function *cdf* - cumulative density function *bin_centers* - the center point of each bin *bin_edges* - locations of bin divisions, including 1 more value than the number of bins *bin_widths* - useful for passing to the ``width`` argument of ``matplotlib.pyplot.bar`` Notes ----- This function should not be taken as a pore size distribution in the explict sense, but rather an indicator of the sizes in the image. The distance transform contains a very skewed number of voxels with small values near the solid walls. Nonetheless, it does provide a useful indicator and it's mathematical formalism is handy. Torquato refers to this as the *pore-size density function*, and mentions that it is also known as the *pore-size distribution function*. These terms are avoided here since they have specific connotations in porous media analysis. References ---------- [1] Torquato, S. Random Heterogeneous Materials: Mircostructure and Macroscopic Properties. Springer, New York (2002) - See page 48 & 292
def start(self):
    """If we have a set of plugins that provide our expected listeners
    and messengers, tell our dispatcher to start up. Otherwise, raise
    InvalidApplication
    """
    if not self.valid:
        err = ("\nMessengers and listeners that still need set:\n\n"
               "messengers : %s\n\n"
               "listeners : %s\n")
        raise InvalidApplication(err % (self.needed_messengers,
                                        self.needed_listeners))
    self.dispatcher.start()
def to_output(self, value):
    """Convert value to process output format."""
    return json.loads(
        resolwe_runtime_utils.save_file(self.name, value.path, *value.refs))
def save(self, path, name, save_meta=True):
    '''Saves model as a sequence of files in the format:
    {path}/{name}_{'dec', 'disc', 'dec_opt', 'disc_opt', 'meta'}.h5

    Parameters
    ----------
    path : str
        The directory of the file you wish to save the model to.
    name : str
        The name prefix of the model and optimizer files you wish
        to save.
    save_meta [optional] : bool
        Flag that controls whether to save the class metadata along
        with the generator, discriminator, and respective optimizer
        states.
    '''
    _save_model(self.dec, str(path), "%s_dec" % str(name))
    _save_model(self.disc, str(path), "%s_disc" % str(name))
    _save_model(self.dec_opt, str(path), "%s_dec_opt" % str(name))
    _save_model(self.disc_opt, str(path), "%s_disc_opt" % str(name))
    if save_meta:
        self._save_meta(os.path.join(path, "%s_meta" % str(name)))
def by_coordinates(self, lat, lng, radius=25.0,
                   zipcode_type=ZipcodeType.Standard,
                   sort_by=SORT_BY_DIST,
                   ascending=True,
                   returns=DEFAULT_LIMIT):
    """
    Search zipcode information near a coordinates on a map. Returns
    multiple results.

    :param lat: center latitude.
    :param lng: center longitude.
    :param radius: only returns zipcode within X miles from ``lat``,
        ``lng``.

    Implementation notes:

    1. Compute how many miles one degree of longitude and one degree of
       latitude represent at the center coordinate.
    2. Draw a rectangle centered at the given coordinate, with width and
       height slightly larger than twice the radius, and find all
       Zipcodes inside that rectangle.
    3. Compute the distance to each of these Zipcodes and sort them by
       distance; discard any whose distance exceeds the given radius.
    """
    return self.query(
        lat=lat, lng=lng, radius=radius,
        sort_by=sort_by, zipcode_type=zipcode_type,
        ascending=ascending, returns=returns,
    )
def render_next_step(self, form, **kwargs):
    """
    When using the NamedUrlFormWizard, we have to redirect to update the
    browser's URL to match the shown step.
    """
    next_step = self.get_next_step()
    self.storage.current_step = next_step
    return redirect(self.url_name, step=next_step)
def cli_execute(self, cmd):
    """ sends the command to the CLI to be executed """

    try:
        args = parse_quotes(cmd)

        if args and args[0] == 'feedback':
            self.config.set_feedback('yes')
            self.user_feedback = False

        azure_folder = get_config_dir()
        if not os.path.exists(azure_folder):
            os.makedirs(azure_folder)
        ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
        CONFIG.load(os.path.join(azure_folder, 'az.json'))
        SESSION.load(os.path.join(azure_folder, 'az.sess'), max_age=3600)

        invocation = self.cli_ctx.invocation_cls(
            cli_ctx=self.cli_ctx,
            parser_cls=self.cli_ctx.parser_cls,
            commands_loader_cls=self.cli_ctx.commands_loader_cls,
            help_cls=self.cli_ctx.help_cls)

        if '--progress' in args:
            args.remove('--progress')
            execute_args = [args]
            thread = Thread(target=invocation.execute, args=execute_args)
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
            self.curr_thread = thread

            progress_args = [self]
            thread = Thread(target=progress_view, args=progress_args)
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
            result = None
        else:
            result = invocation.execute(args)

        self.last_exit = 0
        if result and result.result is not None:
            if self.output:
                self.output.write(result)
                self.output.flush()
            else:
                formatter = self.cli_ctx.output.get_formatter(
                    self.cli_ctx.invocation.data['output'])
                self.cli_ctx.output.out(result, formatter=formatter,
                                        out_file=sys.stdout)
                self.last = result

    except Exception as ex:  # pylint: disable=broad-except
        self.last_exit = handle_exception(ex)
    except SystemExit as ex:
        self.last_exit = int(ex.code)
def append_docstring_attributes(docstring, locals):
    """Manually appends class' ``docstring`` with its attribute docstrings.
    For example::

        class Entity(object):
            # ...
            __doc__ = append_docstring_attributes(
                __doc__,
                dict((k, v) for k, v in locals().items()
                     if isinstance(v, MyDescriptor))
            )

    :param docstring: class docstring to be appended
    :type docstring: :class:`str`
    :param locals: attributes dict
    :type locals: :class:`~typing.Mapping`\ [:class:`str`, :class:`object`]
    :returns: appended docstring
    :rtype: :class:`str`

    """
    docstring = docstring or ''
    for attr, val in locals.items():
        doc = val.__doc__
        if not doc:
            continue
        doc = get_minimum_indent(doc) + doc
        lines = [' ' + l for l in textwrap.dedent(doc).splitlines()]
        docstring = append_docstring(
            docstring,
            '',
            '.. attribute:: ' + attr,
            '',
            *lines
        )
    return docstring
def delete(self):
    """
    Destroys a previously constructed :class:`ITotalizer` object.
    Internal variables ``self.cnf`` and ``self.rhs`` get cleaned.
    """
    if self.tobj:
        if not self._merged:
            pycard.itot_del(self.tobj)

            # otherwise, this totalizer object is merged into a larger one
            # therefore, this memory should be freed in its destructor
        self.tobj = None

    self.lits = []
    self.ubound = 0
    self.top_id = 0
    self.cnf = CNF()
    self.rhs = []
    self.nof_new = 0
def get_index_text(self, modname, name_cls):
    """Return text for index entry based on object type."""
    if self.objtype.endswith('function'):
        if not modname:
            return _('%s() (built-in %s)') % \
                (name_cls[0], self.chpl_type_name)
        return _('%s() (in module %s)') % (name_cls[0], modname)
    elif self.objtype in ('data', 'type', 'enum'):
        if not modname:
            type_name = self.objtype
            if type_name == 'data':
                type_name = 'variable'
            return _('%s (built-in %s)') % (name_cls[0], type_name)
        return _('%s (in module %s)') % (name_cls[0], modname)
    else:
        return ''
def tranz(parser, token, is_transchoice=False):
    """
    Templatetagish wrapper for Translator.trans()

    :param parser:
    :param token:
    :param is_transchoice:
    :return:
    """
    tokens = token.split_contents()
    id = tokens[1]
    number = domain = locale = None
    parameters = {}

    if len(tokens) > 2:
        skip_idx = None
        for idx, token in enumerate(tokens[2:], start=2):
            if idx == skip_idx:
                skip_idx = None
                continue
            if "=" in token:
                k, v = token[0:token.index('=')], token[token.index('=') + 1:]
                parameters[k] = v
            elif token == "number":
                number = tokens[idx + 1]
                skip_idx = idx + 1
            elif token == "from":
                domain = tokens[idx + 1]
                skip_idx = idx + 1
            elif token == "into":
                locale = tokens[idx + 1]
                skip_idx = idx + 1
            else:
                raise TemplateSyntaxError(
                    "Unexpected token {0} in tag tranz".format(token))

    if is_transchoice and number is None:
        raise TemplateSyntaxError(
            "number parameter expected in tag {tag_name}")

    return TranzNode(
        id, parameters, domain, locale, number,
        is_transchoice=is_transchoice)
def build_output_map(protomap, get_tensor_by_name):
    """Builds a map of tensors from `protomap` using `get_tensor_by_name`.

    Args:
      protomap: A proto map<string,TensorInfo>.
      get_tensor_by_name: A lambda that receives a tensor name and returns a
        Tensor instance.

    Returns:
      A map from string to Tensor or SparseTensor instances built from
      `protomap` and resolving tensors using `get_tensor_by_name()`.

    Raises:
      ValueError: if a TensorInfo proto is malformed.
    """

    def get_output_from_tensor_info(tensor_info):
        encoding = tensor_info.WhichOneof("encoding")
        if encoding == "name":
            return get_tensor_by_name(tensor_info.name)
        elif encoding == "coo_sparse":
            return tf.SparseTensor(
                get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
                get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
                get_tensor_by_name(
                    tensor_info.coo_sparse.dense_shape_tensor_name))
        else:
            raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)

    return {
        key: get_output_from_tensor_info(tensor_info)
        for key, tensor_info in protomap.items()
    }
def combine_intersections(
        intersections, nodes1, degree1, nodes2, degree2, all_types):
    r"""Combine curve-curve intersections into curved polygon(s).

    .. note::

       This is a helper used only by :meth:`.Surface.intersect`.

    Does so assuming each intersection lies on an edge of one of two
    :class:`.Surface`-s.

    .. note::

       This assumes that each ``intersection`` has been classified via
       :func:`classify_intersection` and only the intersections classified
       as ``FIRST`` and ``SECOND`` were kept.

    Args:
        intersections (List[.Intersection]): Intersections from each of the
            9 edge-edge pairs from a surface-surface pairing.
        nodes1 (numpy.ndarray): The nodes defining the first surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree1 (int): The degree of the surface given by ``nodes1``.
        nodes2 (numpy.ndarray): The nodes defining the second surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree2 (int): The degree of the surface given by ``nodes2``.
        all_types (Set[.IntersectionClassification]): The set of all
            intersection classifications encountered among the intersections
            for the given surface-surface pair.

    Returns:
        Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of

        * List of "edge info" lists. Each list represents a curved polygon
          and contains 3-tuples of edge index, start and end (see the
          output of :func:`ends_to_curve`).
        * "Contained" boolean. If not :data:`None`, indicates that one of
          the surfaces is contained in the other.
    """
    if intersections:
        return basic_interior_combine(intersections)
    elif all_types:
        return tangent_only_intersections(all_types)
    else:
        return no_intersections(nodes1, degree1, nodes2, degree2)
r"""Combine curve-curve intersections into curved polygon(s). .. note:: This is a helper used only by :meth:`.Surface.intersect`. Does so assuming each intersection lies on an edge of one of two :class:`.Surface`-s. .. note :: This assumes that each ``intersection`` has been classified via :func:`classify_intersection` and only the intersections classified as ``FIRST`` and ``SECOND`` were kept. Args: intersections (List[.Intersection]): Intersections from each of the 9 edge-edge pairs from a surface-surface pairing. nodes1 (numpy.ndarray): The nodes defining the first surface in the intersection (assumed in :math:\mathbf{R}^2`). degree1 (int): The degree of the surface given by ``nodes1``. nodes2 (numpy.ndarray): The nodes defining the second surface in the intersection (assumed in :math:\mathbf{R}^2`). degree2 (int): The degree of the surface given by ``nodes2``. all_types (Set[.IntersectionClassification]): The set of all intersection classifications encountered among the intersections for the given surface-surface pair. Returns: Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of * List of "edge info" lists. Each list represents a curved polygon and contains 3-tuples of edge index, start and end (see the output of :func:`ends_to_curve`). * "Contained" boolean. If not :data:`None`, indicates that one of the surfaces is contained in the other.
def generate_key(self, force=False):
    """
    Creates a key file for this TaxPayer

    Creates a key file for this TaxPayer if it does not have one, and
    immediately saves it.

    Returns True if and only if a key was created.
    """
    if self.key and not force:
        logger.warning(
            'Tried to generate key for a taxpayer that already had one'
        )
        return False

    with NamedTemporaryFile(suffix='.key') as file_:
        crypto.create_key(file_)
        self.key = File(file_, name='{}.key'.format(uuid.uuid4().hex))
        self.save()
    return True
def AREA(a, b):
    """area: Sort pack by area"""
    # Note: relies on the Python 2 built-in cmp(); under Python 3 wrap a
    # comparator like this with functools.cmp_to_key() (see the sketch below).
    return (cmp(b[0] * b[1], a[0] * a[1]) or
            cmp(b[1], a[1]) or
            cmp(b[0], a[0]))
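# Since cmp() was removed in Python 3, a sketch of how this comparator can
# still be used there; `rects` is a hypothetical list of (width, height) pairs.
from functools import cmp_to_key

def cmp(x, y):  # minimal Python 3 stand-in for the removed built-in
    return (x > y) - (x < y)

rects = [(2, 3), (4, 1), (2, 5)]
print(sorted(rects, key=cmp_to_key(AREA)))
# [(2, 5), (2, 3), (4, 1)] -- descending by area, then height, then width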
def _destroy(self):
    """Destruction code to decrement counters"""
    self.unuse_region()

    if self._rlist is not None:
        # Actual client count, which doesn't include the reference kept
        # by the manager, nor ours as we are about to be deleted
        try:
            if len(self._rlist) == 0:
                # Free all resources associated with the mapped file
                self._manager._fdict.pop(self._rlist.path_or_fd())
            # END remove regions list from manager
        except (TypeError, KeyError):
            # sometimes, during shutdown, getrefcount is None. Its possible
            # to re-import it, however, its probably better to just ignore
            # this python problem (for now).
            # The next step is to get rid of the error prone getrefcount
            # alltogether.
            pass
def recreate_grams(self):
    """Re-create grams for database.

    In normal situations, you never need to call this method. But after
    migrating the DB, this method is useful.
    """
    session = self.Session()
    for document in session.query(Document).all():
        logger.info(document.text)
        grams = self._get_grams(session, document.text, make=True)
        document.grams = list(grams)
    broken_links = session.query(Gram) \
                          .filter(~Gram.documents.any()).all()
    for gram in broken_links:
        session.delete(gram)
    session.commit()
def create_multiple_replace_func(*args, **kwds):
    """
    You can call this function and pass it a dictionary, or any other
    combination of arguments you could pass to built-in dict in order to
    construct a dictionary. The function will return a xlat closure that
    takes as its only argument text the string on which the substitutions
    are desired and returns a copy of text with all the substitutions
    performed.

    Source: Python Cookbook 2nd ed, Chapter 1.18. Replacing Multiple
    Patterns in a Single Pass.
    https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
    """
    adict = dict(*args, **kwds)
    rx = re.compile('|'.join(map(re.escape, adict)))

    def one_xlat(match):
        return adict[match.group(0)]

    def xlat(text):
        return rx.sub(one_xlat, text)

    return xlat
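# Self-contained usage example of the returned closure; the single-pass
# substitution means swapped keys don't clobber each other:
replace = create_multiple_replace_func({'cat': 'dog', 'dog': 'cat'})
print(replace('cat chases dog'))  # dog chases cat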
def init_from_wave_file(wavpath):
    """Init a sonic visualiser environment structure based on the analysis
    of the main audio file. The audio file has to be encoded in wave

    Args:
        wavpath(str): the full path to the wavfile
    """
    try:
        samplerate, data = SW.read(wavpath)
        nframes = data.shape[0]
    except Exception:
        # scipy cannot handle 24 bit wav files
        # and wave cannot handle 32 bit wav files
        try:
            w = wave.open(wavpath)
            samplerate = w.getframerate()
            nframes = w.getnframes()
        except Exception:
            raise Exception('Cannot decode wavefile ' + wavpath)
    return SVEnv(samplerate, nframes, wavpath)
def clear_description(self):
    """Clears the description.

    raise:  NoAccess - ``Metadata.isRequired()`` or
            ``Metadata.isReadOnly()`` is ``true``

    *compliance: mandatory -- This method must be implemented.*

    """
    if (self.get_description_metadata().is_read_only() or
            self.get_description_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['description'] = dict(self._description_default)
def __cancel_timer(self):
    """
    Cancels the timer, and calls its target method immediately
    """
    if self.__timer is not None:
        self.__timer.cancel()
        self.__unbind_call(True)

        self.__timer_args = None
        self.__timer = None
def generic_div(a, b):
    """Simple function to divide two numbers"""
    logger.debug('Called generic_div({}, {})'.format(a, b))
    return a / b
def connect(self, *args, **kwargs):
    """ Proxy to DynamoDBConnection.connect. """
    self.connection = DynamoDBConnection.connect(*args, **kwargs)
    self._session = kwargs.get("session")
    if self._session is None:
        self._session = botocore.session.get_session()
def xeval(source, optimize=True):
    """Compiles to native Python bytecode and runs program, returning the
    topmost value on the stack.

    Args:
        optimize: Whether to optimize the code after parsing it.

    Returns:
        None: If the stack is empty
        obj: If the stack contains a single value
        [obj, obj, ...]: If the stack contains many values
    """
    native = xcompile(source, optimize=optimize)
    return native()
def render_import_image(self, use_auth=None):
    """
    Configure the import_image plugin
    """
    # import_image is a multi-phase plugin
    if self.user_params.imagestream_name.value is None:
        self.pt.remove_plugin('exit_plugins', 'import_image',
                              'imagestream not in user parameters')
    elif self.pt.has_plugin_conf('exit_plugins', 'import_image'):
        self.pt.set_plugin_arg('exit_plugins', 'import_image', 'imagestream',
                               self.user_params.imagestream_name.value)
def unpack_pargs(positional_args, param_kwargs, gnu=False):
    """Unpack multidict and positional args into a list appropriate for
    subprocess.

    :param param_kwargs: ``ParamDict`` storing '--param' style data.
    :param positional_args: flags
    :param gnu: if True, long-name args are unpacked as:
            --parameter=argument
        otherwise, they are unpacked as:
            --parameter argument
    :returns: list appropriate for sending to subprocess
    """
    def _transform(argname):
        """Transform a python identifier into a shell-appropriate
        argument name
        """
        if len(argname) == 1:
            return '-{}'.format(argname)
        return '--{}'.format(argname.replace('_', '-'))

    args = []
    for item in param_kwargs.keys():
        for value in param_kwargs.getlist(item):
            if gnu:
                args.append('{}={}'.format(_transform(item), value))
            else:
                args.extend([_transform(item), value])

    if positional_args:
        for item in positional_args:
            args.append(_transform(item))

    return args
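# Usage sketch with a minimal stand-in for ParamDict (anything exposing
# keys() and getlist() works); the stand-in class is hypothetical:
class FakeParamDict(dict):
    def getlist(self, key):
        value = self[key]
        return value if isinstance(value, list) else [value]

kwargs = FakeParamDict({'verbose': ['true'], 'o': ['out.txt']})
print(unpack_pargs(['force'], kwargs, gnu=True))
# ['--verbose=true', '-o=out.txt', '--force']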
def name(self, name=None):
    '''api name, default is module.__name__'''
    if name:
        self._name = name
        return self
    return self._name
def main():
    '''Parse command-line arguments and start the UI server.'''
    log.configure(logging.DEBUG)
    tornado.log.enable_pretty_logging()

    # create the parser and parse the arguments
    (parser, child_parser) = args.create_parsers()
    (parsed_args, remaining) = parser.parse_known_args()

    if remaining:
        r = child_parser.parse_args(args=remaining, namespace=parsed_args)
        namespace = vars(r)
        if 'version' in namespace:
            common_config.print_build_info(zipped_pex=True)
        else:
            parser.print_help()
        parser.exit()

    # log additional information
    command_line_args = vars(parsed_args)

    Log.info("Listening at http://%s:%d%s", command_line_args['address'],
             command_line_args['port'], command_line_args['base_url'])
    Log.info("Using tracker url: %s", command_line_args['tracker_url'])

    # pass the options to tornado and start the ui server
    define_options(command_line_args['address'],
                   command_line_args['port'],
                   command_line_args['tracker_url'],
                   command_line_args['base_url'])
    http_server = tornado.httpserver.HTTPServer(
        Application(command_line_args['base_url']))
    http_server.listen(command_line_args['port'],
                       address=command_line_args['address'])

    # pylint: disable=unused-argument
    # stop Tornado IO loop
    def signal_handler(signum, frame):
        # start a new line after ^C character because this looks nice
        print('\n', end='')
        Log.debug('SIGINT received. Stopping UI')
        tornado.ioloop.IOLoop.instance().stop()

    # associate SIGINT and SIGTERM with a handler
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # start Tornado IO loop
    tornado.ioloop.IOLoop.instance().start()
def as_dict(self):
    """
    Returns a dictionary representation of the ChemicalEnvironments object

    :return:
    """
    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "coord_geoms": jsanitize(self.coord_geoms)}
def _get_param_iterator(self):
    """Return ParameterSampler instance for the given distributions"""
    return model_selection.ParameterSampler(
        self.param_distributions, self.n_iter,
        random_state=self.random_state
    )
def main():
    """Create an identical user account on a pair of satellites."""
    server_configs = (
        {'url': url, 'auth': ('admin', 'changeme'), 'verify': False}
        for url in ('https://sat1.example.com', 'https://sat2.example.com')
    )
    for server_config in server_configs:
        response = requests.post(
            server_config['url'] + '/api/v2/users',
            json.dumps({
                'user': {
                    'auth_source_id': 1,
                    'login': 'Alice',
                    'mail': 'alice@example.com',
                    'organization_ids': [get_organization_id(
                        server_config, 'Default_Organization'
                    )],
                    'password': 'hackme',
                }
            }),
            auth=server_config['auth'],
            headers={'content-type': 'application/json'},
            verify=server_config['verify'],
        )
        response.raise_for_status()
        pprint(response.json())
def push_to_remote(self, base_branch, head_branch, commit_message=""):
    """
    git push <origin> <branchname>
    """
    set_state(WORKFLOW_STATES.PUSHING_TO_REMOTE)
    cmd = ["git", "push", self.pr_remote, f"{head_branch}:{head_branch}"]
    try:
        self.run_cmd(cmd)
        set_state(WORKFLOW_STATES.PUSHED_TO_REMOTE)
    except subprocess.CalledProcessError:
        click.echo(f"Failed to push to {self.pr_remote} \u2639")
        set_state(WORKFLOW_STATES.PUSHING_TO_REMOTE_FAILED)
    else:
        gh_auth = os.getenv("GH_AUTH")
        if gh_auth:
            set_state(WORKFLOW_STATES.PR_CREATING)
            self.create_gh_pr(
                base_branch,
                head_branch,
                commit_message=commit_message,
                gh_auth=gh_auth,
            )
        else:
            set_state(WORKFLOW_STATES.PR_OPENING)
            self.open_pr(self.get_pr_url(base_branch, head_branch))
def generate_resource(config, raml_resource, parent_resource):
    """ Perform the complete configuration process for one resource.

    This function generates: ACL, view, route, resource, database model
    for a given `raml_resource`. New nefertari resource is attached to
    `parent_resource` class which is an instance of
    `nefertari.resource.Resource`.

    Things to consider:
      * Top-level resources must be collection names.
      * No resources are explicitly created for dynamic (ending with '}')
        RAML resources as they are implicitly processed by parent
        collection resources.
      * Resource nesting must look like collection/id/collection/id/...
      * Only part of resource path after last '/' is taken into account,
        thus each level of resource nesting should add one more path
        element. E.g. /stories -> /stories/{id} and not
        /stories -> /stories/mystories/{id}. Latter route will be
        generated at /stories/{id}.

    :param raml_resource: Instance of ramlfications.raml.ResourceNode.
    :param parent_resource: Parent nefertari resource object.
    """
    from .models import get_existing_model

    # Don't generate resources for dynamic routes as they are already
    # generated by their parent
    resource_uri = get_resource_uri(raml_resource)
    if is_dynamic_uri(resource_uri):
        if parent_resource.is_root:
            raise Exception("Top-level resources can't be dynamic and must "
                            "represent collections instead")
        return

    route_name = get_route_name(resource_uri)
    log.info('Configuring resource: `{}`. Parent: `{}`'.format(
        route_name, parent_resource.uid or 'root'))

    # Get DB model. If this is an attribute or singular resource,
    # we don't need to get model
    is_singular = singular_subresource(raml_resource, route_name)
    is_attr_res = attr_subresource(raml_resource, route_name)
    if not parent_resource.is_root and (is_attr_res or is_singular):
        model_cls = parent_resource.view.Model
    else:
        model_name = generate_model_name(raml_resource)
        model_cls = get_existing_model(model_name)

    resource_kwargs = {}

    # Generate ACL
    log.info('Generating ACL for `{}`'.format(route_name))
    resource_kwargs['factory'] = generate_acl(
        config, model_cls=model_cls, raml_resource=raml_resource)

    # Generate dynamic part name
    if not is_singular:
        resource_kwargs['id_name'] = dynamic_part_name(
            raml_resource=raml_resource,
            route_name=route_name,
            pk_field=model_cls.pk_field())

    # Generate REST view
    log.info('Generating view for `{}`'.format(route_name))
    view_attrs = resource_view_attrs(raml_resource, is_singular)
    resource_kwargs['view'] = generate_rest_view(
        config,
        model_cls=model_cls,
        attrs=view_attrs,
        attr_view=is_attr_res,
        singular=is_singular,
    )

    # In case of singular resource, model still needs to be generated,
    # but we store it on a different view attribute
    if is_singular:
        model_name = generate_model_name(raml_resource)
        view_cls = resource_kwargs['view']
        view_cls._parent_model = view_cls.Model
        view_cls.Model = get_existing_model(model_name)

    # Create new nefertari resource
    log.info('Creating new resource for `{}`'.format(route_name))
    clean_uri = resource_uri.strip('/')
    resource_args = (singularize(clean_uri),)

    if not is_singular:
        resource_args += (clean_uri,)

    return parent_resource.add(*resource_args, **resource_kwargs)
def get_versioned_files(cls):
    """List all files versioned by git in the current directory."""
    files = cls._git_ls_files()
    submodules = cls._list_submodules()
    for subdir in submodules:
        subdir = os.path.relpath(subdir).replace(os.path.sep, '/')
        files += add_prefix_to_each(subdir, cls._git_ls_files(subdir))
    return add_directories(files)
def _SMOTE(T, N, k, h=1.0):
    """
    Returns (N/100) * n_minority_samples synthetic minority samples.

    Parameters
    ----------
    T : array-like, shape = [n_minority_samples, n_features]
        Holds the minority samples
    N : percentage of new synthetic samples:
        n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
    k : int. Number of nearest neighbours.

    Returns
    -------
    S : Synthetic samples. array,
        shape = [(N/100) * n_minority_samples, n_features].
    """
    n_minority_samples, n_features = T.shape

    if N < 100:
        # create synthetic samples only for a subset of T.
        # TODO: select random minority samples
        N = 100

    if (N % 100) != 0:
        raise ValueError("N must be < 100 or multiple of 100")

    N = N // 100  # integer division so array shapes and indices stay ints
    n_synthetic_samples = N * n_minority_samples
    S = np.zeros(shape=(n_synthetic_samples, n_features))

    # Learn nearest neighbours
    neigh = NearestNeighbors(n_neighbors=k)
    neigh.fit(T)

    # Calculate synthetic samples
    for i in range(n_minority_samples):
        # kneighbors expects a 2D array, hence the reshape
        nn = neigh.kneighbors(T[i].reshape(1, -1), return_distance=False)
        for n in range(N):
            nn_index = choice(nn[0])
            # NOTE: nn includes T[i], we don't want to select it
            while nn_index == i:
                nn_index = choice(nn[0])

            dif = T[nn_index] - T[i]
            gap = np.random.uniform(low=0.0, high=h)
            S[n + i * N, :] = T[i, :] + gap * dif[:]

    return S
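# Toy usage sketch (assumes numpy as np, sklearn's NearestNeighbors and
# random's choice are imported, as the function above requires):
import numpy as np

T = np.random.default_rng(0).normal(size=(10, 2))  # 10 minority samples
S = _SMOTE(T, N=200, k=3)  # 200% -> 20 synthetic samples
print(S.shape)             # (20, 2)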
def middleware(self, *args, **kwargs):
    """
    Create a blueprint middleware from a decorated function.

    :param args: Positional arguments to be used while invoking the
        middleware
    :param kwargs: optional keyword args that can be used with the
        middleware.
    """

    def register_middleware(_middleware):
        future_middleware = FutureMiddleware(_middleware, args, kwargs)
        self.middlewares.append(future_middleware)
        return _middleware

    # Detect which way this was called, @middleware or @middleware('AT')
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        middleware = args[0]
        args = []
        return register_middleware(middleware)
    else:
        if kwargs.get("bp_group") and callable(args[0]):
            middleware = args[0]
            args = args[1:]
            kwargs.pop("bp_group")
            return register_middleware(middleware)
        else:
            return register_middleware
def is_consistent(self) -> bool:
    """
    Returns True if number of nodes are consistent with number of leaves
    """
    from ledger.compact_merkle_tree import CompactMerkleTree
    return self.nodeCount == CompactMerkleTree.get_expected_node_count(
        self.leafCount)
def save(self):
    """ Save any outstanding setting changes to the
    :class:`~plexapi.server.PlexServer`. This performs a full reload() of
    Settings after complete.
    """
    params = {}
    for setting in self.all():
        if setting._setValue:
            log.info('Saving PlexServer setting %s = %s'
                     % (setting.id, setting._setValue))
            params[setting.id] = quote(setting._setValue)
    if not params:
        raise BadRequest('No settings have been modified.')
    querystr = '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
    url = '%s?%s' % (self.key, querystr)
    self._server.query(url, self._server._session.put)
    self.reload()
def list_incidents(self, update_keys, session=None, lightweight=None):
    """
    Returns a list of incidents for the given events.

    :param dict update_keys: The filter to select desired markets. All
        markets that match the criteria in the filter are selected
        e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: list[resources.Incidents]
    """
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listIncidents')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.Incidents,
                                 elapsed_time, lightweight)
def format_prettytable(table):
    """Converts SoftLayer.CLI.formatting.Table instance to a prettytable."""
    for i, row in enumerate(table.rows):
        for j, item in enumerate(row):
            table.rows[i][j] = format_output(item)
    ptable = table.prettytable()
    ptable.hrules = prettytable.FRAME
    ptable.horizontal_char = '.'
    ptable.vertical_char = ':'
    ptable.junction_char = ':'
    return ptable
def start(self):
    """
    Starts this bot in a separate thread. Therefore, this call is
    non-blocking.

    It will listen to all new comments created in the
    :attr:`~subreddits` list.
    """
    super().start()

    comments_thread = BotThread(
        name='{}-comments-stream-thread'.format(self._name),
        target=self._listen_comments)
    comments_thread.start()
    self._threads.append(comments_thread)

    self.log.info('Starting comments stream ...')
def load_rule_definitions(self, ruleset_generator=False, rule_dirs=[]):
    """
    Load definition of rules declared in the ruleset

    :param ruleset_generator: when True, also list all available
        built-in rules
    :param rule_dirs: extra directories to search for rule definitions
    :return:
    """
    # Load rules from JSON files
    self.rule_definitions = {}
    for rule_filename in self.rules:
        for rule in self.rules[rule_filename]:
            if not rule.enabled and not ruleset_generator:
                continue
            self.rule_definitions[os.path.basename(rule_filename)] = \
                RuleDefinition(rule_filename, rule_dirs=rule_dirs)

    # In case of the ruleset generator, list all available built-in rules
    if ruleset_generator:
        rule_dirs.append(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'data/findings'))
        rule_filenames = []
        for rule_dir in rule_dirs:
            rule_filenames += [f for f in os.listdir(rule_dir)
                               if os.path.isfile(os.path.join(rule_dir, f))]
        for rule_filename in rule_filenames:
            if rule_filename not in self.rule_definitions:
                self.rule_definitions[os.path.basename(rule_filename)] = \
                    RuleDefinition(rule_filename)
Load definition of rules declared in the ruleset :param services: :param ip_ranges: :param aws_account_id: :param generator: :return:
def get_old(dataset_label, data_id, destination_dir=None):
    """Get the 3D data from specified dataset with specified id. Download data if necessary.

    :param dataset_label:
    :param data_id: integer or wildcards file pattern
    :param destination_dir:
    :return:
    """
    import fnmatch
    if destination_dir is None:
        destination_dir = op.join(dataset_path(get_root=True), "medical", "orig")
    destination_dir = op.expanduser(destination_dir)

    data_url, url, expected_hash, hash_path, relative_output_path = get_dataset_meta(
        dataset_label
    )
    paths = sorted(glob.glob(os.path.join(destination_dir, hash_path)))
    pathsf = fnmatch.filter(paths, data_id)
    if not pathsf:
        raise ValueError("No data found for id pattern %r" % data_id)
    datap = io3d.read(pathsf[0], dataplus_format=True)
    return datap
Get the 3D data from specified dataset with specified id. Download data if necessary. :param dataset_label: :param data_id: integer or wildcards file pattern :param destination_dir: :return:
def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index): " Fill the whole area of write_position with dots. " default_char = Char(' ', 'class:background') dot = Char('.', 'class:background') ypos = write_position.ypos xpos = write_position.xpos for y in range(ypos, ypos + write_position.height): row = screen.data_buffer[y] for x in range(xpos, xpos + write_position.width): row[x] = dot if (x + y) % 3 == 0 else default_char
Fill the whole area of write_position with dots.
def run_checker(cls, ds_loc, checker_names, verbose, criteria,
                skip_checks=None, output_filename='-',
                output_format=['text']):
    """
    Static check runner.

    @param ds_loc Dataset location (url or file)
    @param checker_names List of string names to run, should match keys of checkers dict (empty list means run all)
    @param verbose Verbosity of the output (0, 1, 2)
    @param criteria Determines failure (lenient, normal, strict)
    @param output_filename Path to the file for output
    @param skip_checks Names of checks to skip
    @param output_format Format of the output(s)

    @returns If the tests failed (based on the criteria)
    """
    all_groups = []
    cs = CheckSuite()
    # using OrderedDict is important here to preserve the order
    # of multiple datasets which may be passed in
    score_dict = OrderedDict()
    if not isinstance(ds_loc, six.string_types):
        locs = ds_loc
    # if single dataset, put in list
    else:
        locs = [ds_loc]

    # Make sure output format is a list
    if isinstance(output_format, six.string_types):
        output_format = [output_format]

    for loc in locs:
        # loop through each dataset and run specified checks
        ds = cs.load_dataset(loc)

        score_groups = cs.run(ds, skip_checks, *checker_names)
        for group in score_groups.values():
            all_groups.append(group[0])
        # TODO: consider wrapping in a proper context manager instead
        if hasattr(ds, 'close'):
            ds.close()

        if not score_groups:
            raise ValueError("No checks found, please check the name of the checker(s) and that they are installed")
        else:
            score_dict[loc] = score_groups

    # define a score limit to truncate the output to the strictness level
    # specified by the user
    if criteria == 'normal':
        limit = 2
    elif criteria == 'strict':
        limit = 1
    elif criteria == 'lenient':
        limit = 3

    for out_fmt in output_format:
        if out_fmt == 'text':
            if output_filename == '-':
                cls.stdout_output(cs, score_dict, verbose, limit)
            # need to redirect output from stdout since print functions are
            # presently used to generate the standard report output
            else:
                if len(output_format) > 1:
                    # Update file name if needed
                    output_filename = '{}.txt'.format(os.path.splitext(output_filename)[0])
                with io.open(output_filename, 'w', encoding='utf-8') as f:
                    with stdout_redirector(f):
                        cls.stdout_output(cs, score_dict, verbose, limit)

        elif out_fmt == 'html':
            # Update file name if needed
            if len(output_format) > 1 and output_filename != '-':
                output_filename = '{}.html'.format(os.path.splitext(output_filename)[0])
            cls.html_output(cs, score_dict, output_filename, ds_loc, limit)

        elif out_fmt in {'json', 'json_new'}:
            # Update file name if needed
            if len(output_format) > 1 and output_filename != '-':
                output_filename = '{}.json'.format(os.path.splitext(output_filename)[0])
            cls.json_output(cs, score_dict, output_filename, ds_loc, limit, out_fmt)

        else:
            raise TypeError('Invalid format %s' % out_fmt)

    errors_occurred = cls.check_errors(score_groups, verbose)

    return (all(cs.passtree(groups, limit) for groups in all_groups),
            errors_occurred)
Static check runner. @param ds_loc Dataset location (url or file) @param checker_names List of string names to run, should match keys of checkers dict (empty list means run all) @param verbose Verbosity of the output (0, 1, 2) @param criteria Determines failure (lenient, normal, strict) @param output_filename Path to the file for output @param skip_checks Names of checks to skip @param output_format Format of the output(s) @returns If the tests failed (based on the criteria)
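A hedged usage sketch follows; the ComplianceChecker host class and its import path, the placeholder dataset path, and the installed 'cf' checker are all assumptions rather than facts confirmed by this snippet.

# Sketch only: class name, import path, and the 'cf' checker are assumptions.
from compliance_checker.runner import ComplianceChecker

passed, errors_occurred = ComplianceChecker.run_checker(
    ds_loc='dataset.nc',            # placeholder dataset location
    checker_names=['cf'],
    verbose=1,
    criteria='normal',
    output_filename='report.html',
    output_format=['html'],
)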
def complex_fault_node(edges): """ :param edges: a list of lists of points :returns: a Node of kind complexFaultGeometry """ node = Node('complexFaultGeometry') node.append(edge_node('faultTopEdge', edges[0])) for edge in edges[1:-1]: node.append(edge_node('intermediateEdge', edge)) node.append(edge_node('faultBottomEdge', edges[-1])) return node
:param edges: a list of lists of points :returns: a Node of kind complexFaultGeometry
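A minimal sketch of building the geometry node; Point and the sibling edge_node helper are assumed to come from the same openquake-style module as complex_fault_node, and the coordinates are illustrative only.

# Hypothetical sketch: Point assumed from the same module family.
top_edge = [Point(0.0, 0.00, 0.0), Point(1.0, 0.00, 0.0)]
bottom_edge = [Point(0.0, 0.05, 10.0), Point(1.0, 0.05, 10.0)]
geom = complex_fault_node([top_edge, bottom_edge])  # no intermediate edges
print(geom.tag)  # 'complexFaultGeometry' (tag attribute assumed on Node)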
def remove_option(self, section, option): """Remove an option.""" if not section or section == DEFAULTSECT: sectdict = self._defaults else: try: sectdict = self._sections[section] except KeyError: raise NoSectionError(section) option = self.optionxform(option) existed = option in sectdict if existed: del sectdict[option] return existed
Remove an option.
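Since this mirrors the stdlib configparser API, a runnable illustration needs only the standard library.

# Runnable stdlib illustration of the same behaviour.
from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("[server]\nport = 8080\n")
print(cfg.remove_option('server', 'port'))  # True: the option existed
print(cfg.remove_option('server', 'port'))  # False: already removed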
def to_vars_dict(self): """ Return local state which is relevant for the cluster setup process. """ return { 'azure_client_id': self.client_id, 'azure_location': self.location, 'azure_secret': self.secret, 'azure_subscription_id': self.subscription_id, 'azure_tenant_id': self.tenant_id, }
Return local state which is relevant for the cluster setup process.
def collect_pac_urls(from_os_settings=True, from_dns=True, **kwargs): """ Get all the URLs that potentially yield a PAC file. :param bool from_os_settings: Look for a PAC URL from the OS settings. If a value is found and is a URL, it comes first in the returned list. Doesn't do anything on non-Windows or non-macOS/OSX platforms. :param bool from_dns: Assemble a list of PAC URL candidates using the WPAD protocol. :return: A list of URLs that should be tried in order. :rtype: list[str] """ # Deprecated in 0.8.2 from_registry = kwargs.get('from_registry') if from_registry is not None: import warnings warnings.warn('from_registry is deprecated, use from_os_settings instead.') from_os_settings = from_registry pac_urls = [] if from_os_settings: if ON_WINDOWS: url_or_path = autoconfig_url_from_registry() elif ON_DARWIN: url_or_path = autoconfig_url_from_preferences() else: url_or_path = None if url_or_path and (url_or_path.lower().startswith('http://') or url_or_path.lower().startswith('https://')): pac_urls.append(url_or_path) if from_dns: pac_urls.extend(proxy_urls_from_dns()) return pac_urls
Get all the URLs that potentially yield a PAC file. :param bool from_os_settings: Look for a PAC URL from the OS settings. If a value is found and is a URL, it comes first in the returned list. Doesn't do anything on non-Windows or non-macOS/OSX platforms. :param bool from_dns: Assemble a list of PAC URL candidates using the WPAD protocol. :return: A list of URLs that should be tried in order. :rtype: list[str]
def pc_anova(self, covariates, num_pc=5):
    """
    Calculate one-way ANOVA between the first num_pc principal components and
    known covariates. The size and index of covariates determines whether u or
    v is used.

    Parameters
    ----------
    covariates : pandas.DataFrame
        Dataframe of covariates whose index corresponds to the index of
        either u or v.

    num_pc : int
        Number of principal components to correlate with.

    Returns
    -------
    anova : pandas.Panel
        Panel with F-values and p-values.

    """
    from scipy.stats import f_oneway
    if (covariates.shape[0] == self.u.shape[0] and
            len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
        mat = self.u
    elif (covariates.shape[0] == self.v.shape[0] and
            len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
        mat = self.v
    else:
        # Guard against mat being undefined when neither index matches
        raise ValueError('covariates index must match the index of u or v')
    anova = pd.Panel(items=['fvalue', 'pvalue'],
                     major_axis=covariates.columns,
                     minor_axis=mat.columns[0:num_pc])
    for i in anova.major_axis:
        for j in anova.minor_axis:
            t = [mat[j][covariates[i] == x] for x in set(covariates[i])]
            f, p = f_oneway(*t)
            anova.ix['fvalue', i, j] = f
            anova.ix['pvalue', i, j] = p
    return anova

Calculate one-way ANOVA between the first num_pc principal components and known covariates. The size and index of covariates determines whether u or v is used. Parameters ---------- covariates : pandas.DataFrame Dataframe of covariates whose index corresponds to the index of either u or v. num_pc : int Number of principal components to correlate with. Returns ------- anova : pandas.Panel Panel with F-values and p-values.
def count_mnemonic(self, mnemonic, uwis=uwis, alias=None): """ Counts the wells that have a given curve, given the mnemonic and an alias dict. """ all_mnemonics = self.get_mnemonics([mnemonic], uwis=uwis, alias=alias) return len(list(filter(None, utils.flatten_list(all_mnemonics))))
Counts the wells that have a given curve, given the mnemonic and an alias dict.
def load_exons(self, exons, genes=None, build='37'):
    """Create exon objects and insert them into the database

    Args:
        exons(iterable(dict))
    """
    genes = genes or self.ensembl_genes(build)
    for exon in exons:
        exon_obj = build_exon(exon, genes)
        if not exon_obj:
            continue
        self.exon_collection.insert_one(exon_obj)
Create exon objects and insert them into the database Args: exons(iterable(dict))
def standard_input():
    """Generator that yields lines from standard input."""
    with click.get_text_stream("stdin") as stdin:
        while True:
            line = stdin.readline()
            if not line:
                # readline() returns '' only at end of stream; stop here so
                # the generator terminates at EOF instead of spinning
                break
            yield line.strip().encode("utf-8")
Generator that yields lines from standard input.
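A usage sketch for the generator, e.g. when the script is fed by a shell pipe.

# e.g. run as:  echo "hello" | python script.py
for raw_line in standard_input():
    print(raw_line.decode("utf-8").upper())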
def modify(db=None, sql=None): ''' Issue an SQL query to sqlite3 (with no return data), usually used to modify the database in some way (insert, delete, create, etc) CLI Example: .. code-block:: bash salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);' ''' cur = _connect(db) if not cur: return False cur.execute(sql) return True
Issue an SQL query to sqlite3 (with no return data), usually used to modify the database in some way (insert, delete, create, etc) CLI Example: .. code-block:: bash salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);'
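For comparison, a standalone sketch of the same pattern with the stdlib sqlite3 module; note that a plain connection needs an explicit commit, whereas the helper above relies on whatever its _connect configures. The database path is a placeholder.

# Standalone stdlib sketch of the same modify pattern.
import sqlite3

conn = sqlite3.connect('/tmp/test.db')
conn.execute('CREATE TABLE IF NOT EXISTS test(id INT, testdata TEXT)')
conn.commit()
conn.close()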
def _update(self):
    """compute/update all derived data

    Can be called without harm and is idempotent.

    Updates these attributes and methods:

    :attr:`origin`
       the center of the cell with index 0,0,0

    :attr:`midpoints`
       centre coordinate of each grid cell

    :meth:`interpolated`
       spline interpolation function that can generate a value for a
       coordinate
    """
    self.delta = numpy.array(list(
        map(lambda e: (e[-1] - e[0]) / (len(e) - 1), self.edges)))
    self.midpoints = self._midpoints(self.edges)
    self.origin = numpy.array(list(map(lambda m: m[0], self.midpoints)))
    if self.__interpolated is not None:
        # only update if we are using it
        self.__interpolated = self._interpolationFunctionFactory()

compute/update all derived data Can be called without harm and is idempotent. Updates these attributes and methods: :attr:`origin` the center of the cell with index 0,0,0 :attr:`midpoints` centre coordinate of each grid cell :meth:`interpolated` spline interpolation function that can generate a value for a coordinate
def save_direction(self, rootpath, raw=False, as_int=False): """ Saves the direction of the slope to a file """ self.save_array(self.direction, None, 'ang', rootpath, raw, as_int=as_int)
Saves the direction of the slope to a file
def AAS(cpu):
    """
    ASCII Adjust AL after subtraction.

    Adjusts the result of the subtraction of two unpacked BCD values to create an unpacked
    BCD result. The AL register is the implied source and destination operand for this
    instruction. The AAS instruction is only useful when it follows a SUB instruction that
    subtracts (binary subtraction) one unpacked BCD value from another and stores a byte
    result in the AL register. The AAS instruction then adjusts the contents of the AL
    register to contain the correct 1-digit unpacked BCD result. If the subtraction
    produced a decimal carry, the AH register is decremented by 1, and the CF and AF flags
    are set. If no decimal carry occurred, the CF and AF flags are cleared, and the AH
    register is unchanged. In either case, the AL register is left with its top nibble set
    to 0. The AF and CF flags are set to 1 if there is a decimal borrow; otherwise, they
    are cleared to 0. This instruction executes as described in compatibility mode and
    legacy mode. It is not valid in 64-bit mode.::

            IF ((AL AND 0FH) > 9) OR AF = 1
            THEN
                AX = AX - 6;
                AH = AH - 1;
                AF = 1;
                CF = 1;
            ELSE
                CF = 0;
                AF = 0;
            FI;
            AL = AL AND 0FH;

    :param cpu: current CPU.
    """
    if (cpu.AL & 0x0F > 9) or cpu.AF == 1:
        cpu.AX = cpu.AX - 6
        cpu.AH = cpu.AH - 1
        cpu.AF = True
        cpu.CF = True
    else:
        cpu.AF = False
        cpu.CF = False
    cpu.AL = cpu.AL & 0x0f

ASCII Adjust AL after subtraction. Adjusts the result of the subtraction of two unpacked BCD values to create an unpacked BCD result. The AL register is the implied source and destination operand for this instruction. The AAS instruction is only useful when it follows a SUB instruction that subtracts (binary subtraction) one unpacked BCD value from another and stores a byte result in the AL register. The AAS instruction then adjusts the contents of the AL register to contain the correct 1-digit unpacked BCD result. If the subtraction produced a decimal carry, the AH register is decremented by 1, and the CF and AF flags are set. If no decimal carry occurred, the CF and AF flags are cleared, and the AH register is unchanged. In either case, the AL register is left with its top nibble set to 0. The AF and CF flags are set to 1 if there is a decimal borrow; otherwise, they are cleared to 0. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: IF ((AL AND 0FH) > 9) OR AF = 1 THEN AX = AX - 6; AH = AH - 1; AF = 1; CF = 1; ELSE CF = 0; AF = 0; FI; AL = AL AND 0FH; :param cpu: current CPU.
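A standalone sketch of the same adjustment on a plain 16-bit AX value and an AF flag, with no CPU object; it returns the adjusted (AX, AF, CF) triple and follows the pseudocode above, including the order in which AH is decremented after AX.

# Standalone sketch of the AAS adjustment (no CPU object needed).
def aas_sketch(ax, af):
    al = ax & 0xFF
    if (al & 0x0F) > 9 or af:
        ax = (ax - 6) & 0xFFFF
        ah = ((ax >> 8) - 1) & 0xFF   # AH decremented after AX -= 6
        af = cf = True
    else:
        ah = (ax >> 8) & 0xFF
        af = cf = False
    al = ax & 0x0F                    # top nibble of AL is always cleared
    return (ah << 8) | al, af, cf

# After 3 - 4 the low byte holds 0xFF with AF set; AAS yields AL = 9 and
# borrows from AH (0x00 wraps to 0xFF).
print(hex(aas_sketch(0x00FF, True)[0]))  # 0xff09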
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString): """Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default="("); can also be a pyparsing expression - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - content - expression for items within the nested lists (default=None) - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the ignoreExpr argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an Or or MatchFirst. The default is quotedString, but if no expressions are to be ignored, then pass None for this argument. """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: if isinstance(opener,basestring) and isinstance(closer,basestring): if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) else: ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) return ret
Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default="("); can also be a pyparsing expression - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - content - expression for items within the nested lists (default=None) - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the ignoreExpr argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an Or or MatchFirst. The default is quotedString, but if no expressions are to be ignored, then pass None for this argument.
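A usage sketch with the helper as shipped in pyparsing; the nesting of the returned list mirrors the parentheses in the input.

# Usage sketch with pyparsing's nestedExpr.
from pyparsing import nestedExpr

result = nestedExpr().parseString("(a (b c) d)")
print(result.asList())  # [['a', ['b', 'c'], 'd']]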
def page(self, date_created_before=values.unset, date_created=values.unset, date_created_after=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of RecordingInstance records from the API. Request is executed immediately :param date date_created_before: The `YYYY-MM-DD` value of the resources to read :param date date_created: The `YYYY-MM-DD` value of the resources to read :param date date_created_after: The `YYYY-MM-DD` value of the resources to read :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of RecordingInstance :rtype: twilio.rest.api.v2010.account.call.recording.RecordingPage """ params = values.of({ 'DateCreated<': serialize.iso8601_date(date_created_before), 'DateCreated': serialize.iso8601_date(date_created), 'DateCreated>': serialize.iso8601_date(date_created_after), 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return RecordingPage(self._version, response, self._solution)
Retrieve a single page of RecordingInstance records from the API. Request is executed immediately :param date date_created_before: The `YYYY-MM-DD` value of the resources to read :param date date_created: The `YYYY-MM-DD` value of the resources to read :param date date_created_after: The `YYYY-MM-DD` value of the resources to read :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of RecordingInstance :rtype: twilio.rest.api.v2010.account.call.recording.RecordingPage
def _updateViewer(self, force=False): """Updates the viewer dialog. If dialog is not visible and force=False, does nothing. Otherwise, checks the mtime of the current purrer index.html file against self._viewer_timestamp. If it is newer, reloads it. """ if not force and not self.viewer_dialog.isVisible(): return # default text if nothing is found path = self.purrer.indexfile mtime = self.fileModTime(path) # return if file is older than our content if mtime and mtime <= (self._viewer_timestamp or 0): return busy = BusyIndicator() self.viewer_dialog.setDocument(path, empty= "<P>Nothing in the log yet. Try adding some log entries.</P>") self.viewer_dialog.reload() self.viewer_dialog.setLabel("""<P>Below is your full HTML-rendered log. Note that this is only a bare-bones viewer, so only a limited set of links will work. For a fully-functional view, use a proper HTML browser to look at the index file residing here:<BR> <tt>%s</tt></P> """ % self.purrer.indexfile) self._viewer_timestamp = mtime
Updates the viewer dialog. If dialog is not visible and force=False, does nothing. Otherwise, checks the mtime of the current purrer index.html file against self._viewer_timestamp. If it is newer, reloads it.
def render(self, data, accepted_media_type=None, renderer_context=None): """ Renders `data` into serialized XML. """ if data is None: return '' stream = StringIO() xml = SimplerXMLGenerator(stream, self.charset) xml.startDocument() xml.startElement(self.root_tag_name, {}) self._to_xml(xml, data) xml.endElement(self.root_tag_name) xml.endDocument() return stream.getvalue()
Renders `data` into serialized XML.
def get_inasafe_default_value_qsetting( qsetting, category, inasafe_field_key): """Helper method to get the inasafe default value from qsetting. :param qsetting: QSetting. :type qsetting: QSetting :param category: Category of the default value. It can be global or recent. Global means the global setting for default value. Recent means the last set custom for default value from the user. :type category: str :param inasafe_field_key: Key for the field. :type inasafe_field_key: str :returns: Value of the inasafe_default_value. :rtype: float """ key = 'inasafe/default_value/%s/%s' % (category, inasafe_field_key) default_value = qsetting.value(key) if default_value is None: if category == GLOBAL: # If empty for global setting, use default one. inasafe_field = definition(inasafe_field_key) default_value = inasafe_field.get('default_value', {}) return default_value.get('default_value', zero_default_value) return zero_default_value try: return float(default_value) except ValueError: return zero_default_value
Helper method to get the inasafe default value from qsetting. :param qsetting: QSetting. :type qsetting: QSetting :param category: Category of the default value. It can be global or recent. Global means the global setting for default value. Recent means the last set custom for default value from the user. :type category: str :param inasafe_field_key: Key for the field. :type inasafe_field_key: str :returns: Value of the inasafe_default_value. :rtype: float
def subvolume_get_default(path): ''' Get the default subvolume of the filesystem path path Mount point for the subvolume CLI Example: .. code-block:: bash salt '*' btrfs.subvolume_get_default /var/volumes/tmp ''' cmd = ['btrfs', 'subvolume', 'get-default', path] res = __salt__['cmd.run_all'](cmd) salt.utils.fsutils._verify_run(res) line = res['stdout'].strip() # The ID is the second parameter, and the name the last one, or # '(FS_TREE)' # # When the default one is set: # ID 5 (FS_TREE) # # When we manually set a different one (var): # ID 257 gen 8 top level 5 path var # id_ = line.split()[1] name = line.split()[-1] return { 'id': id_, 'name': name, }
Get the default subvolume of the filesystem path path Mount point for the subvolume CLI Example: .. code-block:: bash salt '*' btrfs.subvolume_get_default /var/volumes/tmp
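A standalone illustration of the output parsing, using the sample line from the comment above instead of invoking btrfs.

# Parsing illustration only; no btrfs invocation.
line = 'ID 257 gen 8 top level 5 path var'
print({'id': line.split()[1], 'name': line.split()[-1]})
# {'id': '257', 'name': 'var'}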
def clean_proced(self, proced):
    """Small helper function to delete the features from the final
    dictionary. These features are mostly interesting for debugging but
    won't be relevant for most users.
    """
    keys_to_drop = ('all_countries', 'matrix', 'all_confidence',
                    'place_confidence', 'text', 'label', 'features')
    for loc in proced:
        for key in keys_to_drop:
            # pop with a default replaces the repeated try/del blocks
            loc.pop(key, None)
    return proced
Small helper function to delete the features from the final dictionary. These features are mostly interesting for debugging but won't be relevant for most users.
def assemble(self, ops, target=None): """ Assemble a series of operations and labels into bytecode, analyse its stack usage and replace the bytecode and stack size of this code object. Can also (optionally) change the target python version. Arguments: ops(list): The opcodes (and labels) to assemble into bytecode. target: The opcode specification of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. Returns: CodeObject: A reference to this :class:`CodeObject`. """ self.internals = target = get_py_internals(target, self.internals) self.co_code = assemble(ops, target) self.co_stacksize = calculate_max_stack_depth(ops, target) return self
Assemble a series of operations and labels into bytecode, analyse its stack usage and replace the bytecode and stack size of this code object. Can also (optionally) change the target python version. Arguments: ops(list): The opcodes (and labels) to assemble into bytecode. target: The opcode specification of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. Returns: CodeObject: A reference to this :class:`CodeObject`.
def write_dfile(self): """ Write the generated d_file to a temporary file. """ f_in = self.tempfiles.get_tempfile(prefix="bmds-", suffix=".(d)") with open(f_in, "w") as f: f.write(self.as_dfile()) return f_in
Write the generated d_file to a temporary file.
def _get_unknown_value(self): """ Finds the smallest integer value >=0 that is not in `labels` :return: Value that is not in the labels :rtype: int """ label_set = set(self.labels) value = 0 while value in label_set: value += 1 return value
Finds the smallest integer value >=0 that is not in `labels` :return: Value that is not in the labels :rtype: int
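The same scan, standalone: the smallest integer >= 0 absent from the labels.

labels = [0, 1, 3]
label_set = set(labels)
value = 0
while value in label_set:
    value += 1
print(value)  # 2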
def normalize_paths(value, parent=os.curdir): """Parse a comma-separated list of paths. Return a list of absolute paths. """ if not value: return [] if isinstance(value, list): return value paths = [] for path in value.split(','): path = path.strip() if '/' in path: path = os.path.abspath(os.path.join(parent, path)) paths.append(path.rstrip('/')) return paths
Parse a comma-separated list of paths. Return a list of absolute paths.
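An illustration of the branching: entries containing '/' are resolved against parent, plain names are kept as-is, and trailing slashes are stripped. The directory names are placeholders.

print(normalize_paths('src/, docs/api, setup.cfg', parent='/home/user/proj'))
# ['/home/user/proj/src', '/home/user/proj/docs/api', 'setup.cfg']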
def inv(z: int) -> int: """$= z^{-1} mod q$, for z != 0""" # Adapted from curve25519_athlon.c in djb's Curve25519. z2 = z * z % q # 2 z9 = pow2(z2, 2) * z % q # 9 z11 = z9 * z2 % q # 11 z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0 z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0 z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ... z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0 return pow2(z2_250_0, 5) * z11 % q
$= z^{-1} mod q$, for z != 0
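A sanity check for the addition chain; q and the pow2 helper are assumed to come from the same ed25519-style module as inv, with the usual definitions sketched here.

# Assumed module context for inv: the curve25519 prime and a repeated-
# squaring helper.
q = 2 ** 255 - 19

def pow2(x, p):
    """Square x p times modulo q."""
    while p > 0:
        x = x * x % q
        p -= 1
    return x

assert inv(5) * 5 % q == 1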
def describe(cwd, rev='HEAD', user=None, password=None, ignore_retcode=False, output_encoding=None): ''' Returns the `git-describe(1)`_ string (or the SHA1 hash if there are no tags) for the given revision. cwd The path to the git checkout rev : HEAD The revision to describe user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-describe(1)`: http://git-scm.com/docs/git-describe CLI Examples: .. code-block:: bash salt myminion git.describe /path/to/repo salt myminion git.describe /path/to/repo develop ''' cwd = _expand_path(cwd, user) command = ['git', 'describe'] if _LooseVersion(version(versioninfo=False)) >= _LooseVersion('1.5.6'): command.append('--always') command.append(rev) return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding)['stdout']
Returns the `git-describe(1)`_ string (or the SHA1 hash if there are no tags) for the given revision. cwd The path to the git checkout rev : HEAD The revision to describe user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-describe(1)`: http://git-scm.com/docs/git-describe CLI Examples: .. code-block:: bash salt myminion git.describe /path/to/repo salt myminion git.describe /path/to/repo develop
def get_list_store(data_frame): ''' Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame` and a `gtk.ListStore` matching the contents of the data frame. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (tuple) : The first element is a data frame as returned by `get_py_dtypes` and the second element is a `gtk.ListStore` matching the contents of the data frame. ''' df_py_dtypes = get_py_dtypes(data_frame) list_store = gtk.ListStore(*df_py_dtypes.dtype) for i, row_i in data_frame.iterrows(): list_store.append(row_i.tolist()) return df_py_dtypes, list_store
Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame` and a `gtk.ListStore` matching the contents of the data frame. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (tuple) : The first element is a data frame as returned by `get_py_dtypes` and the second element is a `gtk.ListStore` matching the contents of the data frame.
def GenerateDateTripsDeparturesList(self, date_start, date_end): """Return a list of (date object, number of trips, number of departures). The list is generated for dates in the range [date_start, date_end). Args: date_start: The first date in the list, a date object date_end: The first date after the list, a date object Returns: a list of (date object, number of trips, number of departures) tuples """ service_id_to_trips = defaultdict(lambda: 0) service_id_to_departures = defaultdict(lambda: 0) for trip in self.GetTripList(): headway_start_times = trip.GetFrequencyStartTimes() if headway_start_times: trip_runs = len(headway_start_times) else: trip_runs = 1 service_id_to_trips[trip.service_id] += trip_runs service_id_to_departures[trip.service_id] += ( (trip.GetCountStopTimes() - 1) * trip_runs) date_services = self.GetServicePeriodsActiveEachDate(date_start, date_end) date_trips = [] for date, services in date_services: day_trips = sum(service_id_to_trips[s.service_id] for s in services) day_departures = sum( service_id_to_departures[s.service_id] for s in services) date_trips.append((date, day_trips, day_departures)) return date_trips
Return a list of (date object, number of trips, number of departures). The list is generated for dates in the range [date_start, date_end). Args: date_start: The first date in the list, a date object date_end: The first date after the list, a date object Returns: a list of (date object, number of trips, number of departures) tuples
def reset(self): '''Reset stream.''' self._text = None self._markdown = False self._channel = Incoming.DEFAULT_CHANNEL self._attachments = [] return self
Reset stream.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'limit') and self.limit is not None: _dict['limit'] = self.limit return _dict
Return a json dictionary representing this model.
def _is_json_serialized_jws(self, json_jws):
    """
    Check if we've got a JSON serialized signed JWT.

    :param json_jws: The message
    :return: True/False
    """
    json_ser_keys = {"payload", "signatures"}
    flattened_json_ser_keys = {"payload", "signature"}
    keys = set(json_jws.keys())
    # Either the general or the flattened JWS JSON serialization qualifies
    return (json_ser_keys.issubset(keys)
            or flattened_json_ser_keys.issubset(keys))
Check if we've got a JSON serialized signed JWT. :param json_jws: The message :return: True/False
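A quick illustration; since self is unused, the plain function can be exercised directly with None as the receiver.

print(_is_json_serialized_jws(None, {"payload": "e30", "signatures": []}))   # True
print(_is_json_serialized_jws(None, {"payload": "e30", "signature": "abc"})) # True
print(_is_json_serialized_jws(None, {"header": "e30"}))                      # False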
def _trace_summary(self):
    """
    Summarizes the trace of values used to update the DynamicArgs and
    the arguments subsequently returned. May be used to implement the
    summary method.
    """
    for (i, (val, args)) in enumerate(self.trace):
        if args is StopIteration:
            info = "Terminated"
        else:
            pprint = ','.join('{' + ','.join('%s=%r' % (k, v)
                                             for (k, v) in arg.items()) + '}'
                              for arg in args)
            info = "exploring arguments [%s]" % pprint
        if i == 0:
            print("Step %d: Initially %s." % (i, info))
        else:
            print("Step %d: %s after receiving input(s) %s."
                  % (i, info.capitalize(), val))
Summarizes the trace of values used to update the DynamicArgs and the arguments subsequently returned. May be used to implement the summary method.
def add_network_profile(self, obj, params): """Add an AP profile for connecting to afterward.""" network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True) network_id = network_id.strip() params.process_akm() self._send_cmd_to_wpas( obj['name'], 'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid)) key_mgmt = '' if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]: key_mgmt = 'WPA-PSK' elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]: key_mgmt = 'WPA-EAP' else: key_mgmt = 'NONE' if key_mgmt: self._send_cmd_to_wpas( obj['name'], 'SET_NETWORK {} key_mgmt {}'.format( network_id, key_mgmt)) proto = '' if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]: proto = 'WPA' elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]: proto = 'RSN' if proto: self._send_cmd_to_wpas( obj['name'], 'SET_NETWORK {} proto {}'.format( network_id, proto)) if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]: self._send_cmd_to_wpas( obj['name'], 'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key)) return params
Add an AP profile for connecting to afterward.
def reject_sender(self, link_handle, pn_condition=None): """Rejects the SenderLink, and destroys the handle.""" link = self._sender_links.get(link_handle) if not link: raise Exception("Invalid link_handle: %s" % link_handle) link.reject(pn_condition) # note: normally, link.destroy() cannot be called from a callback, # but this link was never made available to the application so this # link is only referenced by the connection link.destroy()
Rejects the SenderLink, and destroys the handle.
def boundaries(self, boundaryEdges=True, featureAngle=65, nonManifoldEdges=True): """ Return an ``Actor`` that shows the boundary lines of an input mesh. :param bool boundaryEdges: Turn on/off the extraction of boundary edges. :param float featureAngle: Specify the feature angle for extracting feature edges. :param bool nonManifoldEdges: Turn on/off the extraction of non-manifold edges. """ fe = vtk.vtkFeatureEdges() fe.SetInputData(self.polydata()) fe.SetBoundaryEdges(boundaryEdges) if featureAngle: fe.FeatureEdgesOn() fe.SetFeatureAngle(featureAngle) else: fe.FeatureEdgesOff() fe.SetNonManifoldEdges(nonManifoldEdges) fe.ColoringOff() fe.Update() return Actor(fe.GetOutput(), c="p").lw(5)
Return an ``Actor`` that shows the boundary lines of an input mesh. :param bool boundaryEdges: Turn on/off the extraction of boundary edges. :param float featureAngle: Specify the feature angle for extracting feature edges. :param bool nonManifoldEdges: Turn on/off the extraction of non-manifold edges.
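A hedged usage sketch; load and show are assumed from the vtkplotter package this Actor method appears to belong to, and the mesh path is a placeholder.

# Sketch only: load/show assumed from vtkplotter; 'mesh.ply' is a placeholder.
from vtkplotter import load, show

mesh = load('mesh.ply')
edges = mesh.boundaries(featureAngle=65)
show(mesh, edges)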
def get(self, flex_sched_rule_id): """Retrieve the information for a flexscheduleRule entity.""" path = '/'.join(['flexschedulerule', flex_sched_rule_id]) return self.rachio.get(path)
Retrieve the information for a flexscheduleRule entity.
def _get_network_vswitch_map_by_port_id(self, port_id): """Get the vswitch name for the received port id.""" for network_id, vswitch in six.iteritems(self._network_vswitch_map): if port_id in vswitch['ports']: return (network_id, vswitch) # If the port was not found, just return (None, None) return (None, None)
Get the vswitch name for the received port id.
def guess_export_format(filename, data, **kwargs):
    '''
    guess_export_format(filename, data) attempts to guess the export file format for the given
      filename and data (to be exported); it does this guessing by looking at the file extension
      and using registered sniff-tests from exporters. It will not attempt to save the file, so if
      the extension of the filename is missing, it is less likely that this function will deduce
      the file-type (though save will often succeed at extracting the data by trying all types
      exhaustively). If guess_export_format cannot deduce the format, it returns None.

    Note that if the filename has an extension that is recognized by neuropythy but the data itself
      is inappropriate for that format, this function will never look beyond the extension in the
      filename; neither this function nor save perform that level of deduction.

    Keyword arguments that are passed to save should also be passed to guess_export_format.
    '''
    # First try file endings
    (_, filename) = os.path.split(filename)
    fnm = filename.lower()
    # to make sure we get the most specific ending, sort the exporters by their length
    es = sorted(((k, e) for (k, es) in six.iteritems(exporters) for e in es[1]),
                key=lambda x: -len(x[1]))
    for (k, e) in es:
        if fnm.endswith(('.' + e) if e[0] != '.' else e):
            return k
    # that didn't work; let's check the sniffers
    for (k, (_, _, sniff)) in six.iteritems(exporters):
        try:
            if sniff(filename, data, **kwargs):
                return k
        except Exception:
            pass
    return None

guess_export_format(filename, data) attempts to guess the export file format for the given filename and data (to be exported); it does this guessing by looking at the file extension and using registered sniff-tests from exporters. It will not attempt to save the file, so if the extension of the filename is missing, it is less likely that this function will deduce the file-type (though save will often succeed at extracting the data by trying all types exhaustively). If guess_export_format cannot deduce the format, it returns None. Note that if the filename has an extension that is recognized by neuropythy but the data itself is inappropriate for that format, this function will never look beyond the extension in the filename; neither this function nor save perform that level of deduction. Keyword arguments that are passed to save should also be passed to guess_export_format.
def _run_lint_on_file(file_path, linter_functions, tool_options, fix_what_you_can): """Run each function in linter_functions on filename. If fix_what_you_can is specified, then the first error that has a possible replacement will be automatically fixed on this file. """ with io.open(file_path, "r+", encoding="utf-8") as found_file: file_contents = found_file.read() file_lines = file_contents.splitlines(True) try: errors = lint(file_path[len(os.getcwd()) + 1:], file_contents, linter_functions, **tool_options) except RuntimeError as err: msg = ("""RuntimeError in processing """ """{0} - {1}""".format(file_path, str(err))) errors = [("polysquarelinter/failure", LinterFailure(msg, 0, None))] if fix_what_you_can: for error_index, error in enumerate(errors): if error[1].replacement is not None: _apply_replacement(error, found_file, file_lines) errors[error_index] = (error[0], LinterFailure(error[1].description + " ... FIXED", error[1].line, error[1].replacement)) break return [FileLinterFailure(file_path, e) for e in errors]
Run each function in linter_functions on filename. If fix_what_you_can is specified, then the first error that has a possible replacement will be automatically fixed on this file.