code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def parse_section_packages__find(self, section_options):
    """Parse the ``packages.find`` configuration file section.

    To be used in conjunction with ``_parse_packages()``.

    :param dict section_options: raw option values for the section
    :return: keyword arguments suitable for ``setuptools.find_packages``
    :rtype: dict
    """
    section_data = self._parse_section_to_dict(section_options, self._parse_list)
    valid_keys = {'where', 'include', 'exclude'}
    find_kwargs = {
        key: value
        for key, value in section_data.items()
        if key in valid_keys and value
    }
    where = find_kwargs.get('where')
    if where is not None:
        # ``where`` is parsed as a list, but find_packages expects a single dir.
        find_kwargs['where'] = where[0]
    return find_kwargs
Parses `packages.find` configuration file section. To be used in conjunction with _parse_packages(). :param dict section_options:
def fromkeys(cls, iterable, value, **kwargs):
    """Return a new pqdict mapping each key in *iterable* to the same *value*."""
    pairs = ((key, value) for key in iterable)
    return cls(pairs, **kwargs)
Return a new pqdict mapping keys from an iterable to the same value.
def authorize_security_group_egress(self, group_id, ip_protocol,
                                    from_port=None, to_port=None,
                                    src_group_id=None, cidr_ip=None):
    """Add one or more egress rules to a VPC security group.

    Permits instances in the group to send traffic to destination CIDR
    ranges or to destination security groups in the same VPC.

    :param group_id: the id of the security group to modify
    :param ip_protocol: protocol name or number for the rule
    :param from_port: optional start of the port range
    :param to_port: optional end of the port range
    :param src_group_id: optional destination security group id
    :param cidr_ip: optional destination CIDR IP range
    :return: the status returned by the AuthorizeSecurityGroupEgress call
    """
    params = {
        'GroupId': group_id,
        'IpPermissions.1.IpProtocol': ip_protocol,
    }
    # Only include the optional rule components that were actually supplied.
    optional = (
        ('IpPermissions.1.FromPort', from_port),
        ('IpPermissions.1.ToPort', to_port),
        ('IpPermissions.1.Groups.1.GroupId', src_group_id),
        ('IpPermissions.1.IpRanges.1.CidrIp', cidr_ip),
    )
    for name, value in optional:
        if value is not None:
            params[name] = value
    return self.get_status('AuthorizeSecurityGroupEgress', params, verb='POST')
The action adds one or more egress rules to a VPC security group. Specifically, this action permits instances in a security group to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups in the same VPC.
def _ParseKeysFromFindSpecs(self, parser_mediator, win_registry, find_specs):
    """Parse the Registry keys located via the given find specifications.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        win_registry (dfwinreg.WinRegistryKey): root Windows Registry key.
        find_specs (dfwinreg.FindSpecs): keys to search for.
    """
    searcher = dfwinreg_registry_searcher.WinRegistrySearcher(win_registry)
    key_paths = searcher.Find(find_specs=find_specs)
    for registry_key_path in key_paths:
        # Honor an abort request between keys.
        if parser_mediator.abort:
            break
        registry_key = searcher.GetKeyByPath(registry_key_path)
        self._ParseKey(parser_mediator, registry_key)
Parses the Registry keys from FindSpecs. Args: parser_mediator (ParserMediator): parser mediator. win_registry (dfwinreg.WinRegistryKey): root Windows Registry key. find_specs (dfwinreg.FindSpecs): Keys to search for.
def get_token(self):
    """Authenticate with Cerberus via STS identity and return a client token.

    :return: the Cerberus client token string
    """
    signed_headers = self._get_v4_signed_headers()
    # Layer the static client headers on top of the signed AWS headers.
    for header in self.HEADERS:
        signed_headers[header] = self.HEADERS[header]
    resp = post_with_retry(self.cerberus_url + '/v2/auth/sts-identity',
                           headers=signed_headers)
    throw_if_bad_response(resp)
    # Parse the JSON body once instead of re-parsing it for every field.
    body = resp.json()
    token = body['client_token']
    iam_principal_arn = body['metadata']['aws_iam_principal_arn']
    message = 'Successfully authenticated with Cerberus as {}'.format(iam_principal_arn)
    if self.verbose:
        print(message, file=sys.stderr)
    logger.info(message)
    return token
Returns a client token from Cerberus
def assert_boolean_false(expr, msg_fmt="{msg}"):
    """Fail the test unless the expression is the constant False.

    >>> assert_boolean_false(False)
    >>> assert_boolean_false(0)
    Traceback (most recent call last):
        ...
    AssertionError: 0 is not False

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * expr - tested expression
    """
    if expr is False:
        return
    msg = "{!r} is not False".format(expr)
    fail(msg_fmt.format(msg=msg, expr=expr))
Fail the test unless the expression is the constant False. >>> assert_boolean_false(False) >>> assert_boolean_false(0) Traceback (most recent call last): ... AssertionError: 0 is not False The following msg_fmt arguments are supported: * msg - the default error message * expr - tested expression
def _git_config(cwd, user, password, output_encoding=None):
    """Return the path to the git config file for *cwd*, caching per context.

    The resolved path is memoized in ``__context__`` keyed by the cwd.
    """
    contextkey = 'git.config.' + cwd
    if contextkey in __context__:
        return __context__[contextkey]
    git_dir = rev_parse(cwd, opts=['--git-dir'], user=user, password=password,
                        ignore_retcode=True, output_encoding=output_encoding)
    # rev_parse may return either an absolute or a cwd-relative git dir.
    if os.path.isabs(git_dir):
        parts = (git_dir, 'config')
    else:
        parts = (cwd, git_dir, 'config')
    __context__[contextkey] = os.path.join(*parts)
    return __context__[contextkey]
Helper to retrieve git config options
def random_walk(self, path_length, alpha=0, rand=None, start=None):
    """Return a truncated random walk as a list of node-id strings.

    :param path_length: maximum length of the walk
    :param alpha: probability of restarting at the walk's origin
    :param rand: random.Random instance to use; a fresh one is created when
        omitted.  (The previous default was a single shared Random instance,
        a mutable-default that leaked RNG state across callers.)
    :param start: node to start from; a uniformly random node when omitted
    """
    if rand is None:
        rand = random.Random()
    G = self
    # `start is not None` instead of truthiness: node id 0 is a valid start.
    if start is not None:
        path = [start]
    else:
        path = [rand.choice(list(G.keys()))]
    while len(path) < path_length:
        cur = path[-1]
        neighbors = G[cur]
        if not neighbors:
            # Dead end: truncate the walk.
            break
        if rand.random() >= alpha:
            path.append(rand.choice(neighbors))
        else:
            # Restart at the origin with probability alpha.
            path.append(path[0])
    return [str(node) for node in path]
Returns a truncated random walk. path_length: Length of the random walk. alpha: probability of restarts. start: the start node of the random walk.
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
    """Return the dtype-converted output tensors for *fluents*."""
    tensors = cls._tensors(fluents)
    return tuple(map(cls._dtype, tensors))
Returns output tensors for `fluents`.
def pdf(self, x_test):
    """Compute the mixture probability density at every row of ``x_test``.

    ``x_test`` is reshaped to (-1, D) where D is the dimensionality of the
    stored data; the per-component densities are combined with the mixture
    weights either jointly (fully dimensional) or per dimension.
    """
    N, D = self.data.shape
    x_test = np.asfortranarray(x_test).reshape([-1, D])
    pdfs = self._individual_pdfs(x_test)
    if self.fully_dimensional:
        # Joint density: product over dimensions, then weighted sum over components.
        weighted = np.prod(pdfs, axis=-1) * self.weights[None, :]
        pdfs = np.sum(weighted, axis=-1)
    else:
        # Per-dimension mixture: weighted sum over components, product over dimensions.
        weighted = pdfs * self.weights[None, :, None]
        pdfs = np.prod(np.sum(weighted, axis=-2), axis=-1)
    return pdfs
Computes the probability density function at all x_test
def reload(self, client=None):
    """Re-fetch the ACL entries from Cloud Storage.

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the ACL's parent.
    """
    resource_path = self.reload_path
    client = self._require_client(client)
    if self.user_project is not None:
        params = {"userProject": self.user_project}
    else:
        params = {}
    # Clear before the request so a failure leaves no stale entries.
    self.entities.clear()
    response = client._connection.api_request(
        method="GET", path=resource_path, query_params=params
    )
    self.loaded = True
    for entry_dict in response.get("items", ()):
        self.add_entity(self.entity_from_dict(entry_dict))
Reload the ACL data from Cloud Storage. If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the ACL's parent.
def stats_for(self, dt):
    """Return stats for the month containing *dt*.

    :param dt: a :class:`datetime.datetime` inside the month of interest
    :raises TypeError: when *dt* is not a datetime object
    """
    if not isinstance(dt, datetime):
        raise TypeError('stats_for requires a datetime object!')
    month_path = dt.strftime('%Y/%m')
    return self._client.get('{}/stats/'.format(month_path))
Returns stats for the month containing the given datetime
def _get_dialog_title(self):
    """Return the translated title string for the options dialog."""
    # Uppercase only the first character; .capitalize() would lowercase the rest.
    title_filetype = self.filetype[0].upper() + self.filetype[1:]
    # "print" is not an export, so it gets no " export" suffix.
    title_export = "" if self.filetype == "print" else " export"
    return _("{filetype}{export} options").format(
        filetype=title_filetype, export=title_export)
Returns title string
def set_centralized_assembled_rows_cols(self, irn, jcn):
    """Set assembled matrix indices on processor 0.

    The row and column indices (irn & jcn) should be one based.

    :param irn: array of one-based row indices
    :param jcn: array of one-based column indices, same size as ``irn``
    """
    if self.myid != 0:
        # Only the host (rank 0) process holds the centralized matrix.
        return
    assert irn.size == jcn.size
    # NOTE(review): _refs presumably keeps the arrays alive while the native
    # solver holds raw pointers to them — confirm against the class docs.
    self._refs.update(irn=irn, jcn=jcn)
    self.id.nz = irn.size
    self.id.irn = self.cast_array(irn)
    self.id.jcn = self.cast_array(jcn)
Set assembled matrix indices on processor 0. The row and column indices (irn & jcn) should be one based.
def create_pre_execute(task_params, parameter_map):
    """Build the code block run by the GPTool Execute method before job submission.

    :param task_params: a list of task parameters from the task info structure.
    :param parameter_map: mapping of data-type name to template provider.
    :return: a string with the generated code block.
    """
    fragments = [_PRE_EXECUTE_INIT_TEMPLATE]
    for task_param in task_params:
        # Output parameters are handled after execution, not here.
        if task_param['direction'].upper() == 'OUTPUT':
            continue
        data_type = task_param['type'].upper()
        if 'dimensions' in task_param:
            data_type += 'ARRAY'
        if data_type in parameter_map:
            template = parameter_map[data_type].pre_execute()
            fragments.append(template.substitute(task_param))
    fragments.append(_PRE_EXECUTE_CLEANUP_TEMPLATE)
    return ''.join(fragments)
Builds the code block for the GPTool Execute method before the job is submitted based on the input task_params. :param task_params: A list of task parameters from the task info structure. :return: A string representing the code block to the GPTool Execute method.
def get_cfg(ast_func):
    """Traverse the AST of a function and build the corresponding CFG.

    :param ast_func: The AST representation of function
    :type ast_func: ast.Function
    :returns: The CFG representation of the function
    :rtype: cfg.Function
    """
    cfg_func = cfg.Function()
    # Mirror the function's input and output variables onto the CFG function.
    for ast_var in ast_func.input_variable_list:
        cfg_var = cfg_func.get_variable(ast_var.name)
        cfg_func.add_input_variable(cfg_var)
    for ast_var in ast_func.output_variable_list:
        cfg_var = cfg_func.get_variable(ast_var.name)
        cfg_func.add_output_variable(cfg_var)
    bb_start = cfg.BasicBlock()
    cfg_func.add_basic_block(bb_start)
    for stmt in ast_func.body:
        # NOTE(review): bb_temp is reset to bb_start on every iteration, so each
        # statement is processed starting from the entry block rather than
        # chaining from the previous statement's resulting block — confirm this
        # is the intended contract of process_cfg.
        bb_temp = bb_start
        bb_temp = process_cfg(stmt, bb_temp, cfg_func)
    cfg_func.clean_up()
    cfg_func.add_summary(ast_func.summary)
    return cfg_func
Traverses the AST and returns the corresponding CFG :param ast_func: The AST representation of function :type ast_func: ast.Function :returns: The CFG representation of the function :rtype: cfg.Function
def find_next_word_beginning(self, count=1, WORD=False):
    """Return an index relative to the cursor pointing to the start of the
    next word, or ``None`` if nothing was found.

    :param count: the count-th next word beginning is returned; a negative
        count searches backwards.
    :param WORD: use the whitespace-delimited "WORD" definition when True.
    """
    if count < 0:
        # Negative count means "search backwards".
        return self.find_previous_word_beginning(count=-count, WORD=WORD)
    regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
    # The original wrapped this loop in try/except StopIteration; that was
    # dead code — a for loop consumes StopIteration itself.
    for index, match in enumerate(regex.finditer(self.text_after_cursor)):
        # A match starting exactly at the cursor is the current word, not
        # the next one, so it does not count.
        if index == 0 and match.start(1) == 0:
            count += 1
        if index + 1 == count:
            return match.start(1)
Return an index relative to the cursor position pointing to the start of the next word. Return `None` if nothing was found.
def _did_create_child(self, connection):
    """Callback called after adding a new child nurest_object."""
    response = connection.response
    try:
        created = connection.user_info['nurest_object']
        created.from_dict(response.data[0])
    except Exception:
        # Best effort: the response may not carry data to hydrate from.
        pass
    return self._did_perform_standard_operation(connection)
Callback called after adding a new child nurest_object
def walk(self, top, topdown=True, onerror=None, **kwargs):
    """Recursively walk an HDFS directory tree; see ``os.walk`` for documentation."""
    try:
        entries = self.list_status(top, **kwargs)
    except HdfsException as exc:
        if onerror is not None:
            onerror(exc)
        return
    dirs, files = [], []
    for entry in entries:
        if entry.type == 'DIRECTORY':
            dirs.append(entry.pathSuffix)
        elif entry.type == 'FILE':
            files.append(entry.pathSuffix)
        else:
            raise AssertionError("Unexpected type {}".format(entry.type))
    if topdown:
        yield top, dirs, files
    for name in dirs:
        child = posixpath.join(top, name)
        for triple in self.walk(child, topdown, onerror, **kwargs):
            yield triple
    if not topdown:
        yield top, dirs, files
See ``os.walk`` for documentation
def smallest_flagged(heap, row):
    """Search the heap for the smallest element that is still flagged.

    Parameters
    ----------
    heap: array of shape (3, n_samples, n_neighbors)
        The heaps to search
    row: int
        Which of the heaps to search

    Returns
    -------
    index: int
        The index of the smallest flagged element of the ``row``th heap,
        or -1 if no flagged elements remain in the heap.  The returned
        element's flag is cleared as a side effect.
    """
    indices = heap[0, row]
    distances = heap[1, row]
    flags = heap[2, row]
    best = -1
    best_dist = np.inf
    for i in range(indices.shape[0]):
        if flags[i] == 1 and distances[i] < best_dist:
            best_dist = distances[i]
            best = i
    if best < 0:
        return -1
    flags[best] = 0.0
    return int(indices[best])
Search the heap for the smallest element that is still flagged. Parameters ---------- heap: array of shape (3, n_samples, n_neighbors) The heaps to search row: int Which of the heaps to search Returns ------- index: int The index of the smallest flagged element of the ``row``th heap, or -1 if no flagged elements remain in the heap.
def impute(self, M_c, X_L, X_D, Y, Q, seed, n):
    """Impute values from the predictive distribution of the given latent state.

    :param Y: constraints to apply when sampling; each is (row, col, value)
    :param Q: values to sample; each is (row, col)
    :param n: number of samples to use in the imputation
    :returns: imputed values, in the same order as specified by Q
    """
    get_next_seed = make_get_next_seed(seed)
    return su.impute(M_c, X_L, X_D, Y, Q, n, get_next_seed)
Impute values from predictive distribution of the given latent state. :param Y: A list of constraints to apply when sampling. Each constraint is a triplet of (r,d,v): r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to sample. Each value is doublet of (r, d): r is the row index, d is the column index :type Q: list of lists :param n: the number of samples to use in the imputation :type n: int :returns: list of floats -- imputed values in the same order as specified by Q
def check_cores(cores):
    """Determine how many cores we are able to use.

    Returns 1 when running under Python 3 (pprocess is Python 2 only) or
    when a pprocess queue cannot be created.

    Parameters
    ----------
    cores : int
        The number of cores that are requested.

    Returns
    -------
    cores : int
        The number of cores available.
    """
    cores = min(multiprocessing.cpu_count(), cores)
    if six.PY3:
        log = logging.getLogger("Aegean")
        log.info("Multi-cores not supported in python 3+, using one core")
        return 1
    # The original used bare `except:` clauses, which also swallowed
    # SystemExit/KeyboardInterrupt; Exception is narrow enough here.
    try:
        queue = pprocess.Queue(limit=cores, reuse=1)
    except Exception:
        return 1
    try:
        _ = queue.manage(pprocess.MakeReusable(fix_shape))
    except Exception:
        return 1
    return cores
Determine how many cores we are able to use. Return 1 if we are not able to make a queue via pprocess. Parameters ---------- cores : int The number of cores that are requested. Returns ------- cores : int The number of cores available.
def _contained_parameters(expression):
    """Determine which parameters are contained in this expression.

    :param Expression expression: expression involving parameters
    :return: set of parameters contained in this expression
    :rtype: set
    """
    if isinstance(expression, BinaryExp):
        left = _contained_parameters(expression.op1)
        right = _contained_parameters(expression.op2)
        return left | right
    if isinstance(expression, Function):
        return _contained_parameters(expression.expression)
    if isinstance(expression, Parameter):
        return {expression}
    return set()
Determine which parameters are contained in this expression. :param Expression expression: expression involving parameters :return: set of parameters contained in this expression :rtype: set
def digest(dna, restriction_enzyme):
    """Restriction endonuclease reaction.

    :param dna: DNA template to digest.
    :type dna: coral.DNA
    :param restriction_enzyme: Restriction site to use.
    :type restriction_enzyme: RestrictionSite
    :returns: list of digested DNA fragments.
    :rtype: coral.DNA list
    """
    pattern = restriction_enzyme.recognition_site
    located = dna.locate(pattern)
    # No match on either strand: return the template uncut.
    if not located[0] and not located[1]:
        return [dna]
    pattern_len = len(pattern)
    # Convert bottom-strand match positions into top-strand coordinates.
    r_indices = [len(dna) - index - pattern_len for index in located[1]]
    if pattern.is_palindrome():
        # A palindromic site matches both strands at the same position;
        # drop the bottom-strand duplicates.
        r_indices = [index for index in r_indices if index not in located[0]]
    cut_sites = sorted(located[0] + r_indices)
    current = [dna]
    # Cut right-to-left so earlier cut coordinates remain valid.
    for cut_site in cut_sites[::-1]:
        new = _cut(current, cut_site, restriction_enzyme)
        current.append(new[1])
        current.append(new[0])
    current.reverse()
    if dna.circular:
        # On a circular template the first and last fragments are contiguous.
        current[0] = current.pop() + current[0]
    return current
Restriction endonuclease reaction. :param dna: DNA template to digest. :type dna: coral.DNA :param restriction_site: Restriction site to use. :type restriction_site: RestrictionSite :returns: list of digested DNA fragments. :rtype: coral.DNA list
def _init_grad(self):
    """Initialize gradient buffers for each data shard (no-op when grad_req is 'null')."""
    if self.grad_req == 'null':
        self._grad = None
        return
    self._grad = [
        ndarray.zeros(shape=data.shape, dtype=data.dtype, ctx=data.context,
                      stype=self._grad_stype)
        for data in self._data
    ]
    autograd.mark_variables(
        self._check_and_get(self._data, list), self._grad, self.grad_req)
Initialize grad buffers.
def enableSync(self, url, definition=None):
    """Enable the Sync capability for an AGOL hosted feature service.

    Args:
        url (str): The URL of the feature service.
        definition (dict): Unused; kept for backward compatibility.

    Returns:
        The updateDefinition result, the contained error value when the
        update failed, or the string "Sync is already enabled".
    """
    adminFS = AdminFeatureService(url=url, securityHandler=self._securityHandler)
    capabilities = str(adminFS.capabilities)
    if 'Sync' in capabilities:
        return "Sync is already enabled"
    # Append Sync to the existing comma-separated capability list.
    items = capabilities.split(',')
    items.append('Sync')
    # (Removed dead code: the unused `existingDef`/`enableResults` initializers
    # and the pointless `adminFS = None; del adminFS` teardown.)
    result = adminFS.updateDefinition(json_dict={'capabilities': ','.join(items)})
    if 'error' in result:
        return result['error']
    return result
Enables Sync capability for an AGOL feature service. Args: url (str): The URL of the feature service. definition (dict): A dictionary containing valid definition values. Defaults to ``None``. Returns: dict: The result from :py:func:`arcrest.hostedservice.service.AdminFeatureService.updateDefinition`.
def handle(self, sock, read_data, path, headers):
    """Wait up to ``self.timeout`` seconds, polling the balancer for an
    action other than Spin to take over this request; fall back to a
    "timeout" Static action when none appears.
    """
    for i in range(self.timeout // self.check_interval):
        eventlet.sleep(self.check_interval)
        action = self.balancer.resolve_host(self.host)
        # Any non-Spin action means the host resolved; delegate to it.
        if not isinstance(action, Spin):
            return action.handle(sock, read_data, path, headers)
    # Still spinning after the timeout: serve the static timeout page.
    action = Static(self.balancer, self.host, self.matched_host, type="timeout")
    return action.handle(sock, read_data, path, headers)
Just waits, and checks for other actions to replace us
def face_adjacency_angles(self):
    """Return the angle between adjacent faces.

    Returns
    --------
    adjacency_angle : (n,) float
        Angle between adjacent faces; each value corresponds
        row-for-row with self.face_adjacency.
    """
    normal_pairs = self.face_normals[self.face_adjacency]
    return geometry.vector_angle(normal_pairs)
Return the angle between adjacent faces Returns -------- adjacency_angle : (n,) float Angle between adjacent faces Each value corresponds with self.face_adjacency
def get_banks_by_query(self, bank_query):
    """Get a list of ``Bank`` objects matching the given bank query.

    arg:    bank_query (osid.assessment.BankQuery): the bank query
    return: (osid.assessment.BankList) - the returned ``BankList``
    """
    # Delegate to the catalog session when one is configured.
    if self._catalog_session is not None:
        return self._catalog_session.get_catalogs_by_query(bank_query)
    query_terms = dict(bank_query._query_terms)
    collection = JSONClientValidated('assessment', collection='Bank',
                                     runtime=self._runtime)
    cursor = collection.find(query_terms).sort('_id', DESCENDING)
    return objects.BankList(cursor, runtime=self._runtime)
Gets a list of ``Bank`` objects matching the given bank query. arg: bank_query (osid.assessment.BankQuery): the bank query return: (osid.assessment.BankList) - the returned ``BankList`` raise: NullArgument - ``bank_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``bank_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
def read_relative_file(filename):
    """Return the text contents of *filename*, resolved relative to this module."""
    path = join(dirname(abspath(__file__)), filename)
    with io.open(path, encoding='utf-8') as stream:
        return stream.read()
Return the contents of the given file. Its path is supposed relative to this module.
def find_tor_binary(globs=('/usr/sbin/', '/usr/bin/',
                           '/Applications/TorBrowser_*.app/Contents/MacOS/'),
                    system_tor=True):
    """Try to find the tor executable.

    Asks the shell first (``which tor``) when *system_tor* is True, then
    scans the directories matched by the shell-style *globs* patterns.

    :param globs: a tuple of shell-style globs of directories to search
    :param system_tor: whether to consult ``which`` before scanning globs;
        if False, only the *globs* tuple is used.
    :return: the path to the tor binary as a str, or None if not found
    """
    if system_tor:
        try:
            proc = subprocess.Popen(
                'which tor',
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True,
            )
        except OSError:
            pass
        else:
            stdout, _ = proc.communicate()
            # communicate() returns bytes on Python 3; decode so callers get a
            # str path and the emptiness test is meaningful (the original
            # compared bytes against '' which is always unequal on Python 3
            # and could return a bytes path).
            path = stdout.strip()
            if isinstance(path, bytes):
                path = path.decode('utf-8', 'replace')
            if proc.poll() == 0 and path != '':
                return path
    for pattern in globs:
        for directory in glob.glob(pattern):
            torbin = os.path.join(directory, 'tor')
            if is_executable(torbin):
                return torbin
    return None
Tries to find the tor executable using the shell first or in the paths whose glob-patterns are in the given 'globs'-tuple. :param globs: A tuple of shell-style globs of directories to use to find tor (TODO consider making that globs to actual tor binary?) :param system_tor: This controls whether bash is used to search for 'tor' or not. If False, we skip that check and use only the 'globs' tuple.
def clear_cached(self):
    """Remove cached results for this table and all of its computed columns."""
    _TABLE_CACHE.pop(self.name, None)
    for column in _columns_for_table(self.name).values():
        column.clear_cached()
    logger.debug('cleared cached columns for table {!r}'.format(self.name))
Remove cached results from this table's computed columns.
def htseq_stats_table(self):
    """Add the parsed HTSeq Count stats to the report's general stats table."""
    percent_col = {
        'title': '% Assigned',
        'description': '% Assigned reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn',
    }
    count_col = {
        'title': '{} Assigned'.format(config.read_count_prefix),
        'description': 'Assigned Reads ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuBu',
        'modify': lambda x: float(x) * config.read_count_multiplier,
        'shared_key': 'read_count',
    }
    headers = OrderedDict([
        ('percent_assigned', percent_col),
        ('assigned', count_col),
    ])
    self.general_stats_addcols(self.htseq_data, headers)
Take the parsed stats from the HTSeq Count report and add them to the basic stats table at the top of the report
def toggle_word_wrap(self):
    """Toggle the document word-wrap mode.

    :return: Method success.
    :rtype: bool
    """
    # The original used the fragile `not mode and A or B` idiom, which only
    # worked because NoWrap is the zero (falsy) enum value; an explicit
    # conditional expresses the toggle directly.
    if self.wordWrapMode() == QTextOption.NoWrap:
        self.setWordWrapMode(QTextOption.WordWrap)
    else:
        self.setWordWrapMode(QTextOption.NoWrap)
    return True
Toggles document word wrap. :return: Method success. :rtype: bool
def _ConvertToCanonicalSqlDict(self, schema, raw_dict, prefix=""):
    """Flatten a nested dict of RDF values into a SQL-ready flat dict.

    Nested keys are joined with '.' and each leaf is run through the
    schema's convert_fn for its field.
    """
    flattened = {}
    for key, value in iteritems(raw_dict):
        if isinstance(value, dict):
            nested = self._ConvertToCanonicalSqlDict(
                schema, value, prefix="%s%s." % (prefix, key))
            flattened.update(nested)
        else:
            field_name = prefix + key
            flattened[field_name] = schema[field_name].convert_fn(value)
    return flattened
Converts a dict of RDF values into a SQL-ready form.
def _group_by_batches(samples, check_fn): batch_groups = collections.defaultdict(list) extras = [] for data in [x[0] for x in samples]: if check_fn(data): batch_groups[multi.get_batch_for_key(data)].append(data) else: extras.append([data]) return batch_groups, extras
Group calls by batches, processing families together during ensemble calling.
def index_bams(job, config):
    """Spawn samtools-index child jobs for the normal and tumor BAMs, then
    follow on with the preprocessing declaration.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    """
    job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
    disk = '1G' if config.ci_test else '20G'
    config.normal_bai = job.addChildJobFn(
        run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
    config.tumor_bai = job.addChildJobFn(
        run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
    job.addFollowOnJobFn(preprocessing_declaration, config)
Convenience job for handling bam indexing to make the workflow declaration cleaner :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs
async def _retreive_websocket_info(self):
    """Retrieve the RTM WebSocket URL and payload from Slack.

    Returns:
        A (url, data) tuple from rtm.start/rtm.connect.

    Raises:
        SlackApiError: when Slack does not return an RTM URL.
    """
    if self._web_client is None:
        # Lazily construct the async web client on first use.
        self._web_client = WebClient(
            token=self.token,
            base_url=self.base_url,
            ssl=self.ssl,
            proxy=self.proxy,
            run_async=True,
            loop=self._event_loop,
            session=self._session,
        )
    self._logger.debug("Retrieving websocket info.")
    use_start = self.connect_method in ["rtm.start", "rtm_start"]
    if use_start:
        resp = await self._web_client.rtm_start()
    else:
        resp = await self._web_client.rtm_connect()
    url = resp.get("url")
    if url is None:
        msg = "Unable to retreive RTM URL from Slack."
        raise client_err.SlackApiError(message=msg, response=resp)
    return url, resp.data
Retrieves the WebSocket info from Slack. Returns: A tuple of websocket information. e.g. ( "wss://...", { "self": {"id": "U01234ABC","name": "robotoverlord"}, "team": { "domain": "exampledomain", "id": "T123450FP", "name": "ExampleName" } } ) Raises: SlackApiError: Unable to retreive RTM URL from Slack.
def send_key(self, key):
    """Send a key press followed by a key release to the Horizon box."""
    press = struct.pack(">BBBBBBH", 4, 1, 0, 0, 0, 0, key)
    release = struct.pack(">BBBBBBH", 4, 0, 0, 0, 0, 0, key)
    self.con.send(press)
    self.con.send(release)
Send a key to the Horizon box.
def _find_alphas_param(self): for attr in ("cv_alphas_", "alphas_", "alphas",): try: return getattr(self.estimator, attr) except AttributeError: continue raise YellowbrickValueError( "could not find alphas param on {} estimator".format( self.estimator.__class__.__name__ ) )
Searches for the parameter on the estimator that contains the array of alphas that was used to produce the error selection. If it cannot find the parameter then a YellowbrickValueError is raised.
def get_name_from_path(full_path, root_path):
    """Create a flat filename by merging the path components below *root_path*."""
    relative_path = os.path.relpath(full_path, root_path)
    # Drop the extension, join remaining dot-parts with '_', then flatten.
    stem = "_".join(relative_path.split('.')[:-1])
    return stem.replace('/', '_').replace(';', '').replace(':', '')
Create a filename by merging path after root directory.
async def getArtifact(self, *args, **kwargs):
    """Get an artifact by name from a specific run of a task.

    Thin async wrapper that dispatches to the generated API call described
    by ``self.funcinfo["getArtifact"]``; see the queue service documentation
    for scopes, redirect handling and download validation requirements.
    """
    return await self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
Get Artifact from Run Get artifact by `<name>` from a specific run. **Public Artifacts**, in-order to get an artifact you need the scope `queue:get-artifact:<name>`, where `<name>` is the name of the artifact. But if the artifact `name` starts with `public/`, authentication and authorization is not necessary to fetch the artifact. **API Clients**, this method will redirect you to the artifact, if it is stored externally. Either way, the response may not be JSON. So API client users might want to generate a signed URL for this end-point and use that URL with an HTTP client that can handle responses correctly. **Downloading artifacts** There are some special considerations for those http clients which download artifacts. This api endpoint is designed to be compatible with an HTTP 1.1 compliant client, but has extra features to ensure the download is valid. It is strongly recommend that consumers use either taskcluster-lib-artifact (JS), taskcluster-lib-artifact-go (Go) or the CLI written in Go to interact with artifacts. In order to download an artifact the following must be done: 1. Obtain queue url. Building a signed url with a taskcluster client is recommended 1. Make a GET request which does not follow redirects 1. In all cases, if specified, the x-taskcluster-location-{content,transfer}-{sha256,length} values must be validated to be equal to the Content-Length and Sha256 checksum of the final artifact downloaded. as well as any intermediate redirects 1. If this response is a 500-series error, retry using an exponential backoff. No more than 5 retries should be attempted 1. If this response is a 400-series error, treat it appropriately for your context. This might be an error in responding to this request or an Error storage type body. This request should not be retried. 1. If this response is a 200-series response, the response body is the artifact. 
If the x-taskcluster-location-{content,transfer}-{sha256,length} and x-taskcluster-location-content-encoding are specified, they should match this response body 1. If the response type is a 300-series redirect, the artifact will be at the location specified by the `Location` header. There are multiple artifact storage types which use a 300-series redirect. 1. For all redirects followed, the user must verify that the content-sha256, content-length, transfer-sha256, transfer-length and content-encoding match every further request. The final artifact must also be validated against the values specified in the original queue response 1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference` must not occur 1. A request which has x-taskcluster-artifact-storage-type value of `blob` and does not have x-taskcluster-location-content-sha256 or x-taskcluster-location-content-length must be treated as an error **Headers** The following important headers are set on the response to this method: * location: the url of the artifact if a redirect is to be performed * x-taskcluster-artifact-storage-type: the storage type. Example: blob, s3, error The following important headers are set on responses to this method for Blob artifacts * x-taskcluster-location-content-sha256: the SHA256 of the artifact *after* any content-encoding is undone. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64}) * x-taskcluster-location-content-length: the number of bytes *after* any content-encoding is undone * x-taskcluster-location-transfer-sha256: the SHA256 of the artifact *before* any content-encoding is undone. This is the SHA256 of what is sent over the wire. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64}) * x-taskcluster-location-transfer-length: the number of bytes *after* any content-encoding is undone * x-taskcluster-location-content-encoding: the content-encoding used. It will either be `gzip` or `identity` right now. 
This is hardcoded to a value set when the artifact was created and no content-negotiation occurs * x-taskcluster-location-content-type: the content-type of the artifact **Caching**, artifacts may be cached in data centers closer to the workers in-order to reduce bandwidth costs. This can lead to longer response times. Caching can be skipped by setting the header `x-taskcluster-skip-cache: true`, this should only be used for resources where request volume is known to be low, and caching not useful. (This feature may be disabled in the future, use is sparingly!) This method is ``stable``
def validate_tls(self, hostname):
    """Validate the certificate path and that the certificate is valid for
    *hostname* and for the purpose of a TLS connection.

    :param hostname: A unicode string of the TLS server hostname
    :raises: certvalidator.errors.PathValidationError,
        certvalidator.errors.RevokedError,
        certvalidator.errors.InvalidCertificateError
    :return: A certvalidator.path.ValidationPath object of the validated
        certificate validation path
    """
    self._validate_path()
    validate_tls_hostname(self._context, self._certificate, hostname)
    return self._path
Validates the certificate path, that the certificate is valid for the hostname provided and that the certificate is valid for the purpose of a TLS connection. :param hostname: A unicode string of the TLS server hostname :raises: certvalidator.errors.PathValidationError - when an error occurs validating the path certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked certvalidator.errors.InvalidCertificateError - when the certificate is not valid for TLS or the hostname :return: A certvalidator.path.ValidationPath object of the validated certificate validation path
def on_welcome(self, c, e):
    """Runs when the bot successfully connects to the IRC server:
    resets the reconnect backoff, identifies with nickserv when configured,
    then joins the configured channels.
    """
    self.backoff = 1
    if self.nickserv:
        if Utilities.isNotEmpty(self.nickserv_pass):
            self.identify(c, e, self.nickserv_pass)
            # Give nickserv time to process the identification before joining.
            time.sleep(3)
        else:
            logger.error('If nickserv is enabled, you must supply'
                         ' a password')
    # Warn about a likely misconfiguration: password without nickserv enabled.
    if self.nickserv is False and self.nickserv_pass is not None:
        logger.warn('It appears you provided a nickserv password but '
                    'did not enable nickserv authentication')
    for channel in self.my_channels:
        logger.debug('Attempting to join {0!s}'.format(channel))
        c.join(channel)
This function runs when the bot successfully connects to the IRC server
def _entry_allocated_bitmap(self, entry_number): index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise.
def load_corpus(*data_file_paths):
    """Yield (conversations, categories, file_path) for each corpus file."""
    for file_path in data_file_paths:
        corpus_data = read_corpus(file_path)
        corpus = list(corpus_data.get('conversations', []))
        categories = corpus_data.get('categories', [])
        yield corpus, categories, file_path
Return the data contained within a specified corpus.
def subspace_index(self, little_endian_bits_int: int
                   ) -> Tuple[Union[slice, int, 'ellipsis'], ...]:
    """An index for the subspace where the target axes equal a value.

    Args:
        little_endian_bits_int: The desired value of the qubits at the
            targeted `axes`, packed into an integer; the least significant
            bit corresponds to the first axis.

    Returns:
        A value usable to index into `target_tensor`/`available_buffer`
        selecting only the part of Hilbert space matching the bit assignment.
    """
    return linalg.slice_for_qubits_equal_to(self.axes, little_endian_bits_int)
An index for the subspace where the target axes equal a value. Args: little_endian_bits_int: The desired value of the qubits at the targeted `axes`, packed into an integer. The least significant bit of the integer is the desired bit for the first axis, and so forth in increasing order. Returns: A value that can be used to index into `target_tensor` and `available_buffer`, and manipulate only the part of Hilbert space corresponding to a given bit assignment. Example: If `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and then this method will return the following when given `little_endian_bits=0b01`: `(slice(None), 0, slice(None), 1, Ellipsis)` Therefore the following two lines would be equivalent: args.target_tensor[args.subspace_index(0b01)] += 1 args.target_tensor[:, 0, :, 1] += 1
def success_response(self, message=None):
    """Return a 'render redirect' to the result of the `get_success_url` method.

    :param message: optional message passed through to the render call
    """
    return self.render(self.request,
                       redirect_url=self.get_success_url(),
                       obj=self.object,
                       message=message,
                       # No data collection needed: this render only redirects.
                       collect_render_data=False)
Returns a 'render redirect' to the result of the `get_success_url` method.
def registered(self, driver, executorInfo, frameworkInfo, agentInfo):
    """Invoked once the executor driver has successfully connected with Mesos;
    records the executor id/address and starts the framework-message thread.
    """
    self.id = executorInfo.executor_id.get('value', None)
    log.debug("Registered executor %s with framework", self.id)
    self.address = socket.gethostbyname(agentInfo.hostname)
    sender = threading.Thread(target=self._sendFrameworkMessage, args=[driver])
    # Daemonize so the thread does not block executor shutdown.
    sender.daemon = True
    sender.start()
Invoked once the executor driver has been able to successfully connect with Mesos.
def SetField(cls, default=NOTHING, required=True, repr=False, key=None):
    """Create a new set field on a model.

    :param cls: class (or name) of the model to be related in Set.
    :param default: any TypedSet or set
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: whether this field should appear in the object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, set())
    converter = converters.to_set_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSet)
    metadata = dict(key=key)
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=metadata)
Create new set field on a model. :param cls: class (or name) of the model to be related in Set. :param default: any TypedSet or set :param bool required: whether or not the object is invalid if not provided. :param bool repr: include this field should appear in object's repr. :param bool cmp: include this field in generated comparison. :param string key: override name of the value when converted to dict.
def make_lines_texture(num_lines=10, resolution=50):
    """Make a texture consisting of evenly spaced horizontal lines.

    Args:
        num_lines (int): the number of lines to draw
        resolution (int): the number of midpoints on each line

    Returns:
        A texture as flattened (x, y) arrays; NaNs separate the lines.
    """
    # A trailing NaN on each row acts as a pen-up separator between lines.
    xs = np.hstack([np.linspace(0, 1, resolution), np.nan])
    ys = np.linspace(0, 1, num_lines)
    x, y = np.meshgrid(xs, ys)
    y[np.isnan(x)] = np.nan
    return x.flatten(), y.flatten()
Makes a texture consisting of a given number of horizontal lines. Args: num_lines (int): the number of lines to draw resolution (int): the number of midpoints on each line Returns: A texture.
def prepare_data(problem, hparams, params, config):
    """Construct the evaluation input pipeline and return (inputs, labels, features)."""
    input_fn = problem.make_estimator_input_fn(
        tf.estimator.ModeKeys.EVAL, hparams, force_repeat=True)
    dataset = input_fn(params, config)
    features, _ = dataset.make_one_shot_iterator().get_next()
    # NOTE(review): "targets" is assigned to inputs and "inputs" to labels —
    # presumably this problem stores images under "targets"; confirm upstream.
    inputs, labels = features["targets"], features["inputs"]
    inputs = tf.to_float(inputs)
    static_shape = inputs.shape.as_list()
    inputs = tf.reshape(inputs, [hparams.batch_size] + static_shape[1:])
    labels = tf.reshape(labels, [hparams.batch_size])
    return inputs, labels, features
Construct input pipeline.
def update(
    self,
    filename=None,
    batch_id=None,
    prev_batch_id=None,
    producer=None,
    count=None,
):
    """Create (or refresh) and save the history model instance for a batch.

    :raises BatchHistoryError: when any required argument is missing
    :raises IntegrityError: when a record for ``batch_id`` already exists
    :returns: the saved history model instance
    """
    if not filename:
        raise BatchHistoryError("Invalid filename. Got None")
    if not batch_id:
        raise BatchHistoryError("Invalid batch_id. Got None")
    if not prev_batch_id:
        raise BatchHistoryError("Invalid prev_batch_id. Got None")
    if not producer:
        raise BatchHistoryError("Invalid producer. Got None")
    if self.exists(batch_id=batch_id):
        raise IntegrityError("Duplicate batch_id")
    try:
        obj = self.model.objects.get(batch_id=batch_id)
    except self.model.DoesNotExist:
        # Normal path: the exists() check above means a new record is built here.
        obj = self.model(
            filename=filename,
            batch_id=batch_id,
            prev_batch_id=prev_batch_id,
            producer=producer,
            total=count,
        )
    obj.transaction_file.name = filename
    obj.save()
    return obj
Creates an history model instance.
def update_value(self, offset, value):
    """Overwrite the stored binary value starting at *offset*.

    The stored value is zero-padded up to *offset* when currently shorter,
    or truncated at *offset* when longer, before the new bytes are appended.

    Returns:
        int: 0 on success, or an opaque error code when the write would
        exceed the configured total size.
    """
    if offset + len(value) > self.total_size:
        return Error.INPUT_BUFFER_TOO_LONG
    current_length = len(self.current_value)
    if current_length < offset:
        # Pad with zeros so the write lands at the requested offset.
        self.current_value += bytearray(offset - current_length)
    elif current_length > offset:
        # Drop everything at or beyond the write offset.
        self.current_value = self.current_value[:offset]
    self.current_value += bytearray(value)
    return 0
Update the binary value currently stored for this config value. Returns: int: An opaque error code that can be returned from a set_config rpc
def validate_gpg_sig(self, path, sig=None):
    """Verify the detached GPG signature of the collection rules file.

    Args:
        path: path of the file to verify.
        sig: path of the detached signature; defaults to ``path + ".asc"``.

    Returns:
        bool: True when gpg exits 0, False otherwise.
    """
    logger.debug("Verifying GPG signature of Insights configuration")
    sig = sig if sig is not None else path + ".asc"
    command = ("/usr/bin/gpg --no-default-keyring "
               "--keyring " + constants.pub_gpg_path +
               " --verify " + sig + " " + path)
    # Python 2 shlex cannot handle unicode reliably; encode first.
    if not six.PY3:
        command = command.encode('utf-8', 'ignore')
    args = shlex.split(command)
    logger.debug("Executing: %s", args)
    proc = Popen(
        args, shell=False, stdout=PIPE, stderr=STDOUT, close_fds=True)
    stdout, stderr = proc.communicate()
    logger.debug("STDOUT: %s", stdout)
    logger.debug("STDERR: %s", stderr)
    logger.debug("Status: %s", proc.returncode)
    if not proc.returncode:
        logger.debug("GPG signature verified")
        return True
    logger.error("ERROR: Unable to validate GPG signature: %s", path)
    return False
Validate the GPG signature of the collection rules file.
def _agent_notification(self, context, method, hosting_devices, operation):
    """Notify individual Cisco cfg agents handling the given hosting devices.

    For each hosting device, looks up its admin-up, schedulable cfg agents
    and casts `method` to each of them over RPC.

    NOTE(review): `operation` is accepted but never used in this body —
    confirm whether callers rely on it.
    """
    # Always query with admin rights, regardless of the caller's context.
    admin_context = context.is_admin and context or context.elevated()
    for hosting_device in hosting_devices:
        agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
            admin_context, hosting_device['id'], admin_state_up=True,
            schedule=True)
        for agent in agents:
            LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
                      'message %(method)s',
                      {'agent_type': agent.agent_type,
                       'topic': agent.topic,
                       'host': agent.host,
                       'method': method})
            # Fire-and-forget cast to the specific agent host.
            cctxt = self.client.prepare(server=agent.host)
            cctxt.cast(context, method)
Notify individual Cisco cfg agents.
def compare_enums(autogen_context, upgrade_ops, schema_names):
    """Generate SyncEnumValuesOp migrations for enums that grew new values.

    Compares the enums declared in the SQLAlchemy metadata against those
    defined in the database for each schema. Enums absent from the database
    are ignored (Alembic will create them normally).
    """
    pending = set()
    default_schema = autogen_context.dialect.default_schema_name
    for schema in schema_names:
        if schema is None:
            schema = default_schema
        defined = get_defined_enums(autogen_context.connection, schema)
        declared = get_declared_enums(
            autogen_context.metadata, schema, default_schema)
        for name, new_values in declared.items():
            old_values = defined.get(name)
            # Only sync enums that already exist and gained members.
            if old_values is not None and new_values.difference(old_values):
                pending.add((schema, name, old_values, new_values))
    # Sort for deterministic migration ordering.
    for schema, name, old_values, new_values in sorted(pending):
        upgrade_ops.ops.append(
            SyncEnumValuesOp(schema, name, old_values, new_values))
Walk the declared SQLAlchemy schema for every referenced Enum, walk the PG schema for every defined Enum, then generate SyncEnumValuesOp migrations for each defined enum that has grown new entries when compared to its declared version. Enums that don't exist in the database yet are ignored, since SQLAlchemy/Alembic will create them as part of the usual migration process.
def insertCallSet(self, callSet):
    """Insert the specified callSet into this repository.

    Any storage failure is re-raised as a RepoManagerException.
    """
    try:
        models.Callset.create(
            id=callSet.getId(),
            name=callSet.getLocalId(),
            variantsetid=callSet.getParentContainer().getId(),
            biosampleid=callSet.getBiosampleId(),
            attributes=json.dumps(callSet.getAttributes()),
        )
    except Exception as exc:
        # Normalize backend errors into the repo-level exception type.
        raise exceptions.RepoManagerException(exc)
Inserts the specified callSet into this repository.
def compare(self, vertex0, vertex1, subject_graph):
    """Return True when the pattern vertex and subject vertex have
    identical fingerprints (element-wise equal)."""
    own_fingerprint = self.pattern_graph.vertex_fingerprints[vertex0]
    other_fingerprint = subject_graph.vertex_fingerprints[vertex1]
    return (own_fingerprint == other_fingerprint).all()
Returns true when the two vertices are of the same kind
def set_nr_track(self, nr_track):
    """Set the song's track-number (TRCK) ID3 frame.

    :param nr_track: track number (will be stringified).
    """
    frame = TRCK(encoding=3, text=str(nr_track))
    self._set_attr(frame)
Sets the song's track number. :param nr_track: number of the track
def move_mission(self, key, selection_index):
    """Mark the mission waypoint selected in the UI as the one being moved."""
    waypoint_idx = self.selection_index_to_idx(key, selection_index)
    self.moving_wp = waypoint_idx
    print("Moving wp %u" % waypoint_idx)
move a mission point
def get_imported_namespaces(self, must_have_imported_data_type=False,
                            consider_annotations=False,
                            consider_annotation_types=False):
    """Return the namespaces imported by this namespace, sorted by name.

    Args:
        must_have_imported_data_type (bool): If True, only include
            namespaces imported for at least one data type.
        consider_annotations (bool): If False, exclude namespaces that
            were imported *only* for annotations.
        consider_annotation_types (bool): If False, exclude namespaces
            that were imported *only* for annotation types.

    Returns:
        List[Namespace]: imported namespaces in ASCII order by name.
    """
    imported_namespaces = []
    for imported_namespace, reason in self._imported_namespaces.items():
        if must_have_imported_data_type and not reason.data_type:
            continue
        # If none of (data type, alias, annotation type) motivated the
        # import, the only reason left is an annotation -> skip unless
        # annotations are being considered.
        if (not consider_annotations) and not (
            reason.data_type or reason.alias or reason.annotation_type
        ):
            continue
        # Symmetric check: the only remaining reason is an annotation
        # type -> skip unless annotation types are being considered.
        if (not consider_annotation_types) and not (
            reason.data_type or reason.alias or reason.annotation
        ):
            continue
        imported_namespaces.append(imported_namespace)
    imported_namespaces.sort(key=lambda n: n.name)
    return imported_namespaces
Returns a list of Namespace objects. A namespace is a member of this list if it is imported by the current namespace and a data type is referenced from it. Namespaces are in ASCII order by name. Args: must_have_imported_data_type (bool): If true, result does not include namespaces that were not imported for data types. consider_annotations (bool): If false, result does not include namespaces that were only imported for annotations consider_annotation_types (bool): If false, result does not include namespaces that were only imported for annotation types. Returns: List[Namespace]: A list of imported namespaces.
def _nics_equal(nic1, nic2):
    """Return True when two <interface> XML elements describe the same device.

    Compares type, source (per-type attribute), MAC (case-insensitive) and
    model; missing sub-elements compare as None.
    """
    def normalize(nic):
        nic_type = nic.attrib['type']
        source = nic.find('source')
        mac = nic.find('mac')
        model = nic.find('model')
        return {
            'type': nic_type,
            'source': source.attrib[nic_type] if source is not None else None,
            'mac': mac.attrib['address'].lower() if mac is not None else None,
            'model': model.attrib['type'] if model is not None else None,
        }

    return normalize(nic1) == normalize(nic2)
Test if two interface elements should be considered like the same device
def source_url(farm, server, id, secret, size):
    """Build the direct static-Flickr JPEG URL for a photo.

    Args:
        farm, server, id, secret: components of the Flickr photo identity.
        size: one of 'small', 'medium', 'large'.

    Returns:
        str: the photo source URL.

    Raises:
        ValueError: if `size` is not a recognized size name. (Previously an
        unknown size crashed with UnboundLocalError.)
    """
    size_suffixes = {'small': 'n', 'medium': 'c', 'large': 'b'}
    try:
        img_size = size_suffixes[size]
    except KeyError:
        raise ValueError(
            'size must be one of: small, medium, large (got {!r})'.format(size))
    return 'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
        farm, server, id, secret, img_size)
Url for direct jpg use.
def rationalize_file(item_f, charset, mode='rb', lock=False):
    """Normalize any file-like input into a real, optionally locked file.

    Accepts, in order of preference:
      * objects with ``fileno()`` — the fd is dup'd and reopened so the
        returned file can be closed independently of the original;
      * objects with only ``readline()`` — returned unchanged;
      * integral raw file descriptors — dup'd and reopened;
      * string file paths — decoded via ``coerce_unicode`` and opened
        line-buffered.

    NOTE: uses Python 2 ``except Exception, e`` syntax.
    """
    if hasattr(item_f, 'fileno'):
        # Duplicate the descriptor; on any failure close it to avoid a leak.
        n_fd = os.dup(item_f.fileno())
        try:
            _lock(n_fd, lock)
            # Drop the local reference to the original file object.
            del item_f
            return os.fdopen(n_fd)
        except Exception, e:
            os.close(n_fd)
            raise e
    elif hasattr(item_f, 'readline'):
        # Buffer-like object with no descriptor: nothing to dup or lock.
        return item_f
    elif isinstance(item_f, numbers.Integral):
        # Raw integer file descriptor.
        n_fd = os.dup(item_f)
        try:
            _lock(n_fd, lock)
            return os.fdopen(n_fd)
        except Exception, e:
            os.close(n_fd)
            raise e
    # Otherwise treat as a path; bufsize=1 gives line buffering.
    f = open(coerce_unicode(item_f, charset), mode, 1)
    try:
        _lock(f.fileno(), lock)
        return f
    except Exception, e:
        f.close()
        raise e
FSQ attempts to treat all file-like things as line-buffered as an optimization to the average case. rationalize_file will handle file objects, buffers, raw file-descriptors, sockets, and string file-addresses, and will return a file object that can be safely closed in FSQ scope without closing the file in the bounding caller.
def state_dict(self) -> Dict[str, Any]:
    """Serialize the metric tracker's state for checkpointing by a Trainer."""
    state: Dict[str, Any] = {}
    state["best_so_far"] = self._best_so_far
    state["patience"] = self._patience
    state["epochs_with_no_improvement"] = self._epochs_with_no_improvement
    state["is_best_so_far"] = self._is_best_so_far
    state["should_decrease"] = self._should_decrease
    state["best_epoch_metrics"] = self.best_epoch_metrics
    state["epoch_number"] = self._epoch_number
    state["best_epoch"] = self.best_epoch
    return state
A ``Trainer`` can use this to serialize the state of the metric tracker.
def find_val(self, eq, val):
    """Return (name, index) of the equation variable holding `val`.

    `eq` selects the equation group ('f', 'g' or 'q'); anything else
    returns None, as does a value not found in the group.
    """
    if eq not in ('f', 'g', 'q'):
        return
    # 'g' equations use the y-variable names; 'f' and 'q' use x names.
    key = 'unamey' if eq == 'g' else 'unamex'
    names = self.system.varname.__dict__[key]
    for idx, (name, item) in enumerate(zip(names, self.__dict__[eq])):
        if item == val:
            return name, idx
    return
Return the name of the equation having the given value
def parse_tect_region_dict_to_tuples(region_dict):
    """Convert weighted-list attributes of each tectonic region into tuples.

    Mutates each region dict in place and returns the list of regions.
    """
    weighted_keys = ['Displacement_Length_Ratio', 'Shear_Modulus']
    parsed_regions = []
    for region in region_dict:
        for key in weighted_keys:
            region[key] = weight_list_to_tuple(region[key], key)
        region['Magnitude_Scaling_Relation'] = weight_list_to_tuple(
            region['Magnitude_Scaling_Relation'],
            'Magnitude Scaling Relation')
        parsed_regions.append(region)
    return parsed_regions
Parses the tectonic regionalisation dictionary attributes to tuples
def frequency_app(parser, cmd, args):
    """perform frequency analysis on a value."""
    parser.add_argument('value', help='the value to analyse, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    counts = frequency(six.iterbytes(pwnypack.main.binary_value_or_stdin(args.value)))
    lines = []
    for key, value in counts.items():
        # Show the character itself only when it is printable.
        if key >= 32 and chr(key) in string.printable:
            lines.append('0x%02x (%c): %d' % (key, chr(key), value))
        else:
            lines.append('0x%02x ---: %d' % (key, value))
    return '\n'.join(lines)
perform frequency analysis on a value.
def set_text_color(self, r, g=-1, b=-1):
    """Set the color used for text rendering.

    A single component (or all-zero RGB) selects grayscale; otherwise the
    full RGB triple is used. Updates `color_flag` when the text color no
    longer matches the fill color.
    """
    is_grayscale = (r == 0 and g == 0 and b == 0) or g == -1
    if is_grayscale:
        self.text_color = sprintf('%.3f g', r / 255.0)
    else:
        self.text_color = sprintf('%.3f %.3f %.3f rg',
                                  r / 255.0, g / 255.0, b / 255.0)
    self.color_flag = (self.fill_color != self.text_color)
Set color for text
def set_continue(self, name, action, seqno, value=None, default=False,
                 disable=False):
    """Configure the routemap continue value.

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.
        value (integer): The value to configure for the routemap continue.
        default (bool): Default the routemap continue value.
        disable (bool): Negate the routemap continue value.

    Returns:
        Result of self.configure (True on success).

    Raises:
        ValueError: if `value` is not a positive integer and neither
        `default` nor `disable` is set.
    """
    commands = ['route-map %s %s %s' % (name, action, seqno)]
    if default:
        commands.append('default continue')
    elif disable:
        commands.append('no continue')
    else:
        # Compare as int: the old `value < 1` compared a str/None to an
        # int, raising TypeError on Python 3 instead of the documented
        # ValueError.
        if not str(value).isdigit() or int(value) < 1:
            raise ValueError('seqno must be a positive integer unless '
                             'default or disable is specified')
        commands.append('continue %s' % value)
    return self.configure(commands)
Configures the routemap continue value Args: name (string): The full name of the routemap. action (string): The action to take for this routemap clause. seqno (integer): The sequence number for the routemap clause. value (integer): The value to configure for the routemap continue default (bool): Specifies to default the routemap continue value disable (bool): Specifies to negate the routemap continue value Returns: True if the operation succeeds otherwise False is returned
def ping_external_urls_handler(sender, **kwargs):
    """Ping external URLs when a visible entry is saved (if enabled)."""
    entry = kwargs['instance']
    if not entry.is_visible:
        return
    if settings.SAVE_PING_EXTERNAL_URLS:
        ExternalUrlsPinger(entry)
Ping externals URLS when an entry is saved.
def add_document(self, question, answer):
    """Add a question/answer pair to the database.

    Skips insertion when the identical (question, answer) pair already
    exists; otherwise creates the document, links its grams (creating
    missing ones) and refreshes the affected IDF values.

    :param question: A question to an answer
    :type question: :class:`str`
    :param answer: An answer to a question
    :type answer: :class:`str`
    """
    question = question.strip()
    answer = answer.strip()
    session = self.Session()
    # Avoid duplicate documents for the exact same question/answer text.
    if session.query(Document) \
            .filter_by(text=question, answer=answer).count():
        logger.info('Already here: {0} -> {1}'.format(question, answer))
        return
    logger.info('add document: {0} -> {1}'.format(question, answer))
    # make=True creates grams that don't exist yet.
    grams = self._get_grams(session, question, make=True)
    doc = Document(question, answer)
    doc.grams = list(grams)
    # New gram links change document frequencies -> recompute IDFs.
    self._recalc_idfs(session, grams)
    session.add(doc)
    session.commit()
Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str`
def b(self):
    """Positive antipodal point on the minor axis, as a Point."""
    point = Point(self.center)
    # Offset along whichever axis is the minor one.
    if self.xAxisIsMinor:
        point.x += self.minorRadius
    else:
        point.y += self.minorRadius
    return point
Positive antipodal point on the minor axis, Point class.
def write(self, tid, data, offset, fh):
    """FUSE write handler; only control files accept writes.

    Writing to " next"/" prev" pages the corresponding search forward or
    backward. `data`, `offset` and `fh` contents are ignored; returns
    len(data) so the kernel considers the write complete.
    """
    directions = {" next": True, " prev": False}
    if tid[1] not in directions:
        raise FuseOSError(errno.EPERM)
    try:
        self.searches[tid[0]].updateResults(directions[tid[1]])
    except KeyError:
        raise FuseOSError(errno.EINVAL)
    except ConnectionError:
        raise FuseOSError(errno.ENETDOWN)
    return len(data)
Write operation. Applicable only for control files - updateResults is called. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. data : bytes Ignored. offset : int Ignored. fh : int File descriptor. Returns ------- int Length of data written.
def obj_from_file(filename='annotation.yaml', filetype='auto'):
    """Read a serialized object from a YAML or pickle file.

    Args:
        filename: path to the file.
        filetype: 'auto' (derive from extension), 'yaml'/'yml', or one of
            'pickle'/'pkl'/'pklz'/'picklezip'.

    Returns:
        The deserialized object (empty dict for an empty YAML file).

    Raises:
        ValueError: for an unsupported filetype. (Previously this path
        crashed with UnboundLocalError on the final `return obj`.)
    """
    if filetype == 'auto':
        _, ext = os.path.splitext(filename)
        filetype = ext[1:]
    if filetype in ('yaml', 'yml'):
        from ruamel.yaml import YAML
        yaml = YAML(typ="unsafe")
        with open(filename, encoding="utf-8") as f:
            obj = yaml.load(f)
        if obj is None:
            obj = {}
    elif filetype in ('pickle', 'pkl', 'pklz', 'picklezip'):
        fcontent = read_pkl_and_pklz(filename)
        # Python 2/3 pickle module split.
        if sys.version_info[0] < 3:
            import cPickle as pickle
        else:
            import _pickle as pickle
        if sys.version_info.major == 2:
            obj = pickle.loads(fcontent)
        else:
            # latin1 keeps py2-pickled byte strings loadable on py3.
            obj = pickle.loads(fcontent, encoding="latin1")
    else:
        logger.error('Unknown filetype ' + filetype)
        raise ValueError('Unknown filetype ' + filetype)
    return obj
Read object from file
def can_user_update_settings(request, view, obj=None):
    """Only staff can update shared settings; otherwise the user must be
    an owner of the settings. Returns None when no object is given."""
    if obj is None:
        return
    if obj.customer and not obj.shared:
        return permissions.is_owner(request, view, obj)
    return permissions.is_staff(request, view, obj)
Only staff can update shared settings, otherwise user has to be an owner of the settings.
def key_exists(self, key):
    """Check whether `key` has previously been added to this store.

    Performs a linear scan through the log file, so this is slow (O(n)).

    Returns:
        bool: True if the key was found, False otherwise.
    """
    assert isinstance(key, str)
    # Close before scanning and always reopen afterwards, even on error.
    self._close()
    try:
        return self._unsafe_key_exists(key)
    finally:
        self._open()
Check if key has previously been added to this store. This function makes a linear search through the log file and is very slow. Returns True if the event has previously been added, False otherwise.
def _find_by(self, key):
    """Register every event device found under /dev/input/by-<key>/."""
    pattern = '/dev/input/by-{key}/*-event-*'.format(key=key)
    for device_path in glob.glob(pattern):
        self._parse_device_path(device_path)
Find devices.
def get_section(section_name, cfg_file=cfg_file):
    """Return an entire config section as an option -> value dictionary."""
    parser = get_parser(cfg_file=cfg_file)
    return {
        option: parser.get(section=section_name, option=option)
        for option in parser.options(section_name)
    }
Returns a dictionary of an entire section
def can_receive_messages(self):
    """Whether this communication is ready to receive messages.

    :rtype: bool
    """
    with self.lock:
        state = self._state
        # Ready only once started and while the connection remains open.
        return not (state.is_waiting_for_start() or
                    state.is_connection_closed())
Whether this communication is ready to receive messages. :rtype: bool .. code:: python assert not communication.can_receive_messages() communication.start() assert communication.can_receive_messages() communication.stop() assert not communication.can_receive_messages()
def from_api(cls, api):
    """Build an app description for the todo app that, given the api,
    can interact via the API itself, the UX, or the pseudo-RPC layer."""
    from .pseudorpc import PseudoRpc
    ux = TodoUX(api)
    rpc = PseudoRpc(api)
    return cls({ViaAPI: api, ViaUX: ux, ViaRPC: rpc})
create an application description for the todo app, that based on the api can use either tha api or the ux for interaction
def get_groups_dict(self) -> Dict:
    """Return each inventory group serialized to a plain dictionary."""
    serialize = deserializer.inventory.InventoryElement.serialize
    return {
        name: serialize(group).dict()
        for name, group in self.groups.items()
    }
Returns serialized dictionary of groups from inventory
def login_required(self, fresh=False, redirect_to=None):
    """Ensure the current user is authenticated.

    With ``fresh=True`` the login session must also be fresh. On failure,
    exits the current context with either a redirect or the login
    manager's unauthorized response.
    """
    needs_login = not self.logged_in() or (
        fresh and not self.login_manager.login_fresh())
    if not needs_login:
        return
    if redirect_to:
        resp = redirect(redirect_to)
    else:
        resp = self.login_manager.unauthorized()
    current_context.exit(resp, trigger_action_group="missing_user")
Ensures that a user is authenticated
def get_privkey_address(privkey_info, blockchain='bitcoin', **blockchain_opts):
    """Derive the payment address from a private key bundle.

    Only the 'bitcoin' blockchain is supported; anything else raises
    ValueError.
    """
    if blockchain != 'bitcoin':
        raise ValueError('Unknown blockchain "{}"'.format(blockchain))
    return btc_get_privkey_address(privkey_info, **blockchain_opts)
Get the address from a private key bundle
async def rewrite_middleware(server, request):
    """Sanic middleware that applies the configured security class's
    ``rewrite`` hook to the incoming request path.

    Falls back to ``DummySecurity`` when no SECURITY setting is present.
    A SecurityException yields a 400 response (message shown only when
    DEBUG is set); otherwise the request's path is rewritten in place.
    """
    if singletons.settings.SECURITY is not None:
        security_class = singletons.settings.load('SECURITY')
    else:
        security_class = DummySecurity
    security = security_class()
    try:
        new_path = await security.rewrite(request)
    except SecurityException as e:
        # Hide the failure reason outside of debug mode.
        msg = ''
        if DEBUG:
            msg = str(e)
        return server.response.text(msg, status=400)
    # Mutate the request so downstream handlers see the rewritten path.
    request.path = new_path
Sanic middleware that uses a security class's "rewrite" method to validate and rewrite the request path, returning a 400 response on security failure.
def dict_to_pendulum(d: Dict[str, Any], pendulum_class: ClassType) -> DateTime:
    """Convert a serialized ``dict`` back to a Pendulum ``DateTime``.

    Parses the ISO-8601 string stored under the ``'iso'`` key.
    NOTE(review): ``pendulum_class`` is unused — parsing always goes
    through ``pendulum.parse``; confirm this is intentional.
    """
    return pendulum.parse(d['iso'])
Converts a ``dict`` object back to a ``Pendulum``.
def attach(self, listener):
    """Register `listener` for event notifications (idempotent).

    The listener is expected to expose a notify(msg_type, param_dict)
    method.
    """
    if listener in self.listeners:
        return
    self.listeners.append(listener)
Attach an object that should be notified of events. The object should have a notify(msg_type, param_dict) function.
def select(select, tag, namespaces=None, limit=0, flags=0, **kwargs):
    """Select tags under `tag` matching the selector string `select`.

    Note: ``compile`` here is the module-level selector compiler (it
    shadows the builtin); extra keyword arguments are forwarded to it.
    ``limit=0`` means no limit.
    """
    return compile(select, namespaces, flags, **kwargs).select(tag, limit)
Select the specified tags.
def build_info(self):
    """Return the build's JSON info, fetching and memoizing on first use."""
    if 'build_info' not in self._memo:
        url = self.artifact_url('json')
        self._memo['build_info'] = _get_url(url).json()
    return self._memo['build_info']
Return the build's info
def __expand_limits(ax, limits, which='x'):
    """Grow the limits of one axis of `ax` to include `limits`.

    Existing finite limits are only ever widened, never shrunk; non-finite
    current limits are replaced outright. `which` selects 'x' or 'y'.
    """
    if which == 'x':
        getter, setter = ax.get_xlim, ax.set_xlim
    elif which == 'y':
        getter, setter = ax.get_ylim, ax.set_ylim
    else:
        raise ValueError('invalid axis: {}'.format(which))
    current = getter()
    lo, hi = limits
    new_lo = min(current[0], lo) if np.isfinite(current[0]) else lo
    new_hi = max(current[1], hi) if np.isfinite(current[1]) else hi
    setter([new_lo, new_hi])
Helper function to expand axis limits
def get_es_mappings(self):
    """Fetch mapping definitions from Elasticsearch, keyed by index base name.

    The trailing underscore-separated suffix of each index name is dropped
    to form the key.
    """
    raw = json.loads(requests.get(self.mapping_url).text)
    mappings = {}
    for key, value in raw.items():
        base_name = "_".join(key.split("_")[:-1])
        mappings[base_name] = value['mappings']
    return mappings
Returns the mapping definitions present in Elasticsearch.
def backprop(self, input_data, targets, cache=None):
    """Compute gradients for each task and combine the results.

    Each task backpropagates independently; input gradients are summed,
    weighted by ``task_weights``, while weight/bias gradients are simply
    concatenated in task order.

    Returns:
        (gradients, df_input): list of per-task gradients and the combined
        gradient with respect to the input (GPUArray).
    """
    df_input = gpuarray.zeros_like(input_data)
    if cache is None:
        cache = self.n_tasks * [None]
    gradients = []
    for targets_task, cache_task, task, task_weight in \
        izip(targets, cache, self.tasks, self.task_weights):
        gradients_task, df_input_task = \
            task.backprop(input_data, targets_task, cache_task)
        # df_input <- 1.0 * df_input + task_weight * df_input_task
        df_input = df_input.mul_add(1., df_input_task, task_weight)
        gradients.extend(gradients_task)
    return gradients, df_input
Compute gradients for each task and combine the results. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : list Gradients with respect to the weights and biases for each task df_input : ``GPUArray`` Gradients with respect to the input, obtained by adding the gradients with respect to the inputs from each task, weighted by ``MultitaskTopLayer.task_weights``.
def post(self, request, *args, **kwargs):
    """Handle POST: revert to, or delete, the version in POST['version'].

    With 'revert' present, reverts and redirects to ``get_done_url``.
    With 'delete' present, deletes and redirects back to the current page.
    Bad or missing input silently redirects to ``get_done_url``.
    """
    versions = self._get_versions()
    url = self.get_done_url()
    msg = None
    try:
        # ValueError here (non-numeric version) falls through to redirect.
        vid = int(request.POST.get('version', ''))
        version = versions.get(vid=vid)
        if request.POST.get('revert'):
            object_url = self.get_object_url()
            msg = self.revert(version, object_url)
        elif request.POST.get('delete'):
            msg = self.delete(version)
            # After a delete, stay on the current page instead of done_url.
            url = self.request.build_absolute_uri()
    except (ValueError, versions.model.DoesNotExist):
        pass
    return self.render(request, redirect_url=url, message=msg,
                       obj=self.object, collect_render_data=False)
Method for handling POST requests. Expects the 'vid' of the version to act on to be passed as in the POST variable 'version'. If a POST variable 'revert' is present this will call the revert method and then return a 'render redirect' to the result of the `get_done_url` method. If a POST variable 'delete' is present this will call the delete method and return a 'render redirect' to the result of the `get_done_url` method. If this method receives unexpected input, it will silently redirect to the result of the `get_done_url` method.
def splitext(path):
    """Split the extension from `path`.

    Dotfiles like ``.foo`` are treated as having no extension.

    Arguments:
        path (str): A path to split.

    Returns:
        (str, str): the path without extension, and the extension
        (including the leading dot, or '' when there is none).
    """
    parent_path, pathname = split(path)
    is_dotfile = pathname.startswith(".") and pathname.count(".") == 1
    if is_dotfile or "." not in pathname:
        return path, ""
    stem, ext = pathname.rsplit(".", 1)
    return join(parent_path, stem), "." + ext
Split the extension from the path. Arguments: path (str): A path to split. Returns: (str, str): A tuple containing the path and the extension. Example: >>> splitext('baz.txt') ('baz', '.txt') >>> splitext('foo/bar/baz.txt') ('foo/bar/baz', '.txt') >>> splitext('foo/bar/.foo') ('foo/bar/.foo', '')
def is_compatible(self, other):
    """Check whether this array and ``other`` have compatible metadata.

    Extends the base-class compatibility check with a y-sample-size (dy)
    comparison when ``other`` is of the same type.

    Raises:
        ValueError: when the dy values differ, or when either series has
            an irregular y-index (no ``dy`` attribute).

    Returns:
        bool: True when compatible.
    """
    super(Array2D, self).is_compatible(other)
    if isinstance(other, type(self)):
        try:
            if not self.dy == other.dy:
                raise ValueError("%s sample sizes do not match: "
                                 "%s vs %s." % (type(self).__name__,
                                                self.dy, other.dy))
        except AttributeError:
            # Missing dy means the y-index is irregular.
            raise ValueError("Series with irregular y-indexes cannot "
                             "be compatible")
    return True
Check whether this array and ``other`` have compatible metadata
def set_config(self, data=None, **kwargs):
    """Store server config options parsed from a space-separated string.

    Each token is either ``KEY=value`` or a bare ``KEY`` (stored as True).
    Only UPPERCASE option names are stored.

    Args:
        data: space-separated option string; None/empty is a no-op.
            (Previously the default ``None`` crashed on ``.split()``.)
    """
    config = self.bot.config['server_config']
    if not data:
        return
    for opt in data.split(' '):
        if '=' in opt:
            opt, value = opt.split('=', 1)
        else:
            value = True
        if opt.isupper():
            config[opt] = value
Store server config
def qhull_cmd(cmd, options, points):
    """Run a qhull-family command over a set of points.

    Args:
        cmd: one of the supported hull commands (qconvex, qdelaunay,
            qvoronoi).
        options: option string passed to the command.
        points: sequence of point coordinate sequences.

    Returns:
        Output as a list of stripped lines.
    """
    # qhull input format: dimension, point count, then one point per line.
    header = [str(len(points[0])), str(len(points))]
    rows = [' '.join(map(repr, row)) for row in points]
    payload = "\n".join(header + rows)
    output = getattr(hull, cmd)(options, payload)
    return [line.strip() for line in output.strip().split("\n")]
Generalized helper method to perform a qhull based command. Args: cmd: Command to perform. Supported commands are qconvex, qdelaunay and qvoronoi. options: Options to be provided for qhull command. See specific methods for info on supported options. Up to two options separated by spaces are supported. points: Sequence of points as input to qhull command. Returns: Output as a list of strings. E.g., ['4', '0 2', '1 0', '2 3 ', '3 1']
def get_filename_by_suffixes(dir_src, suffixes):
    """Return file names in `dir_src` whose extension is in `suffixes`.

    Args:
        dir_src: directory path.
        suffixes: suffix string or list; entries may be given with or
            without the leading '.' (normalized in place).

    Returns:
        list of matching file names, or None for an invalid suffixes arg.
    """
    if is_string(suffixes):
        suffixes = [suffixes]
    if not isinstance(suffixes, list):
        return None
    # Normalize in place so every suffix carries a leading dot.
    for i, suf in enumerate(suffixes):
        if suf and not suf.startswith('.'):
            suffixes[i] = '.' + suf
    matched = []
    for fname in os.listdir(dir_src):
        _, ext = os.path.splitext(fname)
        if StringClass.string_in_list(ext, suffixes):
            matched.append(fname)
    return matched
get file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes list, the suffix in suffixes can with or without '.' Returns: file names with the given suffixes as list
def attributes():
    """List enabled ClassFile Attributes.

    Prints each enabled attribute with the JVM release it was added in and
    its minimum class-file version.
    """
    attribute_classes = get_attribute_classes()
    for name, class_ in attribute_classes.items():
        click.echo(
            u'{name} - Added in: {ai} ({cv})'.format(
                name=click.style(name, fg='green'),
                ai=click.style(class_.ADDED_IN, fg='yellow'),
                cv=click.style(
                    ClassVersion(*class_.MINIMUM_CLASS_VERSION).human,
                    fg='yellow'
                )
            )
        )
List enabled Attributes. Prints a list of all enabled ClassFile Attributes.