code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def connect(self, callback=None, timeout=None):
    """Connect to the IPC socket.

    Returns a Future that resolves when the connection completes. If an
    optional ``callback`` is given, it is scheduled on the IO loop with the
    connection result once the future finishes.
    """
    if hasattr(self, '_connecting_future') and not self._connecting_future.done():
        # A connection attempt is already in flight; reuse its future.
        future = self._connecting_future
    else:
        if hasattr(self, '_connecting_future'):
            # Retrieve any exception from the finished future so tornado
            # does not report it as unhandled.
            self._connecting_future.exception()
        future = tornado.concurrent.Future()
        self._connecting_future = future
        self._connect(timeout=timeout)
    if callback is not None:
        def handle_future(future):
            response = future.result()
            self.io_loop.add_callback(callback, response)
        future.add_done_callback(handle_future)
    return future
Connect to the IPC socket
def _prep_sample_cnvs(cnv_file, data):
    """Convert a multi-sample CNV BED file into a single-sample BED file.

    Matches features by sample name, accepting R ``make.names`` mangled
    variants (a leading ``X`` on numeric IDs, non-word characters turned
    into '.'), and rewrites matched feature names back to the real sample
    name. Returns the path to the per-sample BED file.
    """
    import pybedtools
    sample_name = tz.get_in(["rgnames", "sample"], data)
    def make_names(name):
        # Mirror R's make.names: every non-word, non-dot character becomes '.'
        return re.sub("[^\w.]", '.', name)
    def matches_sample_name(feat):
        # Accept the plain name plus both R-mangled forms.
        return (feat.name == sample_name or feat.name == "X%s" % sample_name or feat.name == make_names(sample_name))
    def update_sample_name(feat):
        feat.name = sample_name
        return feat
    sample_file = os.path.join(os.path.dirname(cnv_file), "%s-cnv.bed" % sample_name)
    if not utils.file_exists(sample_file):
        with file_transaction(data, sample_file) as tx_out_file:
            with shared.bedtools_tmpdir(data):
                pybedtools.BedTool(cnv_file).filter(matches_sample_name).each(update_sample_name).saveas(tx_out_file)
    return sample_file
Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html
def stem(self, form, tag):
    """Return the cached lemma of *form* for part-of-speech *tag*.

    Misses are computed with the Stanford lemmatizer and memoized in
    ``self.lemma_cache``.
    """
    cache_key = (form, tag)
    try:
        return self.lemma_cache[cache_key]
    except KeyError:
        lemma = self.stemmer(form, tag).word()
        self.lemma_cache[cache_key] = lemma
        return lemma
Returns the stem of word with specific form and part-of-speech tag according to the Stanford lemmatizer. Lemmas are cached.
def _map_purchase_request_to_func(self, purchase_request_type): if purchase_request_type in self._intent_view_funcs: view_func = self._intent_view_funcs[purchase_request_type] else: raise NotImplementedError('Request type "{}" not found and no default view specified.'.format(purchase_request_type)) argspec = inspect.getargspec(view_func) arg_names = argspec.args arg_values = self._map_params_to_view_args(purchase_request_type, arg_names) print('_map_purchase_request_to_func', arg_names, arg_values, view_func, purchase_request_type) return partial(view_func, *arg_values)
Provides appropriate parameters to the on_purchase functions.
def get_default_vpc():
    """Return the account's default VPC, or None when there is none."""
    all_vpcs = get_ec2_resource().vpcs.all()
    return next((candidate for candidate in all_vpcs if candidate.is_default), None)
Return default VPC or none if not present
def from_key_bytes(cls, algorithm, key_bytes):
    """Create a Verifier from an algorithm and a DER-encoded public key.

    NOTE(review): the upstream docstring documents the second parameter as
    ``encoded_point``; the actual parameter is ``key_bytes`` (DER bytes fed
    to ``load_der_public_key``) -- confirm intent.
    """
    return cls(
        algorithm=algorithm,
        key=serialization.load_der_public_key(data=key_bytes, backend=default_backend())
    )
Creates a `Verifier` object based on the supplied algorithm and raw verification key. :param algorithm: Algorithm on which to base verifier :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param bytes key_bytes: Raw verification key :returns: Instance of Verifier generated from the raw key bytes :rtype: aws_encryption_sdk.internal.crypto.Verifier
def authenticate(self, request):
    """Attempt to authenticate *request* with the configured authenticators.

    Returns True on the first authenticator that accepts the request;
    raises HttpError with status 401 when all of them reject it.
    """
    authenticators = self._meta.authenticators
    # CORS preflight requests may be allowed through anonymously.
    if request.method == 'OPTIONS' and ADREST_ALLOW_OPTIONS:
        self.auth = AnonimousAuthenticator(self)
        return True
    error_message = "Authorization required."
    for authenticator in authenticators:
        auth = authenticator(self)
        try:
            if not auth.authenticate(request):
                raise AssertionError(error_message)
            self.auth = auth
            auth.configure(request)
            return True
        except AssertionError as e:  # fixed: 'except X, e' is Python 2-only syntax
            error_message = str(e)
    raise HttpError(error_message, status=status.HTTP_401_UNAUTHORIZED)
Attempt to authenticate the request. :param request: django.http.Request instance :return bool: True if success else raises HTTP_401
def change_password(username, password, uid=None, host=None, admin_username=None, admin_password=None, module=None):
    """Change a DRAC user's password via racadm.

    When ``uid`` is not given, all DRAC user slots are listed to find the
    index for *username* (slow, up to 16 lookups). Returns the racadm
    command result, or False when the user does not exist.

    Raises:
        CommandExecutionError: when the password exceeds 20 characters
            (the DRAC limit).
    """
    if len(password) > 20:
        raise CommandExecutionError('Supplied password should be 20 characters or less')
    if uid is None:
        # Look up the user's slot index; requires enumerating every slot.
        user = list_users(host=host, admin_username=admin_username, admin_password=admin_password, module=module)
        uid = user[username]['index']
    if uid:
        return __execute_cmd('config -g cfgUserAdmin -o '
                             'cfgUserAdminPassword -i {0} {1}'
                             .format(uid, password),
                             host=host, admin_username=admin_username,
                             admin_password=admin_password, module=module)
    else:
        log.warning('racadm: user \'%s\' does not exist', username)
        return False
Change user's password CLI Example: .. code-block:: bash salt dell dracr.change_password [USERNAME] [PASSWORD] uid=[OPTIONAL] host=<remote DRAC> admin_username=<DRAC user> admin_password=<DRAC PW> salt dell dracr.change_password diana secret Note that if only a username is specified then this module will look up details for all 16 possible DRAC users. This is time consuming, but might be necessary if one is not sure which user slot contains the one you want. Many late-model Dell chassis have 'root' as UID 1, so if you can depend on that then setting the password is much quicker. Raises an error if the supplied password is greater than 20 chars.
def seek(self, value, target="offset"):
    """Seek this stream walker to a specific offset or reading id.

    With target='offset' the walker is positioned at *value* directly;
    with target='id' the offset of the reading with that id is looked up.
    The available-reading count is recomputed for the new position.

    Returns:
        bool: True when the reading at the final position is selected by
        this walker's selector, False otherwise.

    Raises:
        ArgumentError: *target* is neither 'offset' nor 'id'.
    """
    if target not in (u'offset', u'id'):
        raise ArgumentError("You must specify target as either offset or id", target=target)
    if target == u'offset':
        self._verify_offset(value)
        self.offset = value
    else:
        self.offset = self._find_id(value)
    # Keep the cached count consistent with the new position.
    self._count = self.engine.count_matching(self.selector, offset=self.offset)
    curr = self.engine.get(self.storage_type, self.offset)
    return self.matches(DataStream.FromEncoded(curr.stream))
Seek this stream to a specific offset or reading id. There are two modes of use. You can seek to a specific reading id, which means the walker will be positioned exactly at the reading pointed to by the reading ID. If the reading id cannot be found an exception will be raised. The reading id can be found but corresponds to a reading that is not selected by this walker, the walker will be moved to point at the first reading after that reading and False will be returned. If target=="offset", the walker will be positioned at the specified offset in the sensor log. It will also update the count of available readings based on that new location so that the count remains correct. The offset does not need to correspond to a reading selected by this walker. If offset does not point to a selected reading, the effective behavior will be as if the walker pointed to the next selected reading after `offset`. Args: value (int): The identifier to seek, either an offset or a reading id. target (str): The type of thing to seek. Can be offset or id. If id is given, then a reading with the given ID will be searched for. If offset is given then the walker will be positioned at the given offset. Returns: bool: True if an exact match was found, False otherwise. An exact match means that the offset or reading ID existed and corresponded to a reading selected by this walker. An inexact match means that the offset or reading ID existed but corresponded to reading that was not selected by this walker. If the offset or reading ID could not be found an Exception is thrown instead. Raises: ArgumentError: target is an invalid string, must be offset or id. UnresolvedIdentifierError: the desired offset or reading id could not be found.
def main():
    """Commandline interface to initialize Sockeye embedding weights with
    pretrained word representations."""
    setup_main_logger(console=True, file_logging=False)
    params = argparse.ArgumentParser(description='Quick usage: python3 -m sockeye.init_embedding '
                                                 '-w embed-in-src.npy embed-in-tgt.npy '
                                                 '-i vocab-in-src.json vocab-in-tgt.json '
                                                 '-o vocab-out-src.json vocab-out-tgt.json '
                                                 '-n source_embed_weight target_embed_weight '
                                                 '-f params.init')
    arguments.add_init_embedding_args(params)
    args = params.parse_args()
    init_embeddings(args)
Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
def right_axis_label(self, label, position=None, rotation=-60, offset=0.08, **kwargs):
    """Set the label shown on the right axis.

    *position* is a 3-tuple; when falsy, a default offset-adjusted
    placement is used. Extra kwargs are passed to matplotlib.
    """
    placement = position if position else (2. / 5 + offset, 3. / 5, 0)
    self._labels["right"] = (label, placement, rotation, kwargs)
Sets the label on the right axis. Parameters ---------- label: String The axis label position: 3-Tuple of floats, None The position of the text label rotation: float, -60 The angle of rotation of the label offset: float, Used to compute the distance of the label from the axis kwargs: Any kwargs to pass through to matplotlib.
def ToDebugString(self):
    """Render the path segment weight tables as a debug string."""
    lines = ['Path segment index\tWeight']
    lines.extend(
        '{0:d}\t\t\t{1:d}'.format(index, weight)
        for index, weight in self._weight_per_index.items())
    lines.append('')
    lines.append('Weight\t\t\tPath segment index(es)')
    lines.extend(
        '{0:d}\t\t\t{1!s}'.format(weight, indexes)
        for weight, indexes in self._indexes_per_weight.items())
    lines.append('')
    return '\n'.join(lines)
Converts the path segment weights into a debug string.
def has_started(self):
    """Tests if this assessment has begun.

    Returns True when the offered assessment has no explicit start time,
    otherwise compares the current UTC time against it.
    """
    assessment_offered = self.get_assessment_offered()
    if assessment_offered.has_start_time():
        return DateTime.utcnow() >= assessment_offered.get_start_time()
    else:
        # No scheduled start time: treat the assessment as already started.
        return True
Tests if this assessment has begun. return: (boolean) - ``true`` if the assessment has begun, ``false`` otherwise *compliance: mandatory -- This method must be implemented.*
def dict_snake_to_camel_case(snake_dict, convert_keys=True, convert_subkeys=False):
    """Recursively convert a snake_cased dict into a camelCased dict.

    *convert_keys* controls the current level; *convert_subkeys* controls
    whether nested dict keys are converted (nested levels always pass
    convert_subkeys=True onward). Dicts inside lists are converted too;
    other list elements are left untouched.
    """
    def convert_value(value):
        if isinstance(value, dict):
            return dict_snake_to_camel_case(
                value, convert_keys=convert_subkeys, convert_subkeys=True)
        if isinstance(value, list):
            return [dict_snake_to_camel_case(element,
                                             convert_keys=convert_subkeys,
                                             convert_subkeys=True)
                    if isinstance(element, dict) else element
                    for element in value]
        return value

    return {(to_camel_case(key) if convert_keys else key): convert_value(value)
            for key, value in snake_dict.items()}
Recursively convert a snake_cased dict into a camelCased dict :param snake_dict: Dictionary to convert :param convert_keys: Whether the key should be converted :param convert_subkeys: Whether to also convert the subkeys, in case they are named properties of the dict :return:
def handleSync(self, msg: Any) -> Any:
    """Dispatch *msg* to the function registered in `routes`.

    A plain 2-tuple (not a namedtuple) is treated as (key, args): the
    function is looked up by the first element and called with both
    elements as positional arguments. Anything else is both key and
    single argument.
    """
    is_plain_pair = (isinstance(msg, tuple) and len(msg) == 2
                     and not hasattr(msg, '_field_types'))
    if is_plain_pair:
        return self.getFunc(msg[0])(*msg)
    return self.getFunc(msg)(msg)
Pass the message as an argument to the function defined in `routes`. If the msg is a tuple, pass the values as multiple arguments to the function. :param msg: tuple of object and callable
def home_slug():
    """Return the slug arg defined for the ``home`` urlpattern.

    The reversed URL is stripped of the script prefix before resolving;
    when the resolved pattern has no ``slug`` kwarg the raw slug path is
    returned instead.
    """
    prefix = get_script_prefix()
    slug = reverse("home")
    if slug.startswith(prefix):
        # Normalize away a WSGI script prefix so resolve() sees a root path.
        slug = '/' + slug[len(prefix):]
    try:
        return resolve(slug).kwargs["slug"]
    except KeyError:
        return slug
Returns the slug arg defined for the ``home`` urlpattern, which is the definitive source of the ``url`` field defined for an editable homepage object.
def sky_dist(src1, src2):
    """Great circle distance between two sources, in the units of gcd.

    Identical sources short-circuit to a distance of exactly zero.
    Sources must expose ``ra`` and ``dec`` attributes in degrees.
    """
    same_object = np.all(src1 == src2)
    return 0 if same_object else gcd(src1.ra, src1.dec, src2.ra, src2.dec)
Great circle distance between two sources. A check is made to determine if the two sources are the same object, in this case the distance is zero. Parameters ---------- src1, src2 : object Two sources to check. Objects must have parameters (ra,dec) in degrees. Returns ------- distance : float The distance between the two sources. See Also -------- :func:`AegeanTools.angle_tools.gcd`
def execute(self):
    """Print system and cluster info when the server supports "sysinfo"."""
    if not self.cmd.is_conn_available():
        return
    if self.cmd.connection.lowest_server_version >= SYSINFO_MIN_VERSION:
        success, rows = self._sys_info()
        # Preserve an earlier non-zero exit code; otherwise reflect failure.
        self.cmd.exit_code = self.cmd.exit_code or int(not success)
        if success:
            for result in rows:
                self.cmd.pprint(result.rows, result.cols)
            self.cmd.logger.info(
                "For debugging purposes you can send above listed information to support@crate.io")
    else:
        tmpl = 'Crate {version} does not support the cluster "sysinfo" command'
        self.cmd.logger.warn(tmpl
                             .format(version=self.cmd.connection.lowest_server_version))
print system and cluster info
def _create_worker(self, worker):
    """Common worker setup: connect its started signal and track it."""
    worker.sig_started.connect(self._start)
    # Keep a reference to the worker; presumably this also prevents it
    # from being garbage-collected while running -- confirm.
    self._workers.append(worker)
Common worker setup.
def get_token(self, code, headers=None, **kwargs):
    """Request an access token from the provider's token endpoint.

    Extra keyword arguments are merged into the POST payload and may
    override the standard OAuth fields.
    """
    self._check_configuration("site", "token_url", "redirect_uri",
                              "client_id", "client_secret")
    url = "%s%s" % (self.site, quote(self.token_url))
    payload = {
        'redirect_uri': self.redirect_uri,
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'code': code,
    }
    payload.update(kwargs)
    return self._make_request(url, data=payload, headers=headers)
Requests an access token
def calculate_row_format(columns, keys=None):
    """Build a %-style format string for one table row.

    Args:
        columns (dict): column name -> maximum width.
        keys (list): optional ordering/filter of column names.

    Returns:
        str: e.g. '|%(name)-10s|%(age)-3s|'.
    """
    if keys is None:
        keys = columns.keys()
    else:
        keys = [key for key in keys if key in columns]
    cells = ["%%(%s)-%ds" % (key, columns[key]) for key in keys]
    return '|' + "|".join(cells) + '|'
Calculate row format. Args: columns (dict): the keys are the column name and the value the max length. keys (list): optional list of keys to order columns as well as to filter for them. Returns: str: format for table row
def rms(x):
    """Root mean square of a sequence of numerical values.

    Equivalent to math.sqrt(sum(x_i**2 for x_i in x) / len(x)).

    >>> rms([0, 2, 4, 4])
    3.0
    """
    try:
        return (np.array(x) ** 2).mean() ** 0.5
    except (TypeError, ValueError):
        # fixed: was a bare `except:` that swallowed every exception.
        # Fall back to a pure-Python computation over non-NA values.
        x = np.array(dropna(x))
        invN = 1.0 / len(x)
        return (sum(invN * (x_i ** 2) for x_i in x)) ** .5
Root Mean Square. Arguments: x (seq of float): A sequence of numerical values Returns: The square root of the average of the squares of the values math.sqrt(sum(x_i**2 for x_i in x) / len(x)) or return (np.array(x) ** 2).mean() ** 0.5 >>> rms([0, 2, 4, 4]) 3.0
def _repr(obj): vals = ", ".join("{}={!r}".format( name, getattr(obj, name)) for name in obj._attribs) if vals: t = "{}(name={}, {})".format(obj.__class__.__name__, obj.name, vals) else: t = "{}(name={})".format(obj.__class__.__name__, obj.name) return t
Show the received object as precise as possible.
def updateMesh(self, polydata):
    """Overwrite the actor's polygonal mesh with *polydata*; returns self."""
    self.poly = polydata
    self.mapper.SetInputData(polydata)
    # Mark the mapper dirty so the pipeline re-renders with the new data.
    self.mapper.Modified()
    return self
Overwrite the polygonal mesh of the actor with a new one.
def seek(self, offset, whence=0, mode='rw'):
    """Seek within the audio data, analogous to Python's file seek.

    *offset* is in frames; *whence* follows the usual 0/1/2 convention;
    *mode* selects which of the read/write pointers move ('r', 'w' or 'rw').

    Raises:
        PyaudioIOError: for an invalid seek (before/beyond the file).
    """
    try:
        st = self._sndfile.seek(offset, whence, mode)
    except IOError as e:  # fixed: 'except X, e' is Python 2-only syntax
        raise PyaudioIOError(str(e))
    return st
similar to python seek function, taking into account only audio data. :Parameters: offset : int the number of frames (eg two samples for stereo files) to move relatively to position set by whence. whence : int only 0 (beginning), 1 (current) and 2 (end of the file) are valid. mode : string If set to 'rw', both read and write pointers are updated. If 'r' is given, only the read pointer is updated, if 'w', only the write one is (this may of course make sense only if you open the file in a certain mode). Notes ----- - only audio data is taken into account. - if an invalid seek is given (beyond or before the file), a PyaudioIOError is raised.
def set_acl(self, role, users):
    """Set permissions for this method.

    Args:
        role (str): one of "OWNER", "READER", "WRITER", "NO ACCESS".
        users (list(str)): users to grant the role to.
    """
    acl_updates = [{"user": user, "role": role} for user in users]
    r = fapi.update_repository_method_acl(
        self.namespace, self.name, self.snapshot_id, acl_updates, self.api_url
    )
    # Raise on anything other than HTTP 200.
    fapi._check_response_code(r, 200)
Set permissions for this method. Args: role (str): Access level one of {one of "OWNER", "READER", "WRITER", "NO ACCESS"} users (list(str)): List of users to give role to
def clean(self, list_article_candidates):
    """Run do_cleaning over every extracted field of each candidate.

    Candidates are mutated in place; the same objects are returned as a
    new list.
    """
    fields = ('title', 'description', 'text', 'topimage', 'author', 'publish_date')
    results = []
    for candidate in list_article_candidates:
        for field in fields:
            setattr(candidate, field, self.do_cleaning(getattr(candidate, field)))
        results.append(candidate)
    return results
Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects
def delete(ctx, componentname):
    """Delete an existing component configuration.

    Looks the configuration up by name, then by uuid. Refuses to act when
    the name is ambiguous. Deleting triggers recreation of the default
    configuration on next restart.
    """
    col = ctx.obj['col']
    if col.count({'name': componentname}) > 1:
        log('More than one component configuration of this name! Try '
            'one of the uuids as argument. Get a list with "config '
            'list"')
        return
    log('Deleting component configuration', componentname, emitter='MANAGE')
    configuration = col.find_one({'name': componentname})
    if configuration is None:
        # Fall back to treating the argument as a uuid.
        configuration = col.find_one({'uuid': componentname})
    if configuration is None:
        log('Component configuration not found:', componentname, emitter='MANAGE')
        return
    configuration.delete()
    log('Done')
Delete an existing component configuration. This will trigger the creation of its default configuration upon next restart.
def alterar(self, id_interface, nome, protegida, descricao,
            id_ligacao_front, id_ligacao_back, tipo=None, vlan=None):
    """Edit an interface by its identifier via a PUT request.

    Raises:
        InvalidParameterError: interface id is missing or not an int.
    """
    if not is_valid_int_param(id_interface):
        raise InvalidParameterError(
            u'Interface id is invalid or was not informed.')
    url = 'interface/' + str(id_interface) + '/'
    interface_map = {
        'nome': nome,
        'protegida': protegida,
        'descricao': descricao,
        'id_ligacao_front': id_ligacao_front,
        'id_ligacao_back': id_ligacao_back,
        'tipo': tipo,
        'vlan': vlan,
    }
    code, xml = self.submit({'interface': interface_map}, 'PUT', url)
    return self.response(code, xml)
Edit an interface by its identifier. Equipment identifier is not changed. :param nome: Interface name. :param protegida: Indication of protected ('0' or '1'). :param descricao: Interface description. :param id_ligacao_front: Front end link interface identifier. :param id_ligacao_back: Back end link interface identifier. :param id_interface: Interface identifier. :return: None :raise InvalidParameterError: The parameters interface id, nome and protegida are none or invalid. :raise NomeInterfaceDuplicadoParaEquipamentoError: There is already an interface with this name for this equipment. :raise InterfaceNaoExisteError: Front link interface and/or back link interface doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def float(cls, name, description=None, unit='', params=None, default=None, initial_status=None):
    """Instantiate a new float sensor object.

    *params*, when given, is [min, max]. NOTE: this classmethod
    deliberately shadows the builtin ``float`` as an alternate-constructor
    name on the class.
    """
    return cls(cls.FLOAT, name, description, unit, params, default, initial_status)
Instantiate a new float sensor object. Parameters ---------- name : str The name of the sensor. description : str A short description of the sensor. units : str The units of the sensor value. May be the empty string if there are no applicable units. params : list [min, max] -- miniumum and maximum values of the sensor default : float An initial value for the sensor. Defaults to 0.0. initial_status : int enum or None An initial status for the sensor. If None, defaults to Sensor.UNKNOWN. `initial_status` must be one of the keys in Sensor.STATUSES
def norm(x, encoding="latin1"):
    "Convert accents encoded in ISO 8859-1 (or *encoding*) to plain ASCII"
    # NOTE(review): Python 2-only code -- relies on `basestring`, `unicode`
    # and `str.decode`; it will not run unmodified under Python 3.
    if not isinstance(x, basestring):
        x = unicode(x)
    elif isinstance(x, str):
        x = x.decode(encoding, 'ignore')
    # NFKD splits base characters from combining accents; the accents are
    # then dropped by the ASCII encode with 'ignore'.
    return unicodedata.normalize('NFKD', x).encode('ASCII', 'ignore')
Convertir acentos codificados en ISO 8859-1 u otro, a ASCII regular
def fetchExternalUpdates(self):
    """!Experimental! Request seed values from client code and push them
    into each UI config."""
    seeds = seeder.fetchDynamicProperties(
        self.buildSpec['target'],
        self.buildSpec['encoding']
    )
    for config in self.configs:
        config.seedUI(seeds)
!Experimental! Calls out to the client code requesting seed values to use in the UI !Experimental!
def _get_ids_from_hostname(self, hostname): results = self.list_instances(hostname=hostname, mask="id") return [result['id'] for result in results]
List VS ids which match the given hostname.
def request_args(self):
    """Merge URL match arguments and query-string arguments into one dict.

    Query-string values take precedence over URL-resolved values.
    """
    merged = dict(self.request.match_info.items())
    merged.update(self.request.query.items())
    return merged
Returns the arguments passed with the request in a dictionary. Returns both URL resolved arguments and query string arguments.
def iter_pages_builds(self, number=-1, etag=None):
    """Iterate over pages builds of this repository.

    *number* limits the iteration (-1 means all); *etag* enables
    conditional requests. Yields PagesBuild objects.
    """
    url = self._build_url('pages', 'builds', base_url=self._api)
    return self._iter(int(number), url, PagesBuild, etag=etag)
Iterate over pages builds of this repository. :returns: generator of :class:`PagesBuild <github3.repos.pages.PagesBuild>`
def regret(self):
    """Expected per-pull regret: best-arm mean times total pulls, minus
    total wins, normalized by total pulls.

    NaN win-rates (arms never pulled) are treated as zero.
    """
    total_pulls = sum(self.pulls)
    best_mean = np.max(np.nan_to_num(self.wins / self.pulls))
    return (total_pulls * best_mean - sum(self.wins)) / total_pulls
Calculate expected regret, where expected regret is maximum optimal reward - sum of collected rewards, i.e. expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t) Returns ------- float
def download(ui, repo, clname, **opts):
    """download a change from the code review server

    Prints a description of the given change list followed by its diff,
    downloaded from the code review server. Returns an error string on
    failure, None otherwise.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    cl, vers, patch, err = DownloadCL(ui, repo, clname)
    if err != "":
        return err
    ui.write(cl.EditorText() + "\n")
    ui.write(patch + "\n")
    return
download a change from the code review server Download prints a description of the given change list followed by its diff, downloaded from the code review server.
def respond(request, code):
    """Respond with status *code*, or redirect when ``next`` is supplied
    in the GET or POST data."""
    target = request.GET.get('next', request.POST.get('next'))
    if target:
        return HttpResponseRedirect(target)
    response_cls = type('Response%d' % code, (HttpResponse, ), {'status_code': code})
    return response_cls()
Responds to the request with the given response code. If ``next`` is in the form, it will redirect instead.
def write_csv(data, file_name, encoding='utf-8'):
    """Write worksheets out in CSV format.

    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file; with multiple sheets each one
            is written to <root>_<index><ext>.
    """
    name_extension = len(data) > 1
    root, ext = os.path.splitext(file_name)
    for i, sheet in enumerate(data):
        fname = file_name if not name_extension else root+"_"+str(i)+ext
        # NOTE(review): binary mode plus csv.writer(..., encoding=...)
        # implies `csv` is the third-party unicodecsv module here (stdlib
        # csv.writer has no encoding parameter) -- confirm the import.
        with open(fname, 'wb') as date_file:
            csv_file = csv.writer(date_file, encoding=encoding)
            for line in sheet:
                csv_file.writerow(line)
Writes out to csv format. Args: data: 2D list of tables/worksheets. file_name: Name of the output file.
def update(self, obj_id, is_public=NotUpdated, is_protected=NotUpdated):
    """Update a Job Execution.

    Only flags explicitly set by the caller (not left as the NotUpdated
    sentinel) are included in the PATCH payload.
    """
    data = {}
    self._copy_if_updated(data, is_public=is_public,
                          is_protected=is_protected)
    return self._patch('/job-executions/%s' % obj_id, data)
Update a Job Execution.
def get_attribute_option(self, attribute, option_name):
    """Return the value of *option_name* for the given attribute.

    The option name is validated first; a missing option yields None via
    dict.get.
    """
    self.__validate_attribute_option_name(option_name)
    # Private helper builds the canonical lookup key for the attribute.
    attribute_key = self.__make_key(attribute)
    return self.__attribute_options[attribute_key].get(option_name)
Returns the value of the given attribute option for the specified attribute.
def write_file(self, filename, distance=6, velocity=8, charge=3):
    """Write LammpsData to *filename*.

    Args:
        filename (str): output path.
        distance (int): significant figures for box settings and
            coordinates. Default 6.
        velocity (int): significant figures for velocities. Default 8.
        charge (int): significant figures for charges. Default 3.
    """
    with open(filename, "w") as f:
        f.write(self.get_string(distance=distance, velocity=velocity, charge=charge))
Writes LammpsData to file. Args: filename (str): Filename. distance (int): No. of significant figures to output for box settings (bounds and tilt) and atomic coordinates. Default to 6. velocity (int): No. of significant figures to output for velocities. Default to 8. charge (int): No. of significant figures to output for charges. Default to 3.
def write(self, text):
    """Write *text* plus a newline to the buffer, colorized when enabled.

    When color is disabled, ``<<NAME>>`` color markers are stripped
    instead. Returns self so calls can be chained.
    """
    if not self.no_color:
        text = self.colorize_text(text)
    else:
        # fixed: raw string avoids invalid '\<' escape-sequence warnings;
        # '<' and '>' need no escaping in a regex.
        pattern = re.compile(r'<<[A-Z]*?>>')
        text = pattern.sub('', text)
    text += '\n'
    self.buffer.write(text)
    return self
Uses curses to print in the fanciest way possible.
def _jws_header(keyid, algorithm):
    """Produce a base64url-encoded JWS header for *keyid* and *algorithm*."""
    header = {
        'typ': 'JWT',
        'alg': algorithm.name,
        'kid': keyid,
    }
    # sort_keys keeps the serialized header deterministic.
    serialized = json.dumps(header, sort_keys=True).encode('utf8')
    return base64url_encode(serialized)
Produce a base64-encoded JWS header.
def _parse_reassign_label(cls, args): argparser = ArgumentParser(prog="cluster reassign_label") argparser.add_argument("destination_cluster", metavar="destination_cluster_id_label", help="id/label of the cluster to move the label to") argparser.add_argument("label", help="label to be moved from the source cluster") arguments = argparser.parse_args(args) return arguments
Parse command line arguments for reassigning label.
def get_summary_page_link(ifo, utc_time):
    """Return HTML linking to the summary page and aLOG for *ifo*.

    Unknown detectors are returned unchanged. *utc_time* supplies year,
    month and day in its first three elements.
    """
    search_form = search_form_string
    data = {'H1': data_h1_string, 'L1': data_l1_string}
    if ifo not in data:
        return ifo
    year, month, day = utc_time[0], utc_time[1], utc_time[2]
    alog_utc = '%02d-%02d-%4d' % (day, month, year)
    ext = '%4d%02d%02d' % (year, month, day)
    summary_links = search_form % (ifo.lower(), ifo.lower(), alog_utc, alog_utc)
    return summary_links + data[ifo] % ext
Return a string that links to the summary page and aLOG for this ifo Parameters ---------- ifo : string The detector name utc_time : sequence First three elements must be strings giving year, month, day resp. Returns ------- return_string : string String containing HTML for links to summary page and aLOG search
def postorder(self, skip_seed=False):
    """Yield the tree's nodes in postorder.

    When skip_seed is True the seed (root) node is omitted.
    """
    for node in self._tree.postorder_node_iter():
        is_skipped_seed = skip_seed and node is self._tree.seed_node
        if not is_skipped_seed:
            yield node
Return a generator that yields the nodes of the tree in postorder. If skip_seed=True then the root node is not included.
def inserir(self, name):
    """Insert a new Brand named *name* and return the API response.

    The response dictionary carries the new brand identifier.
    """
    payload = {'brand': {'name': name}}
    code, xml = self.submit(payload, 'POST', 'brand/')
    return self.response(code, xml)
Inserts a new Brand and returns its identifier :param name: Brand name. String with a minimum 3 and maximum of 100 characters :return: Dictionary with the following structure: :: {'marca': {'id': < id_brand >}} :raise InvalidParameterError: Name is null and invalid. :raise NomeMarcaDuplicadoError: There is already a registered Brand with the value of name. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def make_retrigger_request(repo_name, request_id, auth, count=DEFAULT_COUNT_NUM, priority=DEFAULT_PRIORITY, dry_run=True):
    """Retrigger a request via buildapi self-serve.

    POSTs to /self-serve/{branch}/request with request_id (plus count and
    priority when they differ from the defaults). In dry-run mode the
    request is only logged and None is returned; otherwise the requests
    response object is returned.
    """
    url = '{}/{}/request'.format(SELF_SERVE, repo_name)
    payload = {'request_id': request_id}
    # Only send count/priority when they deviate from the defaults.
    if count != DEFAULT_COUNT_NUM or priority != DEFAULT_PRIORITY:
        payload.update({'count': count, 'priority': priority})
    if dry_run:
        LOG.info('We would make a POST request to %s with the payload: %s' % (url, str(payload)))
        return None
    LOG.info("We're going to re-trigger an existing completed job with request_id: %s %i time(s)." % (request_id, count))
    req = requests.post(
        url,
        headers={'Accept': 'application/json'},
        data=payload,
        auth=auth,
        timeout=TCP_TIMEOUT,
    )
    return req
Retrigger a request using buildapi self-serve. Returns a request. Buildapi documentation: POST /self-serve/{branch}/request Rebuild `request_id`, which must be passed in as a POST parameter. `priority` and `count` are also accepted as optional parameters. `count` defaults to 1, and represents the number of times this build will be rebuilt.
def do_hdr(self, line, hdrs_usr):
    """Initialize self.h2i from *line* when it matches the header marker.

    Initialization happens when no marker is configured (hdr_ex is None)
    or when the marker occurs in *line*. Returns whether it happened.
    """
    should_init = self.hdr_ex is None or self.hdr_ex in line
    if should_init:
        self._init_hdr(line, hdrs_usr)
    return should_init
Initialize self.h2i.
def trim(self):
    """Drop counters that report themselves empty."""
    # Materialize the items first so entries can be deleted while iterating.
    # `iteritems` is presumably a six/py2 compat helper -- confirm import.
    for key, value in list(iteritems(self.counters)):
        if value.empty():
            del self.counters[key]
Clear not used counters
def _onClassAttribute(self, name, line, pos, absPosition, level): attributes = self.objectsStack[level].classAttributes for item in attributes: if item.name == name: return attributes.append(ClassAttribute(name, line, pos, absPosition))
Memorizes a class attribute
def make_filter(self, fieldname, query_func, expct_value):
    """Build a predicate applied to an item's *fieldname* attribute.

    The returned filter short-circuits to False for None values of
    null-sensitive query functions, and returns None for an unknown
    query_func (matching the original elif-chain fallthrough).
    """
    comparators = {
        'eq': lambda value: value == expct_value,
        'ne': lambda value: value != expct_value,
        'lt': lambda value: value < expct_value,
        'lte': lambda value: value <= expct_value,
        'gt': lambda value: value > expct_value,
        'gte': lambda value: value >= expct_value,
        'startswith': lambda value: value.startswith(expct_value),
        'endswith': lambda value: value.endswith(expct_value),
    }
    def actual_filter(item):
        value = getattr(item, fieldname)
        if query_func in NULL_AFFECTED_FILTERS and value is None:
            return False
        comparator = comparators.get(query_func)
        if comparator is not None:
            return comparator(value)
    actual_filter.__doc__ = '{} {} {}'.format('val', query_func, expct_value)
    return actual_filter
makes a filter that will be appliead to an object's property based on query_func
def create_assignment_group(self, course_id, group_weight=None, integration_data=None, name=None, position=None, rules=None, sis_source_id=None):
    """Create a new assignment group for this course via the Canvas API.

    Only parameters explicitly provided (not None) are sent as form data.
    Returns the created assignment group as a single item.
    """
    path = {}
    data = {}
    params = {}
    path["course_id"] = course_id
    if name is not None:
        data["name"] = name
    if position is not None:
        data["position"] = position
    if group_weight is not None:
        data["group_weight"] = group_weight
    if sis_source_id is not None:
        data["sis_source_id"] = sis_source_id
    if integration_data is not None:
        data["integration_data"] = integration_data
    if rules is not None:
        data["rules"] = rules
    self.logger.debug("POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/assignment_groups".format(**path), data=data, params=params, single_item=True)
Create an Assignment Group. Create a new assignment group for this course.
async def unpack(self, ciphertext: bytes) -> (str, str, str):
    """Unpack a ciphertext message.

    Returns (cleartext, sender verkey or None, recipient verkey or None).

    Raises:
        AbsentMessage: no ciphertext supplied.
        AbsentRecord: wallet has no local key to unpack the ciphertext.
    """
    LOGGER.debug('Wallet.unpack >>> ciphertext: %s', ciphertext)

    if not ciphertext:
        # fixed: log message previously said 'Wallet.pack'.
        LOGGER.debug('Wallet.unpack <!< No ciphertext to unpack')
        raise AbsentMessage('No ciphertext to unpack')

    try:
        unpacked = json.loads(await crypto.unpack_message(self.handle, ciphertext))
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.WalletItemNotFound:
            LOGGER.debug('Wallet.unpack <!< Wallet %s has no local key to unpack ciphertext', self.name)
            raise AbsentRecord('Wallet {} has no local key to unpack ciphertext'.format(self.name))
        # fixed: message mixed %-style and {}-style placeholders and
        # omitted the wallet-name argument.
        LOGGER.debug('Wallet.unpack <!< Wallet %s unpack() raised indy error code %s', self.name, x_indy.error_code)
        raise

    rv = (unpacked['message'], unpacked.get('sender_verkey', None), unpacked.get('recipient_verkey', None))
    LOGGER.debug('Wallet.unpack <<< %s', rv)
    return rv
Unpack a message. Return triple with cleartext, sender verification key, and recipient verification key. Raise AbsentMessage for missing ciphertext, or WalletState if wallet is closed. Raise AbsentRecord if wallet has no key to unpack ciphertext. :param ciphertext: JWE-like formatted message as pack() produces :return: cleartext, sender verification key, recipient verification key
def packet_write(self):
    """Write queued outgoing packets to the network.

    Returns a (status, bytes_written) pair. Partial writes leave the
    packet at the head of the queue with its position advanced; EAGAIN /
    EWOULDBLOCK end the flush successfully, ECONNRESET maps to
    ERR_CONN_LOST, any other socket error to ERR_UNKNOWN.
    """
    bytes_written = 0
    if self.sock == NC.INVALID_SOCKET:
        return NC.ERR_NO_CONN, bytes_written
    while len(self.out_packet) > 0:
        pkt = self.out_packet[0]
        write_length, status = nyamuk_net.write(self.sock, pkt.payload)
        if write_length > 0:
            pkt.to_process -= write_length
            pkt.pos += write_length
            bytes_written += write_length
            if pkt.to_process > 0:
                # Partial write: stop here, the rest goes out next call.
                return NC.ERR_SUCCESS, bytes_written
        else:
            if status == errno.EAGAIN or status == errno.EWOULDBLOCK:
                # Socket not ready; not an error, try again later.
                return NC.ERR_SUCCESS, bytes_written
            elif status == errno.ECONNRESET:
                return NC.ERR_CONN_LOST, bytes_written
            else:
                return NC.ERR_UNKNOWN, bytes_written
        # Packet fully written: dequeue and stamp the last send time.
        del self.out_packet[0]
        self.last_msg_out = time.time()
    return NC.ERR_SUCCESS, bytes_written
Write packet to network.
def eliminate_sequential_children(paths):
    "helper for infer_columns. removes paths that are direct children of the n-1 or n-2 path"
    kept = []
    for index, path in enumerate(paths):
        parent = path[:-1]
        child_of_prev = index > 0 and paths[index - 1] == parent
        child_of_prev2 = index > 1 and paths[index - 2] == parent
        if not (child_of_prev or child_of_prev2):
            kept.append(path)
    return kept
helper for infer_columns. removes paths that are direct children of the n-1 or n-2 path
def extract_col_name(string):
    """Split a validation string into (validation, column) parts.

    For example "value_pass_azimuth_max" -> ("value", "azimuth"): the
    known prefix supplies the validation name and everything between the
    prefix and the final underscore is the column. Strings matching no
    prefix are returned unchanged as (string, string).

    NOTE(review): for strings with no segment after the column (e.g.
    "presence_pass_azimuth") the column comes back empty because
    rfind('_') lands inside the prefix -- confirm inputs always carry a
    trailing segment.
    """
    prefixes = ("presence_pass_", "value_pass_", "type_pass_")
    end = string.rfind("_")
    for prefix in prefixes:
        if string.startswith(prefix):
            # prefix[:-6] strips the trailing "_pass_" leaving the validation name.
            return prefix[:-6], string[len(prefix):end]
    return string, string
Take a string and split it. String will be a format like "presence_pass_azimuth", where "azimuth" is the MagIC column name and "presence_pass" is the validation. Return "presence", "azimuth".
def setattr(self, req, ino, attr, to_set, fi):
    """Set file attributes (FUSE callback).

    This filesystem is read-only, so every setattr request is answered
    with EROFS. Valid replies: reply_attr, reply_err.
    """
    self.reply_err(req, errno.EROFS)
Set file attributes Valid replies: reply_attr reply_err
def from_desmond(cls, path, **kwargs):
    """Loads a topology from a Desmond DMS file located at `path`.

    Arguments
    ---------
    path : str
        Path to a Desmond DMS file
    **kwargs
        Forwarded to the constructor. A ``positions`` entry overrides the
        positions read from the DMS file.
    """
    dms = DesmondDMSFile(path)
    # Caller-supplied positions take precedence over those in the file.
    pos = kwargs.pop('positions', dms.getPositions())
    return cls(master=dms, topology=dms.getTopology(), positions=pos, path=path, **kwargs)
Loads a topology from a Desmond DMS file located at `path`. Arguments --------- path : str Path to a Desmond DMS file
def proc_decorator(req_set):
    """Decorator factory for SAS procedure methods.

    Wraps a proc method so that its keyword arguments are normalised and then
    dispatched to SASProcCommons._run_proc together with the required and
    supplied option-name sets.

    :param req_set: set of required statement names for the procedure.
    """
    def decorator(func):
        @wraps(func)
        def inner(self, *args, **kwargs):
            # The SAS procedure name is derived from the wrapped method's name.
            proc = func.__name__.lower()
            # Expose the call's kwargs on the wrapper for introspection.
            inner.proc_decorator = kwargs
            self.logger.debug("processing proc:{}".format(func.__name__))
            self.logger.debug(req_set)
            self.logger.debug("kwargs type: " + str(type(kwargs)))
            # High-performance procs default to no ODS graphics output.
            if proc in ['hplogistic', 'hpreg']:
                kwargs['ODSGraphics'] = kwargs.get('ODSGraphics', False)
            # The SAS proc name differs from the Python method name here.
            if proc == 'hpcluster':
                proc = 'hpclus'
            legal_set = set(kwargs.keys())
            self.logger.debug(legal_set)
            return SASProcCommons._run_proc(self, proc, req_set, legal_set, **kwargs)
        return inner
    return decorator
Decorator factory that wraps a SAS procedure method: it stores the call's keyword arguments on the wrapped function's 'proc_decorator' attribute, normalises the procedure name and options, and dispatches to SASProcCommons._run_proc with the required and supplied option sets.
def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs):
    """Email a list of users.

    Args:
        users (List[User]): List of users; must be non-empty.
        subject (str): Email subject.
        text_body (str): Plain text email body.
        html_body (Optional[str]): HTML email body. Defaults to None.
        sender (Optional[str]): Email sender. Defaults to SMTP username.
        configuration (Optional[Configuration]): Configuration to use.
            Defaults to the configuration of the first user in the list.
        **kwargs: Passed through to the emailer's send, e.g. mail_options,
            rcpt_options (see smtplib documentation).

    Returns:
        None

    Raises:
        ValueError: If no users are supplied.
    """
    if not users:
        raise ValueError('No users supplied')
    # Collect recipient addresses from each user's data dict.
    recipients = [user.data['email'] for user in users]
    if configuration is None:
        configuration = users[0].configuration
    configuration.emailer().send(recipients, subject, text_body,
                                 html_body=html_body, sender=sender, **kwargs)
Email a list of users Args: users (List[User]): List of users subject (str): Email subject text_body (str): Plain text email body html_body (str): HTML email body sender (Optional[str]): Email sender. Defaults to SMTP username. configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list. **kwargs: See below mail_options (List): Mail options (see smtplib documentation) rcpt_options (List): Recipient options (see smtplib documentation) Returns: None
def has_ops_before(self, ts):
    """Determine if there are any ops before ts"""
    # Look for any document whose timestamp is strictly less than ``ts``.
    match = self.coll.find_one({'ts': {'$lt': ts}})
    return bool(match)
Determine if there are any ops before ts
def get_font(self, font):
    """Return the escpos index for `font`.

    Makes sure that the requested `font` is valid for the current profile.
    """
    # Accept the legacy aliases 'a' and 'b' for fonts 0 and 1.
    aliases = {'a': 0, 'b': 1}
    font = aliases.get(font, font)
    if six.text_type(font) not in self.fonts:
        raise NotSupported(
            '"{}" is not a valid font in the current profile'.format(font))
    return font
Return the escpos index for `font`. Makes sure that the requested `font` is valid.
def add(x, y, context=None):
    """Return ``x`` + ``y``.

    Both arguments are implicitly converted to BigFloat; the optional
    ``context`` controls the precision/rounding used for the MPFR addition.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_add,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
Return ``x`` + ``y``.
def save(self, heads, console=True):
    """Create reports in different formats (JUnit XML and HTML).

    :param heads: html table extra values in title rows
    :param console: Boolean, default is True. If set, also print out the
        console summary.
    """
    self._save_junit()
    self._save_html_report(heads)
    if console:
        self._print_console_summary()
Create reports in different formats. :param heads: html table extra values in title rows :param console: Boolean, default is True. If set, also print out the console log.
def has_value_of_type(self, var_type):
    """Does the variable both have the given type and have a variable value
    we can use?

    :param var_type: type name to test against.
    :return: bool
    """
    # Single boolean expression instead of the redundant if/return-False form;
    # bool() keeps the return type a real bool regardless of what the
    # predicate methods return.
    return bool(self.has_value() and self.has_type(var_type))
Does the variable both have the given type and have a variable value we can use?
def call(self, method, *args, **kw):
    """Assemble a JSON-RPC method call.

    In the context of a batch, queue the request and return its id;
    otherwise return the assembled request itself.
    """
    if args and kw:
        raise ValueError("JSON-RPC method calls allow only either named or positional arguments.")
    if not method:
        raise ValueError("JSON-RPC method call requires a method name.")
    params = args or kw or None
    request = self._data_serializer.assemble_request(method, params)
    if not self._in_batch_mode:
        return request
    # Batch mode: queue the request and hand back its id for correlation.
    self._requests.append(request)
    return request.get('id')
In the context of a batch, queue the request and return the request's id; otherwise return the assembled JSON-RPC request itself.
def naive_grouped_rowwise_apply(data, group_labels, func, func_args=(), out=None):
    """Simple implementation of grouped row-wise function application.

    Parameters
    ----------
    data : ndarray[ndim=2]
        Input array over which to apply a grouped function.
    group_labels : ndarray[ndim=2, dtype=int64]
        Labels to use to bucket inputs from array. Should be the same shape
        as array.
    func : function[ndarray[ndim=1]] -> function[ndarray[ndim=1]]
        Function to apply to pieces of each row in array.
    func_args : tuple
        Additional positional arguments to provide to each row in array.
    out : ndarray, optional
        Array into which to write output. If not supplied, a new array of
        the same shape as ``data`` is allocated and returned.
    """
    if out is None:
        out = np.empty_like(data)
    # Apply ``func`` independently to each label-group within each row.
    for row_idx in range(len(data)):
        row = data[row_idx]
        labels = group_labels[row_idx]
        for label in np.unique(labels):
            mask = labels == label
            out[row_idx][mask] = func(row[mask], *func_args)
    return out
Simple implementation of grouped row-wise function application. Parameters ---------- data : ndarray[ndim=2] Input array over which to apply a grouped function. group_labels : ndarray[ndim=2, dtype=int64] Labels to use to bucket inputs from array. Should be the same shape as array. func : function[ndarray[ndim=1]] -> function[ndarray[ndim=1]] Function to apply to pieces of each row in array. func_args : tuple Additional positional arguments to provide to each row in array. out : ndarray, optional Array into which to write output. If not supplied, a new array of the same shape as ``data`` is allocated and returned. Examples -------- >>> data = np.array([[1., 2., 3.], ... [2., 3., 4.], ... [5., 6., 7.]]) >>> labels = np.array([[0, 0, 1], ... [0, 1, 0], ... [1, 0, 2]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min()) array([[ 0., 1., 0.], [ 0., 0., 2.], [ 0., 0., 0.]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum()) array([[ 0.33333333, 0.66666667, 1. ], [ 0.33333333, 1. , 0.66666667], [ 1. , 1. , 1. ]])
def create_key(self, master_secret=b""):
    """Create new private key and return in wif format.

    @param master_secret: Create from master secret, otherwise random.
    """
    # Normalise bytes input to str for the wallet-creation helper.
    master_secret = deserialize.bytes_str(master_secret)
    bip32node = control.create_wallet(self.testnet, master_secret=master_secret)
    return bip32node.wif()
Create new private key and return in wif format. @param: master_secret Create from master secret, otherwise random.
def extract_forward_and_reverse_complement(
        self, forward_reads_to_extract, reverse_reads_to_extract,
        database_fasta_file, output_file):
    """As per extract except also reverse complement the sequences.

    Forward reads are extracted via self.extract(); reverse reads are pulled
    from the database with fxtract, reverse-complemented, and appended to
    ``output_file``.
    """
    self.extract(forward_reads_to_extract, database_fasta_file, output_file)
    # Read names are fed to fxtract on stdin, one per line.
    # NOTE(review): -X and -H flag semantics assumed from fxtract usage
    # elsewhere -- confirm against fxtract documentation.
    cmd_rev = "fxtract -XH -f /dev/stdin '%s'" % database_fasta_file
    output = extern.run(cmd_rev, stdin='\n'.join(reverse_reads_to_extract))
    # Append ('a') so forward extractions written above are preserved.
    with open(output_file, 'a') as f:
        for record in SeqIO.parse(StringIO(output), 'fasta'):
            record.seq = record.reverse_complement().seq
            SeqIO.write(record, f, 'fasta')
As per extract except also reverse complement the sequences.
def _from_dict(cls, _dict): args = {} if 'name' in _dict: args['name'] = _dict.get('name') else: raise ValueError( 'Required property \'name\' not present in ClassifierResult JSON' ) if 'classifier_id' in _dict: args['classifier_id'] = _dict.get('classifier_id') else: raise ValueError( 'Required property \'classifier_id\' not present in ClassifierResult JSON' ) if 'classes' in _dict: args['classes'] = [ ClassResult._from_dict(x) for x in (_dict.get('classes')) ] else: raise ValueError( 'Required property \'classes\' not present in ClassifierResult JSON' ) return cls(**args)
Initialize a ClassifierResult object from a json dictionary.
def conditional_probability_alive(self, frequency, recency, T):
    """Compute conditional probability alive.

    Compute the probability that a customer with history
    (frequency, recency, T) is currently alive.
    From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf

    Parameters
    ----------
    frequency: array or scalar
        historical frequency of customer.
    recency: array or scalar
        historical recency of customer.
    T: array or scalar
        age of the customer.

    Returns
    -------
    array
        value representing a probability
    """
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
    ratio_term = (r + frequency) * np.log((alpha + T) / (alpha + recency))
    odds_term = np.log(a / (b + np.maximum(frequency, 1) - 1))
    proba = expit(-(ratio_term + odds_term))
    # Customers with zero repeat purchases are alive with probability 1.
    return np.atleast_1d(np.where(frequency == 0, 1.0, proba))
Compute conditional probability alive. Compute the probability that a customer with history (frequency, recency, T) is currently alive. From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf Parameters ---------- frequency: array or scalar historical frequency of customer. recency: array or scalar historical recency of customer. T: array or scalar age of the customer. Returns ------- array value representing a probability
def get_curl_command_line(self, method, url, **kwargs):
    """Get request as cURL command line for debugging."""
    # Append any query parameters to the URL first.
    if kwargs.get("query"):
        url = "{}?{}".format(url, d1_common.url.urlencode(kwargs["query"]))
    parts = ["curl"]
    if method.lower() == "head":
        parts.append("--head")
    else:
        parts.append("-X {}".format(method))
    # Sort headers so the generated command is deterministic.
    for header_name, header_value in sorted(kwargs["headers"].items()):
        parts.append('-H "{}: {}"'.format(header_name, header_value))
    parts.append("{}".format(url))
    return " ".join(parts)
Get request as cURL command line for debugging.
def percent_pareto_interactions(records, percentage=0.8):
    """The percentage of user's contacts that account for 80% of its interactions."""
    if len(records) == 0:
        return None
    counts = Counter(record.correspondent_id for record in records)
    # Number of interactions the heaviest contacts must cover.
    remaining = int(math.ceil(sum(counts.values()) * percentage))
    by_frequency = sorted(counts, key=lambda user: counts[user])
    # Pop the most-frequent contacts until the target is covered.
    while remaining > 0 and by_frequency:
        heaviest = by_frequency.pop()
        remaining -= counts[heaviest]
    return (len(counts) - len(by_frequency)) / len(records)
The percentage of user's contacts that account for 80% of its interactions.
def _create_rubber_bands_action(self):
    """Create the toolbar action for toggling scenario-extent rubber bands."""
    icon = resources_path('img', 'icons', 'toggle-rubber-bands.svg')
    self.action_toggle_rubberbands = QAction(
        QIcon(icon),
        self.tr('Toggle Scenario Outlines'),
        self.iface.mainWindow())
    message = self.tr('Toggle rubber bands showing scenario extents.')
    self.action_toggle_rubberbands.setStatusTip(message)
    self.action_toggle_rubberbands.setWhatsThis(message)
    # Checkable action; the initial state is restored from saved settings.
    self.action_toggle_rubberbands.setCheckable(True)
    flag = setting('showRubberBands', False, expected_type=bool)
    self.action_toggle_rubberbands.setChecked(flag)
    self.action_toggle_rubberbands.triggered.connect(
        self.dock_widget.toggle_rubber_bands)
    self.add_action(self.action_toggle_rubberbands)
Create action for toggling rubber bands.
def render_value_for_node(node_id):
    """Wrap render_node for usage in operate scripts.

    Returns the value without the template rendered, or None when the node
    row cannot be fetched.
    """
    value = None
    result = []
    try:
        result = db.execute(text(fetch_query_string('select_node_from_id.sql')), node_id=node_id).fetchall()
    except DatabaseError as err:
        # Log and fall through; the caller gets None.
        current_app.logger.error("DatabaseError: %s", err)
    if result:
        # Build keyword args from the first row's columns.
        kw = dict(zip(result[0].keys(), result[0].values()))
        # '_no_template' skips template rendering and returns the raw value.
        value = render_node(node_id, noderequest={'_no_template':True}, **kw)
    return value
Wrap render_node for usage in operate scripts. Returns without template rendered.
def hardware_version(self):
    """Return the embedded hardware version string for this tile.

    The hardware version is an up to 10 byte user readable string meant to
    encode information about the specific hardware this tile runs on.
    NUL bytes in the returned buffer are treated as padding and stripped.

    Returns:
        str: The hardware version read from the tile.
    """
    res = self.rpc(0x00, 0x02, result_type=(0, True))
    binary_version = res['buffer']
    # Single-pass join instead of quadratic += string concatenation.
    return "".join(chr(x) for x in binary_version if x != 0)
Return the embedded hardware version string for this tile. The hardware version is an up to 10 byte user readable string that is meant to encode any necessary information about the specific hardware that this tile is running on. For example, if you have multiple assembly variants of a given tile, you could encode that information here. Returns: str: The hardware version read from the tile.
def _calcidxs(func):
    """Return the index array computed by applying ``func`` to every date of
    the initialisation Timegrid handled by module `pub`.

    Raises a RuntimeError if no Timegrids object is available.
    """
    timegrids = hydpy.pub.get('timegrids')
    if timegrids is None:
        raise RuntimeError(
            'An Indexer object has been asked for an %s array. Such an '
            'array has neither been determined yet nor can it be '
            'determined automatically at the moment. Either define an '
            '%s array manually and pass it to the Indexer object, or make '
            'a proper Timegrids object available within the pub module. '
            'In usual HydPy applications, the latter is done '
            'automatically.' % (func.__name__, func.__name__))
    # One index entry per timestep of the initialisation period.
    idxs = numpy.empty(len(timegrids.init), dtype=int)
    for jdx, date in enumerate(hydpy.pub.timegrids.init):
        idxs[jdx] = func(date)
    return idxs
Return the required indexes based on the given lambda function and the |Timegrids| object handled by module |pub|. Raise a |RuntimeError| if the latter is not available.
def verbose(self):
    """Report whether verbose output is enabled for the iperf3 instance.

    :rtype: bool
    """
    # Normalise the C library's truthy int directly to a real bool instead
    # of the four-line if/else.
    self._verbose = bool(self.lib.iperf_get_verbose(self._test))
    return self._verbose
Toggles verbose output for the iperf3 instance :rtype: bool
def score_n1(matrix, matrix_size):
    """Implements penalty score feature 1.

    ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results,
    Table 11 (page 54): a run of (5 + i) same-coloured modules in a
    row/column scores N1 + i points, with N1 = 3.

    :param matrix: The matrix to evaluate
    :param matrix_size: The width (or height) of the matrix.
    :return int: The penalty score (feature 1) of the matrix.
    """
    def run_points(run_length):
        # A run of length >= 5 scores its length minus 2 (== N1 + (len - 5)).
        return run_length - 2 if run_length >= 5 else 0

    total = 0
    for i in range(matrix_size):
        last_row_bit = -1
        last_col_bit = -1
        row_run = 0
        col_run = 0
        for j in range(matrix_size):
            # Scan row i left-to-right.
            row_bit = matrix[i][j]
            if row_bit == last_row_bit:
                row_run += 1
            else:
                total += run_points(row_run)
                row_run = 1
                last_row_bit = row_bit
            # Scan column i top-to-bottom in the same pass.
            col_bit = matrix[j][i]
            if col_bit == last_col_bit:
                col_run += 1
            else:
                total += run_points(col_run)
                col_run = 1
                last_col_bit = col_bit
        # Account for runs that reach the matrix edge.
        total += run_points(row_run)
        total += run_points(col_run)
    return total
\ Implements the penalty score feature 1. ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results - Table 11 (page 54) ============================================ ======================== ====== Feature Evaluation condition Points ============================================ ======================== ====== Adjacent modules in row/column in same color No. of modules = (5 + i) N1 + i ============================================ ======================== ====== N1 = 3 :param matrix: The matrix to evaluate :param matrix_size: The width (or height) of the matrix. :return int: The penalty score (feature 1) of the matrix.
def _jitter(c, magnitude:uniform):
    "Replace pixels by random neighbors at `magnitude`."
    # Uniform noise in [-magnitude, magnitude), added in place to the flow.
    noise = (torch.rand_like(c.flow) - 0.5) * magnitude * 2
    c.flow.add_(noise)
    return c
Replace pixels by random neighbors at `magnitude`.
def _create_user(self, email, password, is_superuser, **extra_fields):
    """Create, save and return a new user with the given email and password.

    Raises:
        ValueError: If no email is given.
    """
    now = timezone.now()
    if not email:
        raise ValueError('The given email must be set')
    email = self.normalize_email(email)
    user = self.model(
        email=email, password=password,
        is_active=True, is_superuser=is_superuser,
        last_login=now, date_joined=now, **extra_fields)
    # set_password hashes the raw password (the constructor stored it verbatim).
    user.set_password(password)
    user.save(using=self._db)
    return user
Create new user
def get_appliance_event_after_time(self, location_id, since, per_page=None, page=None, min_power=None):
    """Get appliance events by location Id after defined time.

    Args:
        location_id (string): hexadecimal id of the sensor to query,
            e.g. ``0x0013A20040B65FAD``
        since (string): ISO 8601 start time; only events created or updated
            after it are returned. Maximum value allowed is 1 day from the
            current time.
        min_power (string): minimum average power (in watts) for filtering.
        per_page (string, optional): number of results per page
            (min 1, max 500).
        page (string, optional): page number to return (min 1, max 100000).

    Returns:
        list: dictionary objects containing appliance events meeting the
        specified criteria
    """
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    params = {
        "locationId": location_id,
        "since": since
    }
    # Only include optional filters the caller actually supplied.
    if min_power:
        params["minPower"] = min_power
    if per_page:
        params["perPage"] = per_page
    if page:
        params["page"] = page
    url = self.__append_url_params(
        "https://api.neur.io/v1/appliances/events", params)
    response = requests.get(url, headers=headers)
    return response.json()
Get appliance events by location Id after defined time. Args: location_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` since (string): ISO 8601 start time for getting the events that are created or updated after it. Maxiumim value allowed is 1 day from the current time. min_power (string): The minimum average power (in watts) for filtering. Only events with an average power above this value will be returned. (default: 400) per_page (string, optional): the number of returned results per page (min 1, max 500) (default: 10) page (string, optional): the page number to return (min 1, max 100000) (default: 1) Returns: list: dictionary objects containing appliance events meeting specified criteria
def get_term(self,term_id):
    """Returns the term object for the supplied identifier.

    @type term_id: string
    @param term_id: term identifier
    """
    # Guard clause: unknown identifiers yield None.
    if term_id not in self.idx:
        return None
    return Cterm(self.idx[term_id], self.type)
Returns the term object for the supplied identifier @type term_id: string @param term_id: term identifier
def remove_prohibited_element(tag_name, document_element):
    """To fit the Evernote DTD need, drop every element with this tag name."""
    # getElementsByTagName returns a snapshot list, so removal while
    # iterating is safe.
    for element in document_element.getElementsByTagName(tag_name):
        element.parentNode.removeChild(element)
To fit the Evernote DTD need, drop this tag name
def Process(
    self, parser_mediator, cache=None, database=None, **unused_kwargs):
    """Parses a SQLite database.

    Runs each of the plugin's QUERIES against the database and hands the
    results to the query's callback method.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        cache (Optional[SQLiteCache]): cache.
        database (Optional[SQLiteDatabase]): database.

    Raises:
        ValueError: If the database or cache value are missing.
    """
    if cache is None:
        raise ValueError('Missing cache value.')
    if database is None:
        raise ValueError('Missing database value.')
    super(SQLitePlugin, self).Process(parser_mediator)
    for query, callback_method in self.QUERIES:
        # Honour an abort request between queries.
        if parser_mediator.abort:
            break
        callback = getattr(self, callback_method, None)
        if callback is None:
            # Misconfigured plugin: warn and skip this query.
            logger.warning(
                '[{0:s}] missing callback method: {1:s} for query: {2:s}'.format(
                    self.NAME, callback_method, query))
            continue
        self._ParseQuery(parser_mediator, database, query, callback, cache)
Parses a SQLite database. Runs each of the plugin's queries against the database and hands the results to the query's callback method, honouring abort requests between queries. Args: parser_mediator (ParserMediator): parser mediator. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database. Raises: ValueError: If the database or cache value are missing.
def initialize_from_bucket(self):
    """Initialize DatasetInfo from GCS bucket info files."""
    # Download the info files into a temporary directory, then read them as
    # a normal on-disk dataset-info directory.
    tmp_dir = tempfile.mkdtemp("tfds")
    data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
    # Nothing published for this dataset: leave the info untouched.
    if not data_files:
        return
    logging.info("Loading info from GCS for %s", self.full_name)
    for fname in data_files:
        out_fname = os.path.join(tmp_dir, os.path.basename(fname))
        gcs_utils.download_gcs_file(fname, out_fname)
    self.read_from_directory(tmp_dir)
Initialize DatasetInfo from GCS bucket info files.
def _handle_response(response): if not str(response.status_code).startswith('2'): raise KucoinAPIException(response) try: res = response.json() if 'code' in res and res['code'] != "200000": raise KucoinAPIException(response) if 'success' in res and not res['success']: raise KucoinAPIException(response) if 'data' in res: res = res['data'] return res except ValueError: raise KucoinRequestException('Invalid Response: %s' % response.text)
Internal helper for handling API responses from the Quoine server. Raises the appropriate exceptions when necessary; otherwise, returns the response.
def reset(self):
    """Resets references and per-compilation state."""
    self.indchar = None
    self.comments = {}
    self.refs = []
    self.set_skips([])
    self.docstring = ""
    self.ichain_count = 0
    self.tre_store_count = 0
    self.case_check_count = 0
    self.stmt_lambdas = []
    if self.strict:
        # Strict mode additionally tracks unused imports.
        self.unused_imports = set()
    self.bind()
Resets references.
def heptad_register(self):
    """Returns the calculated register of the coiled coil and the fit
    quality."""
    base_reg = 'abcdefg'
    # Repeat the register pattern so any start offset fits within cc_len.
    exp_base = base_reg * (self.cc_len//7+2)
    ave_ca_layers = self.calc_average_parameters(self.ca_layers)[0][:-1]
    reg_fit = fit_heptad_register(ave_ca_layers)
    # reg_fit[0][0] is the best-fitting heptad start position.
    hep_pos = reg_fit[0][0]
    return exp_base[hep_pos:hep_pos+self.cc_len], reg_fit[0][1:]
Returns the calculated register of the coiled coil and the fit quality.
def set_exception(self, exception):
    """Signal unsuccessful completion."""
    handled = self._finish(self.errbacks, exception)
    if handled:
        return
    # No errback consumed the exception: surface it on stderr.
    traceback.print_exception(
        type(exception), exception, exception.__traceback__)
Signal unsuccessful completion.
def save_veto_definer(cp, out_dir, tags=None):
    """Retrieve the veto definer file and save it locally.

    Parameters
    -----------
    cp : ConfigParser instance
    out_dir : path
    tags : list of strings
        Used to retrieve subsections of the ini file for configuration
        options.

    Returns
    -------
    str
        Absolute path of the locally saved veto definer file.
    """
    if tags is None:
        tags = []
    make_analysis_dir(out_dir)
    veto_def_url = cp.get_opt_tags("workflow-segments", "segments-veto-definer-url", tags)
    veto_def_base_name = os.path.basename(veto_def_url)
    veto_def_new_path = os.path.abspath(os.path.join(out_dir, veto_def_base_name))
    resolve_url(veto_def_url,out_dir)
    # Record the local path back into the config so later stages use it.
    cp.set("workflow-segments", "segments-veto-definer-file", veto_def_new_path)
    return veto_def_new_path
Retrieve the veto definer file and save it locally Parameters ----------- cp : ConfigParser instance out_dir : path tags : list of strings Used to retrieve subsections of the ini file for configuration options.
def print_label(self, package_num=None):
    """Prints all of a shipment's labels, or optionally just one.

    @type package_num: L{int}
    @param package_num: 0-based index of the package to print. This is
        only useful for shipments with more than one package.
    """
    details = self.shipment.response.CompletedShipmentDetail.CompletedPackageDetails
    # 'is not None' (not truthiness) so that index 0 selects the first
    # package instead of silently printing every label.
    if package_num is not None:
        packages = [details[package_num]]
    else:
        packages = details
    for package in packages:
        # Label image data is base64-encoded printer binary.
        label_binary = binascii.a2b_base64(package.Label.Parts[0].Image)
        self._print_base64(label_binary)
Prints all of a shipment's labels, or optionally just one. @type package_num: L{int} @param package_num: 0-based index of the package to print. This is only useful for shipments with more than one package.
def execute_cross_join(op, left, right, **kwargs):
    """Execute a cross join in pandas.

    Notes
    -----
    We create a dummy column of all True instances and use that as the
    join key. This results in the desired Cartesian product behavior
    guaranteed by cross join.
    """
    # Random GUID suffix avoids colliding with a real column name.
    key = "cross_join_{}".format(ibis.util.guid())
    join_key = {key: True}
    new_left = left.assign(**join_key)
    new_right = right.assign(**join_key)
    result = pd.merge(
        new_left,
        new_right,
        how='inner',
        on=key,
        copy=False,
        suffixes=constants.JOIN_SUFFIXES,
    )
    # Drop the synthetic key column before returning.
    del result[key]
    return result
Execute a cross join in pandas. Notes ----- We create a dummy column of all :data:`True` instances and use that as the join key. This results in the desired Cartesian product behavior guaranteed by cross join.
def charge_transfer_to_string(self):
    """Return the charge transfer data formatted as a string."""
    transfer = self.charge_transfer
    lines = ['\nCharge Transfer\n\nabsorbing atom']
    # Entries are keyed by stringified sequential indices '0', '1', ...
    for index in range(len(transfer)):
        for atom_symbol, orbitals in transfer[str(index)].items():
            lines.append('\n')
            lines.append(atom_symbol)
            lines.append('\n')
            for orbital in ('s', 'p', 'd', 'f', 'tot'):
                lines.append('{} '.format(orbital))
                lines.append(str(orbitals[orbital]))
                lines.append('\n')
    return ''.join(lines)
Returns the charge transfer data formatted as a string.
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
    """Re-evaluate the obj with a groupby aggregation."""
    if grouper is None:
        # Build the binner lazily and use the resampler's own grouper.
        self._set_binner()
        grouper = self.grouper
    obj = self._selected_obj
    grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
    try:
        if isinstance(obj, ABCDataFrame) and callable(how):
            # DataFrame + callable: aggregate column by column.
            result = grouped._aggregate_item_by_item(how, *args, **kwargs)
        else:
            result = grouped.aggregate(how, *args, **kwargs)
    except Exception:
        # Aggregation failed; fall back to the slower apply path.
        result = grouped.apply(how, *args, **kwargs)
    result = self._apply_loffset(result)
    return self._wrap_result(result)
Re-evaluate the obj with a groupby aggregation.
def prog(text):
    """Decorator used to specify the program name for the console script
    help message.

    :param text: The text to use for the program name.
    """
    def decorator(func):
        # Record the program name on the function's script adaptor.
        ScriptAdaptor._get_adaptor(func).prog = text
        return func
    return decorator
Decorator used to specify the program name for the console script help message. :param text: The text to use for the program name.
def get_relationships_for_idents(self, cid, idents):
    """Get relationships between ``idents`` and a ``cid``.

    Returns a dictionary mapping each identifier found in the label scan to
    the relationship strength of its label with ``cid``; identifiers with
    no stored label are absent from the result.
    """
    keys = [(cid, ident,) for ident in idents]
    # Point lookups: each range starts and ends at the same key.
    key_ranges = zip(keys, keys)
    mapping = {}
    for k, v in self.kvl.scan(self.TABLE, *key_ranges):
        label = self._label_from_kvlayer(k, v)
        ident = label.other(cid)
        # (fixed) read the strength directly; the old code assigned an
        # unused local before re-reading the same attribute.
        mapping[ident] = label.rel_strength
    return mapping
Get relationships between ``idents`` and a ``cid``. Returns a dictionary mapping the identifiers in ``idents`` to either None, if no relationship label is found between the identifier and ``cid``, or a RelationshipType classifying the strength of the relationship between the identifier and ``cid``.
def compose_item_handle(tokens):
    """Process function composition."""
    if len(tokens) < 1:
        raise CoconutInternalException("invalid function composition tokens", tokens)
    if len(tokens) == 1:
        return tokens[0]
    # f..g..h composes right-to-left, so reverse into forward composition.
    reversed_funcs = ", ".join(reversed(tokens))
    return "_coconut_forward_compose(" + reversed_funcs + ")"
Process function composition.