text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def generate(self, *args, **kwargs): """ Implementation for the generate method defined in ReportBase. Generates a html report and saves it. :param args: 1 argument, which is the filename :param kwargs: 3 keyword arguments with keys 'title', 'heads' and 'refresh' :return: Nothing. """ title = kwargs.get("title") heads = kwargs.get("heads") refresh = kwargs.get("refresh") filename = args[0] report = self._create(title, heads, refresh, path_start=os.path.dirname(filename)) ReportHtml.save(report, filename)
[ "def", "generate", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "title", "=", "kwargs", ".", "get", "(", "\"title\"", ")", "heads", "=", "kwargs", ".", "get", "(", "\"heads\"", ")", "refresh", "=", "kwargs", ".", "get", "(", ...
40
15.066667
def set_user_profile(self, displayname=None, avatar_url=None, reason="Changing room profile information"): """Set user profile within a room. This sets displayname and avatar_url for the logged in user only in a specific room. It does not change the user's global user profile. """ member = self.client.api.get_membership(self.room_id, self.client.user_id) if member["membership"] != "join": raise Exception("Can't set profile if you have not joined the room.") if displayname is None: displayname = member["displayname"] if avatar_url is None: avatar_url = member["avatar_url"] self.client.api.set_membership( self.room_id, self.client.user_id, 'join', reason, { "displayname": displayname, "avatar_url": avatar_url } )
[ "def", "set_user_profile", "(", "self", ",", "displayname", "=", "None", ",", "avatar_url", "=", "None", ",", "reason", "=", "\"Changing room profile information\"", ")", ":", "member", "=", "self", ".", "client", ".", "api", ".", "get_membership", "(", "self"...
39.2
15.88
def _get_available_extensions(): """Get a list of available file extensions to make it easy for tab-completion and exception handling. """ extensions = [] # from filenames parsers_dir = os.path.join(os.path.dirname(__file__)) glob_filename = os.path.join(parsers_dir, "*" + _FILENAME_SUFFIX + ".py") ext_re = re.compile(glob_filename.replace('*', "(?P<ext>\w+)")) for filename in glob.glob(glob_filename): ext_match = ext_re.match(filename) ext = ext_match.groups()[0] extensions.append(ext) extensions.append('.' + ext) # from relevant synonyms (don't use the '' synonym) for ext in EXTENSION_SYNONYMS.keys(): if ext: extensions.append(ext) extensions.append(ext.replace('.', '', 1)) extensions.sort() return extensions
[ "def", "_get_available_extensions", "(", ")", ":", "extensions", "=", "[", "]", "# from filenames", "parsers_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "glob_filename", "=", "os", ".", "p...
35.478261
14.652174
def _parse_args(): """ Parses the command line arguments. :return: Namespace with arguments. :rtype: Namespace """ parser = argparse.ArgumentParser(description='rain - a new sort of automated builder.') parser.add_argument('action', help='what shall we do?', default='build', nargs='?', choices=['build', 'ls', 'keep'] + removal_cmds) parser.add_argument('-c', '--count', type=int, default=1, help='a count of items on which to operate. [default: %(default)s]') parser.add_argument('--keep', type=int, default=-1, help='how many builds should we keep around? [default: %(default)s]') parser.add_argument('-v', '--verbose', action='count', default=0, help='Be more verbose. (can be repeated)') parser.add_argument('--version', default=False, action='store_true', help='print version number and exit. [default: %(default)s]') return parser.parse_args()
[ "def", "_parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'rain - a new sort of automated builder.'", ")", "parser", ".", "add_argument", "(", "'action'", ",", "help", "=", "'what shall we do?'", ",", "default",...
40.230769
28.384615
def screenshots_done(self, jobid): """ Return true if the screenshots job is done """ resp = self.session.get(os.path.join(self.api_url, '{0}.json'.format(jobid))) resp = self._process_response(resp) return True if resp.json()['state'] == 'done' else False
[ "def", "screenshots_done", "(", "self", ",", "jobid", ")", ":", "resp", "=", "self", ".", "session", ".", "get", "(", "os", ".", "path", ".", "join", "(", "self", ".", "api_url", ",", "'{0}.json'", ".", "format", "(", "jobid", ")", ")", ")", "resp"...
42.571429
12.571429
def update_cov(self): """Recursively compute the covariance matrix for the multivariate normal proposal distribution. This method is called every self.interval once self.delay iterations have been performed. """ scaling = (2.4) ** 2 / self.dim # Gelman et al. 1996. epsilon = 1.0e-5 chain = np.asarray(self._trace) # Recursively compute the chain mean self.C, self.chain_mean = self.recursive_cov(self.C, self._trace_count, self.chain_mean, chain, scaling=scaling, epsilon=epsilon) # Shrink covariance if acceptance rate is too small acc_rate = self.accepted / (self.accepted + self.rejected) if self.shrink_if_necessary: if acc_rate < .001: self.C *= .01 elif acc_rate < .01: self.C *= .25 if self.verbose > 1: if acc_rate < .01: print_( '\tAcceptance rate was', acc_rate, 'shrinking covariance') self.accepted = 0. self.rejected = 0. if self.verbose > 1: print_("\tUpdating covariance ...\n", self.C) print_("\tUpdating mean ... ", self.chain_mean) # Update state adjustmentwarning = '\n' +\ 'Covariance was not positive definite and proposal_sd cannot be computed by \n' + \ 'Cholesky decomposition. The next jumps will be based on the last \n' + \ 'valid covariance matrix. This situation may have arisen because no \n' + \ 'jumps were accepted during the last `interval`. One solution is to \n' + \ 'increase the interval, or specify an initial covariance matrix with \n' + \ 'a smaller variance. For this simulation, each time a similar error \n' + \ 'occurs, proposal_sd will be reduced by a factor .9 to reduce the \n' + \ 'jumps and increase the likelihood of accepted jumps.' try: self.updateproposal_sd() except np.linalg.LinAlgError: warnings.warn(adjustmentwarning) self.covariance_adjustment(.9) self._trace_count += len(self._trace) self._trace = []
[ "def", "update_cov", "(", "self", ")", ":", "scaling", "=", "(", "2.4", ")", "**", "2", "/", "self", ".", "dim", "# Gelman et al. 1996.", "epsilon", "=", "1.0e-5", "chain", "=", "np", ".", "asarray", "(", "self", ".", "_trace", ")", "# Recursively comput...
41.454545
21.945455
def build_actions(self): """Create an ActionCollection that will perform sanity checks, copy the file, create a database entry and perform cleanup actions and in case of a failure clean everything up. :param work: the workfile :type work: :class:`JB_File` :param release: the releasefile :type release: :class:`JB_File` :param checks: the action collection object with sanity checks It should accept a :class:`JB_File` as object for execute. :type checks: :class:`ActionCollection` :param cleanup: a action collection object that holds cleanup actions for the given file. It should accept a :class:`JB_File` as object for execute. :type cleanup: :class:`ActionCollection` :param comment: comment for the release :type comment: :class:`str` :returns: An ActionCollection ready to execute. :rtype: :class:`ActionCollection` :raises: None """ checkau = ActionUnit("Sanity Checks", "Check the workfile. If the file is not conform, ask the user to continue.", self.sanity_check) copyau = ActionUnit("Copy File", "Copy the workfile to the releasefile location.", self.copy, depsuccess=[checkau]) dbau = ActionUnit("Create DB entry", "Create an entry in the database for the releasefile", self.create_db_entry, depsuccess=[copyau]) cleanau = ActionUnit("Cleanup", "Cleanup the releasefile. If something fails, ask the user to continue.", self.cleanup, depsuccess=[dbau]) deletefau1 = ActionUnit("Delete the releasefile.", "In case the db entry creation fails, delete the releasefile.", self.delete_releasefile, depfail=[dbau]) deletefau2 = ActionUnit("Delete the releasefile.", "In case the cleanup fails, delete the releasefile.", self.delete_releasefile, depsuccess=[copyau], depfail=[cleanau]) deletedbau = ActionUnit("Delete the database entry.", "In case the cleanup fails, delete the database entry", self.delete_db_entry, depsuccess=[dbau], depfail=[cleanau]) return ActionCollection([checkau, copyau, dbau, cleanau, deletefau1, deletefau2, deletedbau])
[ "def", "build_actions", "(", "self", ")", ":", "checkau", "=", "ActionUnit", "(", "\"Sanity Checks\"", ",", "\"Check the workfile. If the file is not conform, ask the user to continue.\"", ",", "self", ".", "sanity_check", ")", "copyau", "=", "ActionUnit", "(", "\"Copy Fi...
56.06
19.58
def get_reversed_statuses(context): """Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings. """ _rev = {v: k for k, v in STATUSES.items()} _rev.update(dict(context.config['reversed_statuses'])) return _rev
[ "def", "get_reversed_statuses", "(", "context", ")", ":", "_rev", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "STATUSES", ".", "items", "(", ")", "}", "_rev", ".", "update", "(", "dict", "(", "context", ".", "config", "[", "'reversed_statuse...
28.307692
21.769231
def case_insensitive_file_search(directory, pattern): """ Looks for file with pattern with case insensitive search """ try: return os.path.join( directory, [filename for filename in os.listdir(directory) if re.search(pattern, filename, re.IGNORECASE)][0]) except IndexError: print("{0} not found".format(pattern)) raise
[ "def", "case_insensitive_file_search", "(", "directory", ",", "pattern", ")", ":", "try", ":", "return", "os", ".", "path", ".", "join", "(", "directory", ",", "[", "filename", "for", "filename", "in", "os", ".", "listdir", "(", "directory", ")", "if", "...
32.416667
15.75
def extract_uhs(dstore, what): """ Extracts uniform hazard spectra. Use it as /extract/uhs?kind=mean or /extract/uhs?kind=rlz-0, etc """ info = get_info(dstore) if what == '': # npz exports for QGIS sitecol = dstore['sitecol'] mesh = get_mesh(sitecol, complete=False) dic = {} for stat, s in info['stats'].items(): hmap = dstore['hmaps-stats'][:, s] dic[stat] = calc.make_uhs(hmap, info) yield from hazard_items( dic, mesh, investigation_time=info['investigation_time']) return params = parse(what, info) periods = [] for m, imt in enumerate(info['imtls']): if imt == 'PGA' or imt.startswith('SA'): periods.append(m) if 'site_id' in params: sids = params['site_id'] else: sids = ALL if params['rlzs']: dset = dstore['hmaps-rlzs'] for k in params['k']: yield ('rlz-%03d' % k, hdf5.extract(dset, sids, k, periods, ALL)[:, 0]) else: dset = dstore['hmaps-stats'] stats = list(info['stats']) for k in params['k']: yield stats[k], hdf5.extract(dset, sids, k, periods, ALL)[:, 0] yield from params.items()
[ "def", "extract_uhs", "(", "dstore", ",", "what", ")", ":", "info", "=", "get_info", "(", "dstore", ")", "if", "what", "==", "''", ":", "# npz exports for QGIS", "sitecol", "=", "dstore", "[", "'sitecol'", "]", "mesh", "=", "get_mesh", "(", "sitecol", ",...
33.972222
13.305556
def add_method(self, pattern): """Decorator to add new dispatch functions.""" def wrap(f): def frozen_function(class_instance, f): def _(pattern, *args, **kwargs): return f(class_instance, pattern, *args, **kwargs) return _ self.functions.append((frozen_function(self, f), pattern)) return f return wrap
[ "def", "add_method", "(", "self", ",", "pattern", ")", ":", "def", "wrap", "(", "f", ")", ":", "def", "frozen_function", "(", "class_instance", ",", "f", ")", ":", "def", "_", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "...
31.230769
22
def values(obj, glob, separator="/", afilter=None, dirs=True): """ Given an object and a path glob, return an array of all values which match the glob. The arguments to this function are identical to those of search(), and it is primarily a shorthand for a list comprehension over a yielded search call. """ return [x[1] for x in dpath.util.search(obj, glob, yielded=True, separator=separator, afilter=afilter, dirs=dirs)]
[ "def", "values", "(", "obj", ",", "glob", ",", "separator", "=", "\"/\"", ",", "afilter", "=", "None", ",", "dirs", "=", "True", ")", ":", "return", "[", "x", "[", "1", "]", "for", "x", "in", "dpath", ".", "util", ".", "search", "(", "obj", ","...
55.375
29.625
def update(self, query_name, saved_query_attributes): """ Given a dict of attributes to be updated, update only those attributes in the Saved Query at the resource given by 'query_name'. This will perform two HTTP requests--one to fetch the query definition, and one to set the new attributes. This method will intend to preserve any other properties on the query. Master key must be set. """ query_name_attr_name = "query_name" refresh_rate_attr_name = "refresh_rate" query_attr_name = "query" metadata_attr_name = "metadata" old_saved_query = self.get(query_name) # Create a new query def to send back. We cannot send values for attributes like 'urls', # 'last_modified_date', 'run_information', etc. new_saved_query = { query_name_attr_name: old_saved_query[query_name_attr_name], # expected refresh_rate_attr_name: old_saved_query[refresh_rate_attr_name], # expected query_attr_name: {} } # If metadata was set, preserve it. The Explorer UI currently stores information here. old_metadata = (old_saved_query[metadata_attr_name] if metadata_attr_name in old_saved_query else None) if old_metadata: new_saved_query[metadata_attr_name] = old_metadata # Preserve any non-empty properties of the existing query. We get back values like None # for 'group_by', 'interval' or 'timezone', but those aren't accepted values when updating. old_query = old_saved_query[query_attr_name] # expected # Shallow copy since we want the entire object heirarchy to start with. for (key, value) in six.iteritems(old_query): if value: new_saved_query[query_attr_name][key] = value # Now, recursively overwrite any attributes passed in. SavedQueriesInterface._deep_update(new_saved_query, saved_query_attributes) return self.create(query_name, new_saved_query)
[ "def", "update", "(", "self", ",", "query_name", ",", "saved_query_attributes", ")", ":", "query_name_attr_name", "=", "\"query_name\"", "refresh_rate_attr_name", "=", "\"refresh_rate\"", "query_attr_name", "=", "\"query\"", "metadata_attr_name", "=", "\"metadata\"", "old...
43.617021
26.723404
def write_to_file(self, file_path='', date=(datetime.date.today()), organization='llnl'): """ Writes stargazers data to file. """ with open(file_path, 'w+') as out: out.write('date,organization,stargazers\n') sorted_stargazers = sorted(self.stargazers)#sort based on lowercase for star in sorted_stargazers: out.write(star + ',' + str(self.stargazers[star]) + '\n') out.close()
[ "def", "write_to_file", "(", "self", ",", "file_path", "=", "''", ",", "date", "=", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "organization", "=", "'llnl'", ")", ":", "with", "open", "(", "file_path", ",", "'w+'", ")", "as", "...
42.454545
13.727273
def plot_subtract_from_data_all(self): """ subtract model components from data :return: """ f, axes = plt.subplots(2, 3, figsize=(16, 8)) self.subtract_from_data_plot(ax=axes[0, 0], text='Data') self.subtract_from_data_plot(ax=axes[0, 1], text='Data - Point Source', point_source_add=True) self.subtract_from_data_plot(ax=axes[0, 2], text='Data - Lens Light', lens_light_add=True) self.subtract_from_data_plot(ax=axes[1, 0], text='Data - Source Light', source_add=True) self.subtract_from_data_plot(ax=axes[1, 1], text='Data - Source Light - Point Source', source_add=True, point_source_add=True) self.subtract_from_data_plot(ax=axes[1, 2], text='Data - Lens Light - Point Source', lens_light_add=True, point_source_add=True) f.tight_layout() f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05) return f, axes
[ "def", "plot_subtract_from_data_all", "(", "self", ")", ":", "f", ",", "axes", "=", "plt", ".", "subplots", "(", "2", ",", "3", ",", "figsize", "=", "(", "16", ",", "8", ")", ")", "self", ".", "subtract_from_data_plot", "(", "ax", "=", "axes", "[", ...
53.842105
31.631579
def refresh_token( self, token_url, refresh_token=None, body="", auth=None, timeout=None, headers=None, verify=True, proxies=None, **kwargs ): """Fetch a new access token using a refresh token. :param token_url: The token endpoint, must be HTTPS. :param refresh_token: The refresh_token to use. :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by `requests`. :param timeout: Timeout of the request in seconds. :param headers: A dict of headers to be used by `requests`. :param verify: Verify SSL certificate. :param proxies: The `proxies` argument will be passed to `requests`. :param kwargs: Extra parameters to include in the token request. :return: A token dict """ if not token_url: raise ValueError("No token endpoint set for auto_refresh.") if not is_secure_transport(token_url): raise InsecureTransportError() refresh_token = refresh_token or self.token.get("refresh_token") log.debug( "Adding auto refresh key word arguments %s.", self.auto_refresh_kwargs ) kwargs.update(self.auto_refresh_kwargs) body = self._client.prepare_refresh_body( body=body, refresh_token=refresh_token, scope=self.scope, **kwargs ) log.debug("Prepared refresh token request body %s", body) if headers is None: headers = { "Accept": "application/json", "Content-Type": ("application/x-www-form-urlencoded;charset=UTF-8"), } r = self.post( token_url, data=dict(urldecode(body)), auth=auth, timeout=timeout, headers=headers, verify=verify, withhold_token=True, proxies=proxies, ) log.debug("Request to refresh token completed with status %s.", r.status_code) log.debug("Response headers were %s and content %s.", r.headers, r.text) log.debug( "Invoking %d token response hooks.", len(self.compliance_hook["refresh_token_response"]), ) for hook in self.compliance_hook["refresh_token_response"]: log.debug("Invoking hook %s.", hook) r = hook(r) self.token = 
self._client.parse_request_body_response(r.text, scope=self.scope) if not "refresh_token" in self.token: log.debug("No new refresh token given. Re-using old.") self.token["refresh_token"] = refresh_token return self.token
[ "def", "refresh_token", "(", "self", ",", "token_url", ",", "refresh_token", "=", "None", ",", "body", "=", "\"\"", ",", "auth", "=", "None", ",", "timeout", "=", "None", ",", "headers", "=", "None", ",", "verify", "=", "True", ",", "proxies", "=", "...
37.216216
22.797297
def add_item(self, item, replace = False): """ Add an item to the roster. This will not automatically update the roster on the server. :Parameters: - `item`: the item to add - `replace`: if `True` then existing item will be replaced, otherwise a `ValueError` will be raised on conflict :Types: - `item`: `RosterItem` - `replace`: `bool` """ if item.jid in self._jids: if replace: self.remove_item(item.jid) else: raise ValueError("JID already in the roster") index = len(self._items) self._items.append(item) self._jids[item.jid] = index
[ "def", "add_item", "(", "self", ",", "item", ",", "replace", "=", "False", ")", ":", "if", "item", ".", "jid", "in", "self", ".", "_jids", ":", "if", "replace", ":", "self", ".", "remove_item", "(", "item", ".", "jid", ")", "else", ":", "raise", ...
32.545455
14.727273
def QA_fetch_user(user_cookie, db=DATABASE): """ get the user Arguments: user_cookie : str the unique cookie_id for a user Keyword Arguments: db: database for query Returns: list --- [ACCOUNT] """ collection = DATABASE.account return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})]
[ "def", "QA_fetch_user", "(", "user_cookie", ",", "db", "=", "DATABASE", ")", ":", "collection", "=", "DATABASE", ".", "account", "return", "[", "res", "for", "res", "in", "collection", ".", "find", "(", "{", "'user_cookie'", ":", "user_cookie", "}", ",", ...
23.6
20.8
def auth_aliases(d): """Interpret user/password aliases. """ for alias, real in ((USER_KEY, "readonly_user"), (PASS_KEY, "readonly_password")): if alias in d: d[real] = d[alias] del d[alias]
[ "def", "auth_aliases", "(", "d", ")", ":", "for", "alias", ",", "real", "in", "(", "(", "USER_KEY", ",", "\"readonly_user\"", ")", ",", "(", "PASS_KEY", ",", "\"readonly_password\"", ")", ")", ":", "if", "alias", "in", "d", ":", "d", "[", "real", "]"...
31.375
11.625
def _next(self, state_class, *args): """Transition into the next state. :param type state_class: a subclass of :class:`State`. It is intialized with the communication object and :paramref:`args` :param args: additional arguments """ self._communication.state = state_class(self._communication, *args)
[ "def", "_next", "(", "self", ",", "state_class", ",", "*", "args", ")", ":", "self", ".", "_communication", ".", "state", "=", "state_class", "(", "self", ".", "_communication", ",", "*", "args", ")" ]
43
17.375
def plotlyFrequencyHistogram(counts): """ x-axis is a count of how many times a bit was active y-axis is number of bits that have that frequency """ data = [ go.Histogram( x=tuple(count for _, _, count in counts.getNonZerosSorted()) ) ] py.plot(data, filename=os.environ.get("HEATMAP_NAME", str(datetime.datetime.now())))
[ "def", "plotlyFrequencyHistogram", "(", "counts", ")", ":", "data", "=", "[", "go", ".", "Histogram", "(", "x", "=", "tuple", "(", "count", "for", "_", ",", "_", ",", "count", "in", "counts", ".", "getNonZerosSorted", "(", ")", ")", ")", "]", "py", ...
31.5
18.666667
def to_dict(self,include_node_id=False,no_attributes=False,track_namespaces=False): """ This function is currently geared very much towards writing STIX/CybOX objects to a dictionary. That should not be the case -- the function needs to be generic just as the from_dict function. TODO: make function generic. """ flat_result = [] def make_ns_slug(name_counter,slug='n'): while "%s%s" % (slug,name_counter['counter']) in namespace_mapping.values(): name_counter['counter'] = name_counter['counter']+1 return "%s%s" % (slug,name_counter['counter']) name_counter = {'counter':0} if track_namespaces: fact_thrus = self.fact_thru.all().prefetch_related( 'fact__fact_term', 'fact__fact_values', 'fact__fact_values__fact_data_type', 'fact__fact_values__fact_data_type__namespace', 'fact__value_iobject_id', 'fact__value_iobject_id__namespace', 'namespace_map__namespaces_thru__namespace', 'node_id') else: fact_thrus = self.fact_thru.all().prefetch_related( 'fact__fact_term', 'fact__fact_values', 'fact__fact_values__fact_data_type', 'fact__fact_values__fact_data_type__namespace', 'fact__value_iobject_id', 'fact__value_iobject_id__namespace', 'node_id') export_ns_dict = {} namespace_mapping= {"%s-%s" % (self.iobject_type.namespace.uri,self.iobject_type_revision): 'n0'} #fact_thrus = self.fact_thru.all() for fact_thru in fact_thrus: #print fact_thru.node_id #print fact_thru.fact.fact_term #for positional_namespace in fact_thru.namespace_map.namespaces_thru.all(): # print positional_namespace value_list = [] first = True fact_datatype_name = None fact_datatype_ns = None fact_dict = {'node_id': fact_thru.node_id.name, 'term': fact_thru.fact.fact_term.term, 'attribute' : fact_thru.fact.fact_term.attribute, '@@namespace_map' : fact_thru.namespace_map, } for fact_value in fact_thru.fact.fact_values.all(): if first: first=False fact_datatype_name = fact_value.fact_data_type.name fact_datatype_ns = fact_value.fact_data_type.namespace.uri if (fact_datatype_name == DINGOS_DEFAULT_FACT_DATATYPE 
and fact_datatype_ns == DINGOS_NAMESPACE_URI) or fact_thru.fact.value_iobject_id: pass else: if not fact_datatype_ns in namespace_mapping: namespace_slug = make_ns_slug(name_counter) namespace_mapping[fact_datatype_ns] = namespace_slug else: namespace_slug= namespace_mapping[fact_datatype_ns] fact_dict['@@type'] = '%s:%s' % (namespace_slug,fact_datatype_name) value_list.append(fact_value.value) fact_dict['value_list'] = value_list if fact_thru.fact.value_iobject_id: value_iobject_id_ns = fact_thru.fact.value_iobject_id.namespace.uri if not value_iobject_id_ns in namespace_mapping: namespace_slug = make_ns_slug(name_counter) namespace_mapping[value_iobject_id_ns] = namespace_slug else: namespace_slug= namespace_mapping[value_iobject_id_ns] value_iobject_id =fact_thru.fact.value_iobject_id.uid if fact_dict['attribute']: # Here we treat the case that the reference is part of an attribute such as # 'phase_id' fact_dict['value_list'] = ["%s:%s" % (namespace_slug,value_iobject_id)] else: # Otherwise, we sneak in an idref attribute. Because the code that # generates the dictionary simply dumps all untreated key-value paris # into the generated dictionary, this works... but is a bit of a hack, really. fact_dict['@idref'] = "%s:%s" % (namespace_slug,value_iobject_id) flat_result.append(fact_dict) result = DingoObjDict() result.from_flat_repr(flat_result, include_node_id=include_node_id, no_attributes=no_attributes, track_namespaces=track_namespaces, namespace_mapping=namespace_mapping ) if not no_attributes: if not track_namespaces: result['@@iobject_type'] = self.iobject_type.name result['@@iobject_type_ns'] = self.iobject_type.namespace.uri return result else: result['@ns'] = namespace_mapping["%s-%s" % (self.iobject_type.namespace.uri,self.iobject_type_revision)] #result['@@iobject_type'] = self.iobject_type.name return {'namespaces': dict(map(lambda x : (x[1],x[0]), namespace_mapping.items())), 'objects' : [result] } else: return result
[ "def", "to_dict", "(", "self", ",", "include_node_id", "=", "False", ",", "no_attributes", "=", "False", ",", "track_namespaces", "=", "False", ")", ":", "flat_result", "=", "[", "]", "def", "make_ns_slug", "(", "name_counter", ",", "slug", "=", "'n'", ")"...
42.24031
25.031008
def labels(self, value): """ Setter for **self.__labels** attribute. :param value: Attribute value. :type value: tuple """ if value is not None: assert type(value) is tuple, "'{0}' attribute: '{1}' type is not 'tuple'!".format("labels", value) assert len(value) == 2, "'{0}' attribute: '{1}' length should be '2'!".format("labels", value) for index in range(len(value)): assert type(value[index]) is unicode, \ "'{0}' attribute element '{1}': '{2}' type is not 'unicode'!".format("labels", index, value) self.__labels = value
[ "def", "labels", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "tuple", ",", "\"'{0}' attribute: '{1}' type is not 'tuple'!\"", ".", "format", "(", "\"labels\"", ",", "value", ")", ...
45.785714
23.357143
def parse_srs(t_srs, src_ds_list=None): """Parse arbitrary input t_srs Parameters ---------- t_srs : str or gdal.Dataset or filename Arbitrary input t_srs src_ds_list : list of gdal.Dataset objects, optional Needed if specifying 'first' or 'last' Returns ------- t_srs : osr.SpatialReference() object Output spatial reference system """ if t_srs is None and src_ds_list is None: print("Input t_srs and src_ds_list are both None") else: if t_srs is None: t_srs = 'first' if t_srs == 'first' and src_ds_list is not None: t_srs = geolib.get_ds_srs(src_ds_list[0]) elif t_srs == 'last' and src_ds_list is not None: t_srs = geolib.get_ds_srs(src_ds_list[-1]) #elif t_srs == 'source': # t_srs = None elif isinstance(t_srs, osr.SpatialReference): pass elif isinstance(t_srs, gdal.Dataset): t_srs = geolib.get_ds_srs(t_srs) elif isinstance(t_srs, str) and os.path.exists(t_srs): t_srs = geolib.get_ds_srs(gdal.Open(t_srs)) elif isinstance(t_srs, str): temp = osr.SpatialReference() if 'EPSG' in t_srs.upper(): epsgcode = int(t_srs.split(':')[-1]) temp.ImportFromEPSG(epsgcode) elif 'proj' in t_srs: temp.ImportFromProj4(t_srs) else: #Assume the user knows what they are doing temp.ImportFromWkt(t_srs) t_srs = temp else: t_srs = None return t_srs
[ "def", "parse_srs", "(", "t_srs", ",", "src_ds_list", "=", "None", ")", ":", "if", "t_srs", "is", "None", "and", "src_ds_list", "is", "None", ":", "print", "(", "\"Input t_srs and src_ds_list are both None\"", ")", "else", ":", "if", "t_srs", "is", "None", "...
34.478261
13.804348
def describe(self, req=None, resp=None, **kwargs): """Describe API resource using resource introspection. Additional description on derrived resource class can be added using keyword arguments and calling ``super().decribe()`` method call like following: .. code-block:: python class SomeResource(BaseResource): def describe(req, resp, **kwargs): return super().describe( req, resp, type='list', **kwargs ) Args: req (falcon.Request): request object resp (falcon.Response): response object kwargs (dict): dictionary of values created from resource url template Returns: dict: dictionary with resource descritpion information .. versionchanged:: 0.2.0 The `req` and `resp` parameters became optional to ease the implementation of application-level documentation generators. """ description = { 'params': OrderedDict([ (name, param.describe()) for name, param in self.params.items() ]), 'details': inspect.cleandoc( self.__class__.__doc__ or "This resource does not have description yet" ), 'name': self.__class__.__name__, 'methods': self.allowed_methods() } # note: add path to resource description only if request object was # provided in order to make auto-documentation engines simpler if req: description['path'] = req.path description.update(**kwargs) return description
[ "def", "describe", "(", "self", ",", "req", "=", "None", ",", "resp", "=", "None", ",", "*", "*", "kwargs", ")", ":", "description", "=", "{", "'params'", ":", "OrderedDict", "(", "[", "(", "name", ",", "param", ".", "describe", "(", ")", ")", "f...
36.020833
19.75
def from_ast( pyast_node, node=None, node_cls=None, Node=Node, iter_fields=ast.iter_fields, AST=ast.AST): '''Convert the ast tree to a tater tree. ''' node_cls = node_cls or Node node = node or node_cls() name = pyast_node.__class__.__name__ attrs = [] for field, value in iter_fields(pyast_node): if name == 'Dict': for key, value in zip(pyast_node.keys, pyast_node.values): if isinstance(value, list): for item in value: if isinstance(item, AST): value = from_ast(item) elif isinstance(value, AST): value = from_ast(value) attrs.append((key.s, value)) else: if isinstance(value, list): for item in value: if isinstance(item, AST): value = from_ast(item) elif isinstance(value, AST): value = from_ast(value) attrs.append((field, value)) node.update(attrs, type=name) return node
[ "def", "from_ast", "(", "pyast_node", ",", "node", "=", "None", ",", "node_cls", "=", "None", ",", "Node", "=", "Node", ",", "iter_fields", "=", "ast", ".", "iter_fields", ",", "AST", "=", "ast", ".", "AST", ")", ":", "node_cls", "=", "node_cls", "or...
36.1
11.366667
def is_build_needed(self, data_sink, data_src): """ returns true if data_src needs to be rebuilt, given that data_sink has had a rebuild requested. """ return (self._gettask(data_src).last_build_time == 0 or self._gettask(data_src).last_build_time < self._gettask(data_sink).last_build_time)
[ "def", "is_build_needed", "(", "self", ",", "data_sink", ",", "data_src", ")", ":", "return", "(", "self", ".", "_gettask", "(", "data_src", ")", ".", "last_build_time", "==", "0", "or", "self", ".", "_gettask", "(", "data_src", ")", ".", "last_build_time"...
50.428571
9.142857
def inferSuperimposedSequenceObjects(exp, sequenceId, objectId, sequences, objects): """Run inference on the given sequence.""" # Create the (loc, feat) pairs for this sequence for column 0. objectSensations = { 0: [pair for pair in sequences[sequenceId]] } inferConfig = { "object": sequenceId, "numSteps": len(objectSensations[0]), "pairs": objectSensations, } inferenceSDRSequence = sequences.provideObjectToInfer(inferConfig) # Create sequence of random sensations for this object for one column. The # total number of sensations is equal to the number of points on the object. # No point should be visited more than once. objectSensations = {} objectSensations[0] = [] obj = objects[objectId] objectCopy = [pair for pair in obj] random.shuffle(objectCopy) for pair in objectCopy: objectSensations[0].append(pair) inferConfig = { "numSteps": len(objectSensations[0]), "pairs": objectSensations, "includeRandomLocation": False, } inferenceSDRObject = objects.provideObjectToInfer(inferConfig) superimposedSDRs = createSuperimposedSDRs(inferenceSDRSequence, inferenceSDRObject) # exp.infer(superimposedSDRs, objectName=str(sequenceId) + "+" + str(objectId)) exp.infer(superimposedSDRs, objectName=sequenceId*len(objects) + objectId)
[ "def", "inferSuperimposedSequenceObjects", "(", "exp", ",", "sequenceId", ",", "objectId", ",", "sequences", ",", "objects", ")", ":", "# Create the (loc, feat) pairs for this sequence for column 0.", "objectSensations", "=", "{", "0", ":", "[", "pair", "for", "pair", ...
33.684211
23.842105
def limit_keyphrases (path, phrase_limit=20):
    """
    iterator for the most significant key phrases

    Yields at most `phrase_limit` non-verb phrases whose rank is at or
    above the mean rank of all lexemes, with " - " collapsed to "-".

    Args:
        path: either a path to a JSON-lines file of ranked lexemes, or
            an already-loaded iterable of RankedLexeme-like objects
            (each with `rank`, `pos`, and `text` attributes)
        phrase_limit: maximum number of phrases to yield

    Yields:
        str: the next key phrase text
    """
    if isinstance(path, str):
        lex = [RankedLexeme(**meta) for meta in json_iter(path)]
    else:
        lex = path

    # threshold phrases at the mean rank; an empty lexeme list gets 0
    if len(lex) > 0:
        rank_thresh = statistics.mean([rl.rank for rl in lex])
    else:
        rank_thresh = 0

    used = 0

    for rl in lex:
        # skip verbs; they rarely make good key phrases
        if rl.pos[0] != "v":
            # BUG FIX: the original tested `used > phrase_limit`, which
            # yielded phrase_limit + 1 phrases before stopping.
            if (used >= phrase_limit) or (rl.rank < rank_thresh):
                return

            used += 1
            yield rl.text.replace(" - ", "-")
[ "def", "limit_keyphrases", "(", "path", ",", "phrase_limit", "=", "20", ")", ":", "rank_thresh", "=", "None", "if", "isinstance", "(", "path", ",", "str", ")", ":", "lex", "=", "[", "]", "for", "meta", "in", "json_iter", "(", "path", ")", ":", "rl", ...
21.689655
20.517241
def ajRadicaux(self, lemme):
    """ Compute all the radicals (stems) of lemma `lemme`,
    * using the models,
    * attach them to the lemma,
    * and register them in the Lemmat class radical map.

    Typical lemma line
    # ablŭo=ā̆blŭo|lego|ā̆blŭ|ā̆blūt|is, ere, lui, lutum
    # 0        1       2     3         4

    :param lemme: Lemme
    :type lemme: Lemme
    """
    m = self.lemmatiseur.modele(lemme.grModele())
    ''' insérer d'abord les radicaux définis dans lemmes.la
    qui sont prioritaires '''
    # First insert the radicals defined in lemmes.la, which take priority
    # over the ones generated from the model below.
    for i in lemme.cles_radicaux():
        radical_list = lemme.radical(i)
        for radical in radical_list:
            self.lemmatiseur._radicaux[deramise(radical.gr()).lower()].append(radical)
    # For each radical index of the model
    for indice_radical in m.cles_radicaux():
        # Skip it if the radical was already defined by the lemma itself
        if indice_radical in lemme.cles_radicaux():
            continue
        gs = lemme.grq().split(',')
        for graphie in gs:
            gen = m.genRadical(indice_radical)
            # if gen == 'K', the radical is the canonical form itself
            if gen == "-":
                continue
            if gen != "K":
                # otherwise apply the model's formation rule:
                # strip `oter` trailing chars, then append `ajouter`
                oter, ajouter = 0, "0"
                if "," in gen:
                    oter, ajouter = tuple(gen.split(","))
                    oter = int(oter)
                else:
                    oter = int(gen)
                if oter == len(graphie):
                    graphie = ""
                elif oter != 0:
                    graphie = graphie[:-oter]
                if ajouter != "0":
                    graphie += ajouter
            r = Radical(graphie, indice_radical, lemme)
            # NOTE(review): original comment doubted whether this should
            # instead be scoped per graphie in gs — confirm.
            lemme.ajRadical(indice_radical, r)
            self.lemmatiseur._radicaux[deramise(r.gr()).lower()].append(r)
[ "def", "ajRadicaux", "(", "self", ",", "lemme", ")", ":", "m", "=", "self", ".", "lemmatiseur", ".", "modele", "(", "lemme", ".", "grModele", "(", ")", ")", "''' insérer d'abord les radicaux définis dans lemmes.la\n qui sont prioritaires '''", "for", "i", "in...
39.403846
14.903846
def get_node_type(tree):
    """
    returns the node type (leaf or span) of a subtree (i.e. Nucleus or
    Satellite)

    Parameters
    ----------
    tree : nltk.tree.ParentedTree
        a tree representing a rhetorical structure (or a part of it)
    """
    # The node type lives on the label of the subtree's first child.
    node_label = tree[0].label()
    assert node_label in NODE_TYPES, "node_type: {}".format(node_label)
    return node_label
[ "def", "get_node_type", "(", "tree", ")", ":", "node_type", "=", "tree", "[", "0", "]", ".", "label", "(", ")", "assert", "node_type", "in", "NODE_TYPES", ",", "\"node_type: {}\"", ".", "format", "(", "node_type", ")", "return", "node_type" ]
30.666667
20.166667
def _filtered_walk(path, file_filter):
    """Walk `path` like os.walk, keeping only files accepted by `file_filter`.

    The filter is told the current root via `set_root` before filtering
    each directory's file list.
    """
    for dirpath, dirnames, filenames in os.walk(path):
        log.debug('looking in %s', dirpath)
        log.debug('files is %s', filenames)
        file_filter.set_root(dirpath)
        kept = filter(file_filter, filenames)
        log.debug('filtered files is %s', kept)
        yield (dirpath, dirnames, kept)
[ "def", "_filtered_walk", "(", "path", ",", "file_filter", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "log", ".", "debug", "(", "'looking in %s'", ",", "root", ")", "log", ".", "debug", "(", "...
32.166667
4.5
def create(self, mention, max_message_length):
    """
    Create a message

    :param mention: JSON object containing mention details from Twitter
        (or an empty dict {}); currently unused by the generator
    :param max_message_length: Maximum allowable length for created message
    :return: A random message created using a Markov chain generator
    """
    words = []

    def current_length():
        # each word costs its own length plus one separator character
        return sum(len(word) + 1 for word in words)

    while current_length() < max_message_length:
        previous = words[-1] if words else None
        words.append(self.a_random_word(previous))

    # drop the word that pushed the message over the limit
    return ' '.join(words[:-1])
[ "def", "create", "(", "self", ",", "mention", ",", "max_message_length", ")", ":", "message", "=", "[", "]", "def", "message_len", "(", ")", ":", "return", "sum", "(", "[", "len", "(", "w", ")", "+", "1", "for", "w", "in", "message", "]", ")", "w...
37.8125
23.0625
def qualify(workers, qualification, value, by_name, notify, sandbox):
    """Assign a qualification to 1 or more workers

    Validates the inputs, optionally resolves a qualification name to an
    ID, sets the qualification score on each worker, then echoes a
    summary of all workers currently holding the qualification.
    """
    if not (workers and qualification and value):
        raise click.BadParameter(
            "Must specify a qualification ID, value/score, and at least one worker ID"
        )
    mturk = _mturk_service_from_config(sandbox)
    if by_name:
        # resolve a human-readable qualification name to its ID
        result = mturk.get_qualification_type_by_name(qualification)
        if result is None:
            raise click.BadParameter(
                'No qualification with name "{}" exists.'.format(qualification)
            )

        qid = result["id"]
    else:
        qid = qualification

    click.echo(
        "Assigning qualification {} with value {} to {} worker{}...".format(
            qid, value, len(workers), "s" if len(workers) > 1 else ""
        )
    )
    for worker in workers:
        if mturk.set_qualification_score(qid, worker, int(value), notify=notify):
            click.echo("{} OK".format(worker))

    # print out the current set of workers with the qualification
    results = list(mturk.get_workers_with_qualification(qid))
    click.echo("{} workers with qualification {}:".format(len(results), qid))

    # summarize how many workers hold each score value
    for score, count in Counter([r["score"] for r in results]).items():
        click.echo("{} with value {}".format(count, score))
[ "def", "qualify", "(", "workers", ",", "qualification", ",", "value", ",", "by_name", ",", "notify", ",", "sandbox", ")", ":", "if", "not", "(", "workers", "and", "qualification", "and", "value", ")", ":", "raise", "click", ".", "BadParameter", "(", "\"M...
38.411765
25.676471
def decode_offset_response(cls, response):
    """
    Decode OffsetResponse into OffsetResponsePayloads

    Arguments:
        response: OffsetResponse

    Returns: list of OffsetResponsePayloads
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, error, offsets in partitions:
            payloads.append(
                kafka.structs.OffsetResponsePayload(
                    topic, partition, error, tuple(offsets)))
    return payloads
[ "def", "decode_offset_response", "(", "cls", ",", "response", ")", ":", "return", "[", "kafka", ".", "structs", ".", "OffsetResponsePayload", "(", "topic", ",", "partition", ",", "error", ",", "tuple", "(", "offsets", ")", ")", "for", "topic", ",", "partit...
31.571429
18.714286
def main(target, label):
    """ Semver tag triggered deployment helper

    Checks the environment, fetches tags from the upstream remote,
    prints tag information, obtains the next tag for the given
    target/label from the handler, and asks for confirmation.
    """
    # abort early if the environment is not suitable for this deploy
    check_environment(target, label)

    click.secho('Fetching tags from the upstream ...')
    handler = TagHandler(git.list_tags())

    print_information(handler, label)

    tag = handler.yield_tag(target, label)
    confirm(tag)
[ "def", "main", "(", "target", ",", "label", ")", ":", "check_environment", "(", "target", ",", "label", ")", "click", ".", "secho", "(", "'Fetching tags from the upstream ...'", ")", "handler", "=", "TagHandler", "(", "git", ".", "list_tags", "(", ")", ")", ...
23.538462
14.307692
def get(self, id, **options):
    '''Get a single item with the given ID'''
    # some item types expose no per-item endpoint
    if not self._item_path:
        raise AttributeError('get is not available for %s' % self._item_name)
    target = self._item_path % id
    raw = self._redmine.get(target, **options)
    item_data = self._redmine.unwrap_json(self._item_type, raw)
    # remember where the item came from so it can be refreshed later
    item_data['_source_path'] = target
    return self._objectify(data=item_data)
[ "def", "get", "(", "self", ",", "id", ",", "*", "*", "options", ")", ":", "if", "not", "self", ".", "_item_path", ":", "raise", "AttributeError", "(", "'get is not available for %s'", "%", "self", ".", "_item_name", ")", "target", "=", "self", ".", "_ite...
47.666667
13.444444
def getxattr(self, req, ino, name, size):
    """
    Get an extended attribute

    Valid replies:
        reply_buf
        reply_data
        reply_xattr
        reply_err
    """
    # extended attributes are not implemented; report ENOSYS
    self.reply_err(req, errno.ENOSYS)
[ "def", "getxattr", "(", "self", ",", "req", ",", "ino", ",", "name", ",", "size", ")", ":", "self", ".", "reply_err", "(", "req", ",", "errno", ".", "ENOSYS", ")" ]
22.7
14.5
def convert_mrf_to_syntax_mrf( mrf_lines, conversion_rules ):
    ''' Converts given lines from Filosoft's mrf format to syntactic analyzer's
        format, using the morph-category conversion rules from conversion_rules,
        and punctuation via method _convert_punctuation();
        As a result of conversion, the input list mrf_lines will be modified,
        and also returned after a successful conversion;

        Morph-category conversion rules should be loaded via method
            load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
        usually from a file named 'tmorftrtabel.txt';

        Note that the resulting list of lines likely has more lines than
        the original list had, because the conversion often requires that
        the original Filosoft's analysis is expanded into multiple analyses
        suitable for the syntactic analyzer;
    '''
    i = 0
    while ( i < len(mrf_lines) ):
        line = mrf_lines[i]
        if line.startswith('    '):  # only consider lines of analysis
            # 1) Convert punctuation
            if _punctOrAbbrev.search(line):
                mrf_lines[i] = _convert_punctuation( line )
                if '_Y_' not in line:
                    i += 1
                    continue
            # 2) Convert morphological analyses that have a form specified
            withFormMatch = _morfWithForm.search(line)
            if withFormMatch:
                root = withFormMatch.group(1)
                pos = withFormMatch.group(2)
                formStr = withFormMatch.group(3)
                forms = formStr.split(',')
                all_new_lines = []
                for form in forms:
                    morphKey = pos+' '+form.strip()
                    if morphKey in conversion_rules:
                        newlines = [ '    '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
                        all_new_lines.extend( newlines )
                if all_new_lines:
                    del mrf_lines[i]
                    for newline in all_new_lines:
                        mrf_lines.insert(i, newline)
                    # NOTE(review): advances by len(newlines) — the size of
                    # the *last* rule expansion — not len(all_new_lines);
                    # confirm the remaining inserted lines cannot re-match.
                    i += len(newlines)
                    continue
            else:
                withoutFormMatch = _morfWithoutForm.search(line)
                if withoutFormMatch:
                    # 3) Convert morphological analyses that have only POS specified
                    root = withoutFormMatch.group(1)
                    pos = withoutFormMatch.group(2)
                    morphKey = pos
                    all_new_lines = []
                    if morphKey in conversion_rules:
                        newlines = [ '    '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
                        all_new_lines.extend( newlines )
                    if all_new_lines:
                        del mrf_lines[i]
                        for newline in all_new_lines:
                            mrf_lines.insert(i, newline)
                        i += len(newlines)
                        continue
        i += 1
    return mrf_lines
[ "def", "convert_mrf_to_syntax_mrf", "(", "mrf_lines", ",", "conversion_rules", ")", ":", "i", "=", "0", "while", "(", "i", "<", "len", "(", "mrf_lines", ")", ")", ":", "line", "=", "mrf_lines", "[", "i", "]", "if", "line", ".", "startswith", "(", "' '...
48.140625
18.109375
async def fetch_state(self, request):
    """Fetches data from a specific address in the validator's state tree.

    Request:
        query:
            - head: The id of the block to use as the head of the chain
            - address: The 70 character address of the data to be fetched

    Response:
        data: The base64 encoded binary data stored at that address
        head: The head used for this query (most recent if unspecified)
        link: The link to this exact query, including head block
    """
    error_traps = [
        error_handlers.InvalidAddressTrap,
        error_handlers.StateNotFoundTrap]

    address = request.match_info.get('address', '')
    head = request.url.query.get('head', None)

    # resolve the (possibly unspecified) head block to a state root hash
    head, root = await self._head_to_root(head)
    response = await self._query_validator(
        Message.CLIENT_STATE_GET_REQUEST,
        client_state_pb2.ClientStateGetResponse,
        client_state_pb2.ClientStateGetRequest(
            state_root=root, address=address),
        error_traps)

    return self._wrap_response(
        request,
        data=response['value'],
        metadata=self._get_metadata(request, response, head=head))
[ "async", "def", "fetch_state", "(", "self", ",", "request", ")", ":", "error_traps", "=", "[", "error_handlers", ".", "InvalidAddressTrap", ",", "error_handlers", ".", "StateNotFoundTrap", "]", "address", "=", "request", ".", "match_info", ".", "get", "(", "'a...
38.8125
19.46875
def toggle_settings(
        toolbar=False, nbname=False, hideprompt=False, kernellogo=False):
    """Toggle main notebook toolbar (e.g., buttons), filename, and kernel logo.

    Returns the LESS/CSS snippet that shows or hides each element
    according to the corresponding flag.
    """
    parts = []
    if toolbar:
        parts.append('div#maintoolbar {margin-left: 8px !important;}\n')
        parts.append('.toolbar.container {width: 100% !important;}\n')
    else:
        parts.append('div#maintoolbar {display: none !important;}\n')
    if nbname:
        parts.append(
            'span.save_widget span.filename {margin-left: 8px; height: initial;'
            'font-size: 100%; color: @nb-name-fg; background-color:'
            '@cc-input-bg;}\n')
        parts.append(
            'span.save_widget span.filename:hover {color:'
            '@nb-name-hover; background-color: @cc-input-bg;}\n')
        parts.append(
            '#menubar {padding-top: 4px; background-color:'
            '@notebook-bg;}\n')
    else:
        parts.append('#header-container {display: none !important;}\n')
    if hideprompt:
        parts.append('div.prompt.input_prompt {display: none !important;}\n')
        parts.append('div.prompt.output_prompt {width: 5ex !important;}\n')
        parts.append('div.out_prompt_overlay.prompt:hover {width: 5ex !important; min-width: 5ex !important;}\n')
        parts.append(
            '.CodeMirror-gutters, .cm-s-ipython .CodeMirror-gutters'
            '{ position: absolute; left: 0; top: 0; z-index: 3; width: 2em; '
            'display: inline-block !important; }\n')
        parts.append('div.cell.code_cell .input { border-left: 5px solid @cm-gutters !important; border-bottom-left-radius: 5px; border-top-left-radius: 5px; }\n')
    # kernel logo toggle is always emitted, as a LESS variable
    parts.append('@kernel-logo-display: block;' if kernellogo
                 else '@kernel-logo-display: none;')
    return ''.join(parts)
[ "def", "toggle_settings", "(", "toolbar", "=", "False", ",", "nbname", "=", "False", ",", "hideprompt", "=", "False", ",", "kernellogo", "=", "False", ")", ":", "toggle", "=", "''", "if", "toolbar", ":", "toggle", "+=", "'div#maintoolbar {margin-left: 8px !imp...
48.166667
28.083333
def send_up(self, count):
    """ Sends the given number of up key presses. """
    presses_remaining = count
    while presses_remaining > 0:
        self.interface.send_key(Key.UP)
        presses_remaining -= 1
[ "def", "send_up", "(", "self", ",", "count", ")", ":", "for", "i", "in", "range", "(", "count", ")", ":", "self", ".", "interface", ".", "send_key", "(", "Key", ".", "UP", ")" ]
29.5
6.166667
def trackerItem( self ): """ Returns the tracker item for this chart. :return <XChartTrackerItem> || None """ # check for the tracking enabled state if not self.isTrackingEnabled(): return None # generate a new tracker item if not (self._trackerItem and self._trackerItem()): item = XChartTrackerItem() self.addItem(item) self._trackerItem = weakref.ref(item) return self._trackerItem()
[ "def", "trackerItem", "(", "self", ")", ":", "# check for the tracking enabled state\r", "if", "not", "self", ".", "isTrackingEnabled", "(", ")", ":", "return", "None", "# generate a new tracker item\r", "if", "not", "(", "self", ".", "_trackerItem", "and", "self", ...
31.588235
11.588235
def info():
    """Display app info.

    Examples:

        $ dj info
        No application, try running dj init.

        $ dj info
        Application: foo @ 2.7.9
        Requirements:
            Django == 1.10
    """
    app = get_current_application()
    details = app.info()
    stdout.write(details)
    return details
[ "def", "info", "(", ")", ":", "application", "=", "get_current_application", "(", ")", "info", "=", "application", ".", "info", "(", ")", "stdout", ".", "write", "(", "info", ")", "return", "info" ]
15.789474
21.947368
def _compute_fans(shape):
    """Computes the number of input and output units for a weight shape.

    Args:
        shape: Integer shape tuple or TF tensor shape.

    Returns:
        A tuple of scalars (fan_in, fan_out).
    """
    rank = len(shape)
    if rank < 1:
        # Scalars/constants: both fans default to 1.
        fan_in, fan_out = 1, 1
    elif rank == 1:
        fan_in, fan_out = shape[0], shape[0]
    elif rank == 2:
        fan_in, fan_out = shape[0], shape[1]
    else:
        # Convolution kernels (2D, 3D, or more):
        # kernel shape is (..., input_depth, depth)
        receptive_field_size = 1.
        for dim in shape[:-2]:
            receptive_field_size *= dim
        fan_in = shape[-2] * receptive_field_size
        fan_out = shape[-1] * receptive_field_size
    # Unwrap tf.Dimension objects into plain ints.
    if isinstance(fan_in, tf.Dimension):
        fan_in = fan_in.value
    if isinstance(fan_out, tf.Dimension):
        fan_out = fan_out.value
    return fan_in, fan_out
[ "def", "_compute_fans", "(", "shape", ")", ":", "if", "len", "(", "shape", ")", "<", "1", ":", "# Just to avoid errors for constants.", "fan_in", "=", "fan_out", "=", "1", "elif", "len", "(", "shape", ")", "==", "1", ":", "fan_in", "=", "fan_out", "=", ...
29.241379
14.655172
def _format_stage_info(bar_width, stage_info, duration,
                       timedelta_formatter=_pretty_time_delta):
    """Formats the Spark stage progress.

    Parameters
    ----------
    bar_width : int
        Width of the progressbar to print out.
    stage_info : :class:`pyspark.status.StageInfo`
        Information about the running stage
    duration : :class:`datetime.timedelta`
        Duration of the stage so far
    timedelta_formatter : callable
        Converts a timedelta to a string.

    Returns
    -------
    formatted : str
    """
    elapsed = timedelta_formatter(duration)
    # number of '=' cells already completed
    done_chars = (stage_info.numCompletedTasks * bar_width) // stage_info.numTasks
    cells = []
    for pos in range(bar_width):
        if pos < done_chars:
            cells.append('=')
        elif pos == done_chars:
            cells.append('>')
        else:
            cells.append(' ')
    bar = ''.join(cells)
    return "[Stage {info.stageId}:{bar} " \
           "({info.numCompletedTasks} + {info.numActiveTasks} / {info.numTasks} Dur: {dur}]" \
        .format(info=stage_info, dur=elapsed, bar=bar)
[ "def", "_format_stage_info", "(", "bar_width", ",", "stage_info", ",", "duration", ",", "timedelta_formatter", "=", "_pretty_time_delta", ")", ":", "dur", "=", "timedelta_formatter", "(", "duration", ")", "percent", "=", "(", "stage_info", ".", "numCompletedTasks", ...
31.911765
17.941176
def get_pub_date(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `pub_date` record is not found.

    Returns:
        str: Date of publication (month and year usually) or `undefined` \
             if `pub_date` is not found.
    """
    def clean_date(date):
        """
        Clean the `date` string from special characters, keeping digits,
        spaces, and dashes that directly follow a digit. So:

        [2015]- -> 2015
        2015- -> 2015-
        """
        out = ""
        was_digit = False

        for c in date:
            if c.isdigit() or (c == "-" and was_digit) or c == " ":
                out += c
            was_digit = c.isdigit()

        return out

    # FIX: the original computed `self["260c "] + self["264c"]` twice and
    # discarded the first result; perform the record lookup only once.
    dates = set(
        clean_date(date)
        for date in self["260c "] + self["264c"]
    )

    return _undefined_pattern(
        ", ".join(dates),
        lambda x: x.strip() == "",
        undefined
    )
[ "def", "get_pub_date", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "dates", "=", "self", "[", "\"260c \"", "]", "+", "self", "[", "\"264c\"", "]", "def", "clean_date", "(", "date", ")", ":", "\"\"\"\n Clean the `date` strings from special c...
27.333333
18.428571
def ntp_server_key(self, **kwargs):
    """Auto Generated Code

    Builds the brocade-ntp server configuration subtree (ip, use-vrf,
    key leaves) and hands it to the callback (``self._callback`` unless
    a ``callback`` keyword argument is supplied).
    """
    config = ET.Element("config")
    ntp = ET.SubElement(config, "ntp",
                        xmlns="urn:brocade.com:mgmt:brocade-ntp")
    server = ET.SubElement(ntp, "server")

    # required leaf values pulled from the keyword arguments
    ET.SubElement(server, "ip").text = kwargs.pop('ip')
    ET.SubElement(server, "use-vrf").text = kwargs.pop('use_vrf')
    ET.SubElement(server, "key").text = kwargs.pop('key')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "ntp_server_key", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ntp", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ntp\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ntp...
39.466667
10.466667
def get_storage_pool_by_name(self, name):
    """
    Get ScaleIO StoragePool object by its name
    :param name: Name of StoragePool
    :return: ScaleIO StoragePool object
    :raise KeyError: No StoragePool with specified name found
    :rtype: StoragePool object
    """
    matches = (pool for pool in self.conn.storage_pools if pool.name == name)
    found = next(matches, None)
    if found is None:
        raise KeyError("Storage pool of that name not found")
    return found
[ "def", "get_storage_pool_by_name", "(", "self", ",", "name", ")", ":", "for", "storage_pool", "in", "self", ".", "conn", ".", "storage_pools", ":", "if", "storage_pool", ".", "name", "==", "name", ":", "return", "storage_pool", "raise", "KeyError", "(", "\"S...
40.333333
7
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
    """ Creates the function that calculates the potential given
    the position (in volts) and the radius of the particle.

    Parameters
    ----------
    kBT_Gamma : float
        Value of kB*T/Gamma
    density : float
        density of the nanoparticle
    SpringPotnlFunc : function
        Function which takes the value of position (in volts)
        and returns the spring potential

    Returns
    -------
    PotentialFunc : function
        function that calculates the potential given the position
        (in volts) and the radius of the particle.
    """
    def PotentialFunc(xdata, Radius):
        """ calculates the potential given the position (in volts)
        and the radius of the particle.

        Parameters
        ----------
        xdata : ndarray
            Position data (in volts)
        Radius : float
            Radius in units of nm

        Returns
        -------
        Potential : ndarray
            Dynamical Spring Potential at positions given by xdata
        """
        # particle mass from its radius (nm -> m) and material density
        radius_m = Radius * 10**-9
        mass = (4 / 3) * np.pi * radius_m**3 * density
        scale = kBT_Gamma / mass
        return scale * SpringPotnlFunc(xdata)

    return PotentialFunc
[ "def", "MakeDynamicPotentialFunc", "(", "kBT_Gamma", ",", "density", ",", "SpringPotnlFunc", ")", ":", "def", "PotentialFunc", "(", "xdata", ",", "Radius", ")", ":", "\"\"\"\n calculates the potential given the position (in volts) \n and the radius of the particle.\...
27.466667
17.733333
def get_groups_count(self, field=None):
    '''
    Returns 'matches' from group response.

    If grouping on more than one field, provide the field argument to
    specify which count you are looking for.
    '''
    group_field = field if field else self._determine_group_field(field)
    group_data = self.data['grouped'][group_field]
    if 'matches' not in group_data:
        raise ValueError("group matches not found in response")
    return group_data['matches']
[ "def", "get_groups_count", "(", "self", ",", "field", "=", "None", ")", ":", "field", "=", "field", "if", "field", "else", "self", ".", "_determine_group_field", "(", "field", ")", "if", "'matches'", "in", "self", ".", "data", "[", "'grouped'", "]", "[",...
47.1
26.9
def as_dict( self, key="id" ):
    """
    Return a dictionary containing all remaining motifs, keyed by the
    attribute named `key` of each motif.
    """
    return {getattr(motif, key): motif for motif in self}
[ "def", "as_dict", "(", "self", ",", "key", "=", "\"id\"", ")", ":", "rval", "=", "{", "}", "for", "motif", "in", "self", ":", "rval", "[", "getattr", "(", "motif", ",", "key", ")", "]", "=", "motif", "return", "rval" ]
29.444444
13.222222
def strip_fhss(self, idx):
    """strip (2 byte) radiotap.fhss.hopset(1 byte) and
    radiotap.fhss.pattern(1 byte)

    :idx: int
    :return: int
        idx
    :return: collections.namedtuple
    """
    fhss = collections.namedtuple('fhss', ['hopset', 'pattern'])
    hopset, pattern = struct.unpack_from('<bb', self._rtap, idx)
    # mirror the original implementation: the values are stored as
    # attributes on the namedtuple class itself, not on an instance
    fhss.hopset = hopset
    fhss.pattern = pattern
    return idx + 2, fhss
[ "def", "strip_fhss", "(", "self", ",", "idx", ")", ":", "fhss", "=", "collections", ".", "namedtuple", "(", "'fhss'", ",", "[", "'hopset'", ",", "'pattern'", "]", ")", "fhss", ".", "hopset", ",", "fhss", ".", "pattern", ",", "=", "struct", ".", "unpa...
36.181818
15
def sameAs(self, other):
    """
    Check if this is the same location.

    Returns 0 when `other` is within `_EPSILON` distance of this
    location, and -1 otherwise — including when `other` is not
    location-like (i.e. has no `get` method).
    """
    if not hasattr(other, "get"):
        return -1
    return 0 if self.distance(other) < _EPSILON else -1
[ "def", "sameAs", "(", "self", ",", "other", ")", ":", "if", "not", "hasattr", "(", "other", ",", "\"get\"", ")", ":", "return", "-", "1", "d", "=", "self", ".", "distance", "(", "other", ")", "if", "d", "<", "_EPSILON", ":", "return", "0", "retur...
26.5
14
def run_step(context):
    """Parse input file and substitutes {tokens} from context.

    Args:
        context: pypyr.context.Context. Mandatory.
                 The following context keys expected:
                - fileFormat
                    - in. mandatory.
                      str, path-like, or an iterable (list/tuple) of
                      strings/paths. Each str/path can be a glob, relative or
                      absolute path.
                    - out. optional. path-like.
                      Can refer to a file or a directory.
                      will create directory structure if it doesn't exist. If
                      in-path refers to >1 file (e.g it's a glob or list), out
                      path can only be a directory - it doesn't make sense to
                      write >1 file to the same single file (this is not an
                      appender.) To ensure out_path is read as a directory and
                      not a file, be sure to have the path separator (/) at the
                      end.
                      If out_path is not specified or None, will in-place edit
                      and overwrite the in-files.

    Returns:
        None.

    Raises:
        FileNotFoundError: take a guess
        pypyr.errors.KeyNotInContextError: fileFormat missing in context.
        pypyr.errors.KeyInContextHasNoValueError: in or out exists but is
                                                  None.

    """
    logger.debug("started")
    # warn callers still relying on the deprecated interface of this step
    deprecated(context)
    # delegate parse/substitute/write to the shared stream rewriter,
    # keyed on the 'fileFormat' context input
    StreamRewriterStep(__name__, 'fileFormat', context).run_step()
    logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "deprecated", "(", "context", ")", "StreamRewriterStep", "(", "__name__", ",", "'fileFormat'", ",", "context", ")", ".", "run_step", "(", ")", "logger", ".", "deb...
40.421053
24.605263
def update(self, table, columns, values):
    """Update one or more existing table rows.

    :type table: str
    :param table: Name of the table to be modified.

    :type columns: list of str
    :param columns: Name of the table columns to be modified.

    :type values: list of lists
    :param values: Values to be modified.
    """
    # queue an UPDATE mutation; it is applied when the enclosing
    # batch/transaction is committed
    self._mutations.append(Mutation(update=_make_write_pb(table, columns, values)))
[ "def", "update", "(", "self", ",", "table", ",", "columns", ",", "values", ")", ":", "self", ".", "_mutations", ".", "append", "(", "Mutation", "(", "update", "=", "_make_write_pb", "(", "table", ",", "columns", ",", "values", ")", ")", ")" ]
34.384615
18.461538
def create_virtualenv(venv=VENV, install_pip=False):
    """Creates the virtual environment and installs PIP only into the
    virtual environment

    NOTE(review): this function uses Python 2 print statements; the
    whole module is Python 2 only.
    """
    print 'Creating venv...',
    install = ['virtualenv', '-q', venv]
    run_command(install)
    print 'done.'
    print 'Installing pip in virtualenv...',
    # pip is only installed on request; abort the bootstrap if it fails
    if install_pip and \
        not run_command(['tools/with_venv.sh', 'easy_install', 'pip>1.0']):
        die("Failed to install pip.")
    print 'done.'
[ "def", "create_virtualenv", "(", "venv", "=", "VENV", ",", "install_pip", "=", "False", ")", ":", "print", "'Creating venv...'", ",", "install", "=", "[", "'virtualenv'", ",", "'-q'", ",", "venv", "]", "run_command", "(", "install", ")", "print", "'done.'", ...
30.625
14.5
def pf_to_n(L, pf, R):
    """Returns the number of non-intersecting spheres required to achieve
    as close to a given packing fraction as possible,
    along with the actual achieved packing fraction.
    for a number of non-intersecting spheres.

    Parameters
    ----------
    L: float array, shape (d,)
        System lengths.
    pf: float
        Fraction of space to be occupied by the spheres.
    R: float
        Sphere radius.

    Returns
    -------
    n: integer
        Number of spheres required to achieve
        a packing fraction `pf_actual`
    pf_actual:
        Fraction of space occupied by `n` spheres.
        This is the closest possible fraction achievable to `pf`.
    """
    dim = L.shape[0]
    # FIX: np.prod replaces np.product, which was deprecated and then
    # removed in NumPy 2.0 (identical behavior).
    n = int(round(pf * np.prod(L) / sphere_volume(R, dim)))
    pf_actual = n_to_pf(L, n, R)
    return n, pf_actual
[ "def", "pf_to_n", "(", "L", ",", "pf", ",", "R", ")", ":", "dim", "=", "L", ".", "shape", "[", "0", "]", "n", "=", "int", "(", "round", "(", "pf", "*", "np", ".", "product", "(", "L", ")", "/", "sphere_volume", "(", "R", ",", "dim", ")", ...
31.192308
22.346154
def div_img(img1, div2):
    """ Pixelwise division or divide by a number """
    if is_img(div2):
        return img1.get_data() / div2.get_data()
    if isinstance(div2, (float, int)):
        return img1.get_data() / div2
    raise NotImplementedError('Cannot divide {}({}) by '
                              '{}({})'.format(type(img1),
                                              img1,
                                              type(div2),
                                              div2))
[ "def", "div_img", "(", "img1", ",", "div2", ")", ":", "if", "is_img", "(", "div2", ")", ":", "return", "img1", ".", "get_data", "(", ")", "/", "div2", ".", "get_data", "(", ")", "elif", "isinstance", "(", "div2", ",", "(", "float", ",", "int", ")...
43.25
14.25
def listed(self):
    """Print blacklist packages
    """
    print("\nPackages in the blacklist:\n")
    for entry in self.get_black():
        if not entry:
            continue
        # colorize each blacklisted package name
        print("{0}{1}{2}".format(self.meta.color["GREEN"], entry,
                                 self.meta.color["ENDC"]))
        self.quit = True
    if self.quit:
        # trailing blank line after a non-empty listing
        print("")
[ "def", "listed", "(", "self", ")", ":", "print", "(", "\"\\nPackages in the blacklist:\\n\"", ")", "for", "black", "in", "self", ".", "get_black", "(", ")", ":", "if", "black", ":", "print", "(", "\"{0}{1}{2}\"", ".", "format", "(", "self", ".", "meta", ...
34.727273
14.181818
def main(argv):
    """Train on examples and export the updated model weights."""
    # argv[0] is the program name; the rest are TF record paths
    tf_records = argv[1:]
    logging.info("Training on %s records: %s to %s",
                 len(tf_records), tf_records[0], tf_records[-1])
    with utils.logged_timer("Training"):
        train(*tf_records)
    if FLAGS.export_path:
        dual_net.export_model(FLAGS.export_path)
    if FLAGS.freeze:
        # TPU graphs require a different freezing path than CPU/GPU ones
        if FLAGS.use_tpu:
            dual_net.freeze_graph_tpu(FLAGS.export_path)
        else:
            dual_net.freeze_graph(FLAGS.export_path)
[ "def", "main", "(", "argv", ")", ":", "tf_records", "=", "argv", "[", "1", ":", "]", "logging", ".", "info", "(", "\"Training on %s records: %s to %s\"", ",", "len", "(", "tf_records", ")", ",", "tf_records", "[", "0", "]", ",", "tf_records", "[", "-", ...
37.571429
14.5
def normalise_reads(self):
    """
    Use bbnorm from the bbmap suite of tools to perform read normalisation
    """
    logging.info('Normalising reads to a kmer depth of 100')
    for sample in self.metadata:
        # Set the name of the normalised read files
        sample.general.normalisedreads = [fastq.split('.fastq.gz')[0] + '_normalised.fastq.gz'
                                          for fastq in sorted(sample.general.fastqfiles)]
        try:
            # Run the normalisation command
            out, err, cmd = bbtools.bbnorm(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
                                           forward_out=sample.general.normalisedreads[0],
                                           returncmd=True,
                                           threads=self.cpus)
            sample[self.analysistype].normalisecmd = cmd
            write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
        except CalledProcessError:
            # bbnorm failed; fall back to the trimmed reads
            sample.general.normalisedreads = sample.general.trimmedfastqfiles
        except IndexError:
            # no trimmed/corrected fastq files were present for this sample
            sample.general.normalisedreads = list()
[ "def", "normalise_reads", "(", "self", ")", ":", "logging", ".", "info", "(", "'Normalising reads to a kmer depth of 100'", ")", "for", "sample", "in", "self", ".", "metadata", ":", "# Set the name of the normalised read files", "sample", ".", "general", ".", "normali...
59.238095
27.142857
def _inherited_value(self, attr_name): """ Return the attribute value, e.g. 'width' of the base placeholder this placeholder inherits from. """ base_placeholder = self._base_placeholder if base_placeholder is None: return None inherited_value = getattr(base_placeholder, attr_name) return inherited_value
[ "def", "_inherited_value", "(", "self", ",", "attr_name", ")", ":", "base_placeholder", "=", "self", ".", "_base_placeholder", "if", "base_placeholder", "is", "None", ":", "return", "None", "inherited_value", "=", "getattr", "(", "base_placeholder", ",", "attr_nam...
37.1
10.7
def _infer_precision(base_precision, bins): """Infer an appropriate precision for _round_frac """ for precision in range(base_precision, 20): levels = [_round_frac(b, precision) for b in bins] if algos.unique(levels).size == bins.size: return precision return base_precision
[ "def", "_infer_precision", "(", "base_precision", ",", "bins", ")", ":", "for", "precision", "in", "range", "(", "base_precision", ",", "20", ")", ":", "levels", "=", "[", "_round_frac", "(", "b", ",", "precision", ")", "for", "b", "in", "bins", "]", "...
38.875
8.125
def imethodcallPayload(self, methodname, localnsp, **kwargs): """Generate the XML payload for an intrinsic methodcall.""" param_list = [pywbem.IPARAMVALUE(x[0], pywbem.tocimxml(x[1])) for x in kwargs.items()] payload = cim_xml.CIM( cim_xml.MESSAGE( cim_xml.SIMPLEREQ( cim_xml.IMETHODCALL( methodname, cim_xml.LOCALNAMESPACEPATH( [cim_xml.NAMESPACE(ns) for ns in localnsp.split('/')]), param_list)), '1001', '1.0'), '2.0', '2.0') return self.xml_header + payload.toxml()
[ "def", "imethodcallPayload", "(", "self", ",", "methodname", ",", "localnsp", ",", "*", "*", "kwargs", ")", ":", "param_list", "=", "[", "pywbem", ".", "IPARAMVALUE", "(", "x", "[", "0", "]", ",", "pywbem", ".", "tocimxml", "(", "x", "[", "1", "]", ...
37.526316
15.052632
def RecurseKey(recur_item, depth=15, key_path=''): """Flattens nested dictionaries and lists by yielding it's values. The hierarchy of a plist file is a series of nested dictionaries and lists. This is a helper function helps plugins navigate the structure without having to reimplement their own recursive methods. This method implements an overridable depth limit to prevent processing extremely deeply nested plists. If the limit is reached a debug message is logged indicating which key processing stopped on. Example Input Plist: recur_item = { DeviceRoot: { DeviceMAC1: [Value1, Value2, Value3], DeviceMAC2: [Value1, Value2, Value3]}} Example Output: ('', DeviceRoot, {DeviceMACs...}) (DeviceRoot, DeviceMAC1, [Value1, Value2, Value3]) (DeviceRoot, DeviceMAC2, [Value1, Value2, Value3]) Args: recur_item: An object to be checked for additional nested items. depth: Optional integer indication the current recursion depth. This value is used to ensure we stop at the maximum recursion depth. key_path: Optional path of the current working key. Yields: A tuple of the key path, key, and value from a plist. """ if depth < 1: logger.debug('Recursion limit hit for key: {0:s}'.format(key_path)) return if isinstance(recur_item, (list, tuple)): for recur in recur_item: for key in RecurseKey(recur, depth=depth, key_path=key_path): yield key return if not hasattr(recur_item, 'items'): return for subkey, value in iter(recur_item.items()): yield key_path, subkey, value if isinstance(value, dict): value = [value] if isinstance(value, list): for item in value: if not isinstance(item, dict): continue subkey_path = '{0:s}/{1:s}'.format(key_path, subkey) for tuple_value in RecurseKey( item, depth=depth - 1, key_path=subkey_path): yield tuple_value
[ "def", "RecurseKey", "(", "recur_item", ",", "depth", "=", "15", ",", "key_path", "=", "''", ")", ":", "if", "depth", "<", "1", ":", "logger", ".", "debug", "(", "'Recursion limit hit for key: {0:s}'", ".", "format", "(", "key_path", ")", ")", "return", ...
33.807018
23.368421
def inverse(self): """ returns q.conjugate()/q.norm()**2 So if the quaternion is unit length, it is the same as the conjugate. """ new = self.conjugate() tmp = self.norm()**2 new.w /= tmp new.x /= tmp new.y /= tmp new.z /= tmp return new
[ "def", "inverse", "(", "self", ")", ":", "new", "=", "self", ".", "conjugate", "(", ")", "tmp", "=", "self", ".", "norm", "(", ")", "**", "2", "new", ".", "w", "/=", "tmp", "new", ".", "x", "/=", "tmp", "new", ".", "y", "/=", "tmp", "new", ...
24.769231
16.307692
def setBrush(self, b, resize=0, proportional=None): """ Sets the size of the current :py:class:`Brush`. :param brush: The :py:class:`Brush` object to use as a brush. :param resize: An optional absolute value to resize the brush before using it. :param proportional: An optional relative float 0-1 value to resize the brush before using it. :rtype: Nothing. """ if proportional!=None: resize = int(self.brush.brushSize*0.5) b.resizeBrush(resize) #If resize=0 it reset to its default size self.brush = b if self.brush and self.brush.doesUseSourceCaching(): self.brush.cacheBrush(self.color)
[ "def", "setBrush", "(", "self", ",", "b", ",", "resize", "=", "0", ",", "proportional", "=", "None", ")", ":", "if", "proportional", "!=", "None", ":", "resize", "=", "int", "(", "self", ".", "brush", ".", "brushSize", "*", "0.5", ")", "b", ".", ...
40.333333
18.866667
def supported_operations(self): """ All file operations supported by the camera. """ return tuple(op for op in backend.FILE_OPS if self._operations & op)
[ "def", "supported_operations", "(", "self", ")", ":", "return", "tuple", "(", "op", "for", "op", "in", "backend", ".", "FILE_OPS", "if", "self", ".", "_operations", "&", "op", ")" ]
55.666667
15
def _get_sample_select(samples, keep): """Returns a vector of True/False to keep samples.""" k = np.ones_like(samples, dtype=bool) if keep is not None: k = np.array([s in keep for s in samples], dtype=bool) if np.sum(k) == 0: logger.warning("No samples matched the keep list") return k
[ "def", "_get_sample_select", "(", "samples", ",", "keep", ")", ":", "k", "=", "np", ".", "ones_like", "(", "samples", ",", "dtype", "=", "bool", ")", "if", "keep", "is", "not", "None", ":", "k", "=", "np", ".", "array", "(", "[", "s", "in", "keep...
40.25
13.125
def fromString( parent, xmlstring, actions = None ): """ Loads the xml string as xml data and then calls the fromXml method. :param parent | <QWidget> xmlstring | <str> actions | {<str> name: <QAction>, .. } || None :return <XMenu> || None """ try: xdata = ElementTree.fromstring(xmlstring) except ExpatError, e: logger.exception(e) return None return XMenu.fromXml(parent, xdata, actions)
[ "def", "fromString", "(", "parent", ",", "xmlstring", ",", "actions", "=", "None", ")", ":", "try", ":", "xdata", "=", "ElementTree", ".", "fromstring", "(", "xmlstring", ")", "except", "ExpatError", ",", "e", ":", "logger", ".", "exception", "(", "e", ...
31.388889
16.722222
def window_bohman(N): r"""Bohman tapering window :param N: window length .. math:: w(n) = (1-|x|) \cos (\pi |x|) + \frac{1}{\pi} \sin(\pi |x|) where x is a length N vector of linearly spaced values between -1 and 1. .. plot:: :width: 80% :include-source: from spectrum import window_visu window_visu(64, 'bohman') .. seealso:: :func:`create_window`, :class:`Window` """ x = linspace(-1, 1, N) w = (1.-abs(x)) * cos(pi*abs(x)) + 1./pi * sin(pi*abs(x)) return w
[ "def", "window_bohman", "(", "N", ")", ":", "x", "=", "linspace", "(", "-", "1", ",", "1", ",", "N", ")", "w", "=", "(", "1.", "-", "abs", "(", "x", ")", ")", "*", "cos", "(", "pi", "*", "abs", "(", "x", ")", ")", "+", "1.", "/", "pi", ...
23.636364
23.045455
def addLocation(self, locationUri, weight): """ add relevant location to the topic page @param locationUri: uri of the location to add @param weight: importance of the provided location (typically in range 1 - 50) """ assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer" self.topicPage["locations"].append({"uri": locationUri, "wgt": weight})
[ "def", "addLocation", "(", "self", ",", "locationUri", ",", "weight", ")", ":", "assert", "isinstance", "(", "weight", ",", "(", "float", ",", "int", ")", ")", ",", "\"weight value has to be a positive or negative integer\"", "self", ".", "topicPage", "[", "\"lo...
54.375
21.625
def user_path(self, team, user): """ Returns the path to directory with the user's package repositories. """ return os.path.join(self.team_path(team), user)
[ "def", "user_path", "(", "self", ",", "team", ",", "user", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "team_path", "(", "team", ")", ",", "user", ")" ]
36.8
11.6
def create_privkey(self): """ This is called by post_build() for key creation. """ if self.group in _tls_named_ffdh_groups: params = _ffdh_groups[_tls_named_ffdh_groups[self.group]][0] privkey = params.generate_private_key() self.privkey = privkey pubkey = privkey.public_key() self.key_exchange = pubkey.public_numbers().y elif self.group in _tls_named_curves: if _tls_named_curves[self.group] == "x25519": if conf.crypto_valid_advanced: privkey = x25519.X25519PrivateKey.generate() self.privkey = privkey pubkey = privkey.public_key() self.key_exchange = pubkey.public_bytes() elif _tls_named_curves[self.group] != "x448": curve = ec._CURVE_TYPES[_tls_named_curves[self.group]]() privkey = ec.generate_private_key(curve, default_backend()) self.privkey = privkey pubkey = privkey.public_key() try: # cryptography >= 2.5 self.key_exchange = pubkey.public_bytes( serialization.Encoding.X962, serialization.PublicFormat.UncompressedPoint ) except TypeError: # older versions self.key_exchange = pubkey.public_numbers().encode_point()
[ "def", "create_privkey", "(", "self", ")", ":", "if", "self", ".", "group", "in", "_tls_named_ffdh_groups", ":", "params", "=", "_ffdh_groups", "[", "_tls_named_ffdh_groups", "[", "self", ".", "group", "]", "]", "[", "0", "]", "privkey", "=", "params", "."...
47.290323
13.870968
def probePlane(img, origin=(0, 0, 0), normal=(1, 0, 0)): """ Takes a ``vtkImageData`` and probes its scalars on a plane. .. hint:: |probePlane| |probePlane.py|_ """ plane = vtk.vtkPlane() plane.SetOrigin(origin) plane.SetNormal(normal) planeCut = vtk.vtkCutter() planeCut.SetInputData(img) planeCut.SetCutFunction(plane) planeCut.Update() cutActor = Actor(planeCut.GetOutput(), c=None) # ScalarVisibilityOn cutActor.mapper.SetScalarRange(img.GetPointData().GetScalars().GetRange()) return cutActor
[ "def", "probePlane", "(", "img", ",", "origin", "=", "(", "0", ",", "0", ",", "0", ")", ",", "normal", "=", "(", "1", ",", "0", ",", "0", ")", ")", ":", "plane", "=", "vtk", ".", "vtkPlane", "(", ")", "plane", ".", "SetOrigin", "(", "origin",...
31.764706
17.529412
def contrast(image, mask = slice(None)): r""" Takes a simple or multi-spectral image and returns the contrast of the texture. Fcon = standard_deviation(gray_value) / (kurtosis(gray_value)**0.25) Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). mask : array_like A binary mask for the image or a slice object Returns ------- contrast : float High differences in gray value distribution is represented in a high contrast value. See Also -------- """ image = numpy.asarray(image) # set default mask or apply given mask if not type(mask) is slice: if not type(mask[0] is slice): mask = numpy.array(mask, copy=False, dtype = numpy.bool) image = image[mask] standard_deviation = numpy.std(image) kurtosis = stats.kurtosis(image, axis=None, bias=True, fisher=False) n = 0.25 # The value n=0.25 is recommended as the best for discriminating the textures. Fcon = standard_deviation / (kurtosis**n) return Fcon
[ "def", "contrast", "(", "image", ",", "mask", "=", "slice", "(", "None", ")", ")", ":", "image", "=", "numpy", ".", "asarray", "(", "image", ")", "# set default mask or apply given mask", "if", "not", "type", "(", "mask", ")", "is", "slice", ":", "if", ...
30.756757
24.189189
def _resolve_argn(macro, args): """Get argument from macro name ie : $ARG3$ -> args[2] :param macro: macro to parse :type macro: :param args: args given to command line :type args: :return: argument at position N-1 in args table (where N is the int parsed) :rtype: None | str """ # first, get the number of args _id = None matches = re.search(r'ARG(?P<id>\d+)', macro) if matches is not None: _id = int(matches.group('id')) - 1 try: return args[_id] except IndexError: # Required argument not found, returns an empty string return '' return ''
[ "def", "_resolve_argn", "(", "macro", ",", "args", ")", ":", "# first, get the number of args", "_id", "=", "None", "matches", "=", "re", ".", "search", "(", "r'ARG(?P<id>\\d+)'", ",", "macro", ")", "if", "matches", "is", "not", "None", ":", "_id", "=", "i...
32.681818
14.954545
def set_visible(self, visible): """ Set the visibility of the widget. """ v = View.VISIBILITY_VISIBLE if visible else View.VISIBILITY_GONE self.widget.setVisibility(v)
[ "def", "set_visible", "(", "self", ",", "visible", ")", ":", "v", "=", "View", ".", "VISIBILITY_VISIBLE", "if", "visible", "else", "View", ".", "VISIBILITY_GONE", "self", ".", "widget", ".", "setVisibility", "(", "v", ")" ]
32.5
14.166667
def prior_dates(*args, **kwargs): """Get the prior distribution of calibrated radiocarbon dates""" try: chron = args[0] except IndexError: chron = kwargs['coredates'] d_r = np.array(kwargs['d_r']) d_std = np.array(kwargs['d_std']) t_a = np.array(kwargs['t_a']) t_b = np.array(kwargs['t_b']) try: normal_distr = kwargs['normal_distr'] except KeyError: normal_distr = None cc_int = kwargs['cc'] ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13', 3: 'SHCal13', 4: 'ConstCal'} # There is a better way to do this. if 'cc1' in kwargs: ccdict[1] = str(kwargs['cc1']) if 'cc2' in kwargs: ccdict[2] = str(kwargs['cc2']) if 'cc3' in kwargs: ccdict[3] = str(kwargs['cc3']) if 'cc4' in kwargs: ccdict[4] = str(kwargs['cc4']) cc = [] for i in cc_int: i = int(i) cc.append(fetch_calibcurve(ccdict[i])) d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std, t_a=t_a, t_b=t_b, normal_distr=normal_distr) return d, p
[ "def", "prior_dates", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "chron", "=", "args", "[", "0", "]", "except", "IndexError", ":", "chron", "=", "kwargs", "[", "'coredates'", "]", "d_r", "=", "np", ".", "array", "(", "kwargs",...
31.076923
16.307692
def validate_maildirs(ctx, param, value): """ Check that folders are maildirs. """ for path in value: for subdir in MD_SUBDIRS: if not os.path.isdir(os.path.join(path, subdir)): raise click.BadParameter( '{} is not a maildir (missing {!r} sub-directory).'.format( path, subdir)) return value
[ "def", "validate_maildirs", "(", "ctx", ",", "param", ",", "value", ")", ":", "for", "path", "in", "value", ":", "for", "subdir", "in", "MD_SUBDIRS", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "path...
41.666667
12.555556
def make_app(global_conf, full_stack=True, **app_conf): """Create a Pylons WSGI application and return it ``global_conf`` The inherited configuration for this application. Normally from the [DEFAULT] section of the Paste ini file. ``full_stack`` Whether or not this application provides a full WSGI stack (by default, meaning it handles its own exceptions and errors). Disable full_stack when this application is "managed" by another WSGI middleware. ``app_conf`` The application's local configuration. Normally specified in the [app:<name>] section of the Paste ini file (where <name> defaults to main). """ # Configure the Pylons environment load_environment(global_conf, app_conf) # The Pylons WSGI app app = PylonsApp() # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares) # Routing/Session/Cache Middleware app = RoutesMiddleware(app, config['routes.map']) #~ app = SessionMiddleware(app, config) app = CacheMiddleware(app, config) if asbool(full_stack): # Handle Python exceptions #~ app = ErrorHandler(app, global_conf, **config['pylons.errorware']) # Display error documents for 401, 403, 404 status codes (and # 500 when debug is disabled) if asbool(config['debug']): app = StatusCodeRedirect(app) else: app = StatusCodeRedirect(app, [400, 401, 403, 404, 500]) # Establish the Registry for this application app = RegistryManager(app, streaming=True) # Static files (If running in production, and Apache or another web # server is handling this static content, remove the following 3 lines) javascripts_app = StaticJavascripts() static_app = StaticURLParser(config['pylons.paths']['static_files']) app = Cascade([static_app, javascripts_app, app]) from cogen.web.async import LazyStartResponseMiddleware app = LazyStartResponseMiddleware(app) app = SessionMiddleware(app, config) from cogen.web.async import SynchronousInputMiddleware app = SynchronousInputMiddleware(app) return app
[ "def", "make_app", "(", "global_conf", ",", "full_stack", "=", "True", ",", "*", "*", "app_conf", ")", ":", "# Configure the Pylons environment\r", "load_environment", "(", "global_conf", ",", "app_conf", ")", "# The Pylons WSGI app\r", "app", "=", "PylonsApp", "(",...
35.655738
21.52459
def addFilteringOptions(parser, samfileIsPositionalArg=False): """ Add options to an argument parser for filtering SAM/BAM. @param samfileIsPositionalArg: If C{True} the SAM/BAM file must be given as the final argument on the command line (without being preceded by --samfile). @param parser: An C{argparse.ArgumentParser} instance. """ parser.add_argument( '%ssamfile' % ('' if samfileIsPositionalArg else '--'), required=True, help='The SAM/BAM file to filter.') parser.add_argument( '--referenceId', metavar='ID', nargs='+', action='append', help=('A reference sequence id whose alignments should be kept ' '(alignments against other references will be dropped). ' 'If omitted, alignments against all references will be ' 'kept. May be repeated.')) parser.add_argument( '--dropUnmapped', default=False, action='store_true', help='If given, unmapped matches will not be output.') parser.add_argument( '--dropSecondary', default=False, action='store_true', help='If given, secondary matches will not be output.') parser.add_argument( '--dropSupplementary', default=False, action='store_true', help='If given, supplementary matches will not be output.') parser.add_argument( '--dropDuplicates', default=False, action='store_true', help=('If given, matches flagged as optical or PCR duplicates ' 'will not be output.')) parser.add_argument( '--keepQCFailures', default=False, action='store_true', help=('If given, reads that are considered quality control ' 'failures will be included in the output.')) parser.add_argument( '--minScore', type=float, help=('If given, alignments with --scoreTag (default AS) values ' 'less than this value will not be output. If given, ' 'alignments that do not have a score will not be output.')) parser.add_argument( '--maxScore', type=float, help=('If given, alignments with --scoreTag (default AS) values ' 'greater than this value will not be output. 
If given, ' 'alignments that do not have a score will not be output.')) parser.add_argument( '--scoreTag', default='AS', help=('The alignment tag to extract for --minScore and --maxScore ' 'comparisons.'))
[ "def", "addFilteringOptions", "(", "parser", ",", "samfileIsPositionalArg", "=", "False", ")", ":", "parser", ".", "add_argument", "(", "'%ssamfile'", "%", "(", "''", "if", "samfileIsPositionalArg", "else", "'--'", ")", ",", "required", "=", "True", ",", "help...
44.40678
23.932203
def updateNewCredentialValues(self): """ Set the new credential values to the credentials to use, and delete the new ones """ credentials_base = "disk.0.os.credentials." new_credentials_base = "disk.0.os.credentials.new." for elem in ['password', 'public_key', 'private_key']: if self.getValue(new_credentials_base + elem): self.setValue(credentials_base + elem, self.getValue(new_credentials_base + elem)) self.delValue(new_credentials_base + elem)
[ "def", "updateNewCredentialValues", "(", "self", ")", ":", "credentials_base", "=", "\"disk.0.os.credentials.\"", "new_credentials_base", "=", "\"disk.0.os.credentials.new.\"", "for", "elem", "in", "[", "'password'", ",", "'public_key'", ",", "'private_key'", "]", ":", ...
44.333333
23.166667
def setup_logging(self): """ Configure the logging framework. """ if self.config.debug: util.setup_logging(level=logging.DEBUG) util.activate_debug_shell_on_signal() else: util.setup_logging(level=logging.INFO)
[ "def", "setup_logging", "(", "self", ")", ":", "if", "self", ".", "config", ".", "debug", ":", "util", ".", "setup_logging", "(", "level", "=", "logging", ".", "DEBUG", ")", "util", ".", "activate_debug_shell_on_signal", "(", ")", "else", ":", "util", "....
30.888889
9.333333
async def stop_async(self): """ Terminiates the partition manger. """ self.cancellation_token.cancel() if self.run_task and not self.run_task.done(): await self.run_task
[ "async", "def", "stop_async", "(", "self", ")", ":", "self", ".", "cancellation_token", ".", "cancel", "(", ")", "if", "self", ".", "run_task", "and", "not", "self", ".", "run_task", ".", "done", "(", ")", ":", "await", "self", ".", "run_task" ]
30.714286
5.285714
def get_and_cache_account(self, addr): """Gets and caches an account for an addres, creates blank if not found. :param addr: :return: """ if addr in self.cache: return self.cache[addr] rlpdata = self.secure_trie.get(addr) if ( rlpdata == trie.BLANK_NODE and len(addr) == 32 ): # support for hashed addresses rlpdata = self.trie.get(addr) if rlpdata != trie.BLANK_NODE: o = rlp.decode(rlpdata, Account, db=self.db, addr=addr) else: o = Account.blank_account(self.db, addr, 0) self.cache[addr] = o o._mutable = True o._cached_rlp = None return o
[ "def", "get_and_cache_account", "(", "self", ",", "addr", ")", ":", "if", "addr", "in", "self", ".", "cache", ":", "return", "self", ".", "cache", "[", "addr", "]", "rlpdata", "=", "self", ".", "secure_trie", ".", "get", "(", "addr", ")", "if", "(", ...
29.333333
16.416667
def lazy_send(chainlet, chunks): """ Canonical version of `chainlet_send` that always takes and returns an iterable :param chainlet: the chainlet to receive and return data :type chainlet: chainlink.ChainLink :param chunks: the stream slice of data to pass to ``chainlet`` :type chunks: iterable :return: the resulting stream slice of data returned by ``chainlet`` :rtype: iterable """ fork, join = chainlet.chain_fork, chainlet.chain_join if fork and join: return _send_n_get_m(chainlet, chunks) elif fork: return _lazy_send_1_get_m(chainlet, chunks) elif join: return _lazy_send_n_get_1(chainlet, chunks) else: return _lazy_send_1_get_1(chainlet, chunks)
[ "def", "lazy_send", "(", "chainlet", ",", "chunks", ")", ":", "fork", ",", "join", "=", "chainlet", ".", "chain_fork", ",", "chainlet", ".", "chain_join", "if", "fork", "and", "join", ":", "return", "_send_n_get_m", "(", "chainlet", ",", "chunks", ")", "...
36.3
18.1
def set_eep(self, data): ''' Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP. ''' self._bit_data, self._bit_status = self.eep.set_values(self._profile, self._bit_data, self._bit_status, data)
[ "def", "set_eep", "(", "self", ",", "data", ")", ":", "self", ".", "_bit_data", ",", "self", ".", "_bit_status", "=", "self", ".", "eep", ".", "set_values", "(", "self", ".", "_profile", ",", "self", ".", "_bit_data", ",", "self", ".", "_bit_status", ...
84
54.666667
def assert_not_equal(first, second, msg_fmt="{msg}"): """Fail if first equals second, as determined by the '==' operator. >>> assert_not_equal(5, 8) >>> assert_not_equal(-7, -7.0) Traceback (most recent call last): ... AssertionError: -7 == -7.0 The following msg_fmt arguments are supported: * msg - the default error message * first - the first argument * second - the second argument """ if first == second: msg = "{!r} == {!r}".format(first, second) fail(msg_fmt.format(msg=msg, first=first, second=second))
[ "def", "assert_not_equal", "(", "first", ",", "second", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "if", "first", "==", "second", ":", "msg", "=", "\"{!r} == {!r}\"", ".", "format", "(", "first", ",", "second", ")", "fail", "(", "msg_fmt", ".", "format"...
31.388889
14.944444
def _multi_value_field(self, field): """ Private method that returns `True` if a field is multi-valued, else `False`. Required arguemnts: `field` -- The field to lookup Returns a boolean value indicating whether the field is multi-valued. """ for field_dict in self.schema: if field_dict['field_name'] == field: return field_dict['multi_valued'] == 'true' return False
[ "def", "_multi_value_field", "(", "self", ",", "field", ")", ":", "for", "field_dict", "in", "self", ".", "schema", ":", "if", "field_dict", "[", "'field_name'", "]", "==", "field", ":", "return", "field_dict", "[", "'multi_valued'", "]", "==", "'true'", "...
32.928571
17.5
def listen_tta(self, target, timeout): """Listen *timeout* seconds for a Type A activation at 106 kbps. The ``sens_res``, ``sdd_res``, and ``sel_res`` response data must be provided and ``sdd_res`` must be a 4 byte UID that starts with ``08h``. Depending on ``sel_res`` an activation may return a target with a ``tt2_cmd``, ``tt4_cmd`` or ``atr_req`` attribute. The default RATS response sent for a Type 4 Tag activation can be replaced with a ``rats_res`` attribute. """ return super(Device, self).listen_tta(target, timeout)
[ "def", "listen_tta", "(", "self", ",", "target", ",", "timeout", ")", ":", "return", "super", "(", "Device", ",", "self", ")", ".", "listen_tta", "(", "target", ",", "timeout", ")" ]
53.545455
20.545455
def timeseries(self): """ Load time series It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries()` looks for time series of the according sector in :class:`~.grid.network.TimeSeries` object. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'. """ if self._timeseries is None: if isinstance(self.grid, MVGrid): voltage_level = 'mv' elif isinstance(self.grid, LVGrid): voltage_level = 'lv' ts_total = None for sector in self.consumption.keys(): consumption = self.consumption[sector] # check if load time series for MV and LV are differentiated try: ts = self.grid.network.timeseries.load[ sector, voltage_level].to_frame('p') except KeyError: try: ts = self.grid.network.timeseries.load[ sector].to_frame('p') except KeyError: logger.exception( "No timeseries for load of type {} " "given.".format(sector)) raise ts = ts * consumption ts_q = self.timeseries_reactive if ts_q is not None: ts['q'] = ts_q.q else: ts['q'] = ts['p'] * self.q_sign * tan( acos(self.power_factor)) if ts_total is None: ts_total = ts else: ts_total.p += ts.p ts_total.q += ts.q return ts_total else: return self._timeseries
[ "def", "timeseries", "(", "self", ")", ":", "if", "self", ".", "_timeseries", "is", "None", ":", "if", "isinstance", "(", "self", ".", "grid", ",", "MVGrid", ")", ":", "voltage_level", "=", "'mv'", "elif", "isinstance", "(", "self", ".", "grid", ",", ...
34.964912
16.964912
def measures(*measurements, **kwargs): """Decorator-maker used to declare measurements for phases. See the measurements module docstring for examples of usage. Args: measurements: Measurement objects to declare, or a string name from which to create a Measurement. kwargs: Keyword arguments to pass to Measurement constructor if we're constructing one. Note that if kwargs are provided, the length of measurements must be 1, and that value must be a string containing the measurement name. For valid kwargs, see the definition of the Measurement class. Returns: A decorator that declares the measurement(s) for the decorated phase. """ def _maybe_make(meas): """Turn strings into Measurement objects if necessary.""" if isinstance(meas, Measurement): return meas elif isinstance(meas, six.string_types): return Measurement(meas, **kwargs) raise InvalidMeasurementType('Expected Measurement or string', meas) # In case we're declaring a measurement inline, we can only declare one. if kwargs and len(measurements) != 1: raise InvalidMeasurementType( 'If @measures kwargs are provided, a single measurement name must be ' 'provided as a positional arg first.') # Unlikely, but let's make sure we don't allow overriding initial outcome. if 'outcome' in kwargs: raise ValueError('Cannot specify outcome in measurement declaration!') measurements = [_maybe_make(meas) for meas in measurements] # 'measurements' is guaranteed to be a list of Measurement objects here. def decorate(wrapped_phase): """Phase decorator to be returned.""" phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase) duplicate_names = (set(m.name for m in measurements) & set(m.name for m in phase.measurements)) if duplicate_names: raise DuplicateNameError('Measurement names duplicated', duplicate_names) phase.measurements.extend(measurements) return phase return decorate
[ "def", "measures", "(", "*", "measurements", ",", "*", "*", "kwargs", ")", ":", "def", "_maybe_make", "(", "meas", ")", ":", "\"\"\"Turn strings into Measurement objects if necessary.\"\"\"", "if", "isinstance", "(", "meas", ",", "Measurement", ")", ":", "return",...
40.877551
23
def get_statement_queries(stmts, **params): """Get queries used to search based on a statement. In addition to the stmts, you can enter any parameters standard to the query. See https://github.com/indralab/indra_db/rest_api for a full list. Parameters ---------- stmts : list[Statement] A list of INDRA statements. """ def pick_ns(ag): for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']: if ns in ag.db_refs.keys(): dbid = ag.db_refs[ns] break else: ns = 'TEXT' dbid = ag.name return '%s@%s' % (dbid, ns) queries = [] url_base = get_url_base('statements/from_agents') non_binary_statements = [Complex, SelfModification, ActiveForm] for stmt in stmts: kwargs = {} if type(stmt) not in non_binary_statements: for pos, ag in zip(['subject', 'object'], stmt.agent_list()): if ag is not None: kwargs[pos] = pick_ns(ag) else: for i, ag in enumerate(stmt.agent_list()): if ag is not None: kwargs['agent%d' % i] = pick_ns(ag) kwargs['type'] = stmt.__class__.__name__ kwargs.update(params) query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items() if v is not None]) queries.append(url_base + query_str) return queries
[ "def", "get_statement_queries", "(", "stmts", ",", "*", "*", "params", ")", ":", "def", "pick_ns", "(", "ag", ")", ":", "for", "ns", "in", "[", "'HGNC'", ",", "'FPLX'", ",", "'CHEMBL'", ",", "'CHEBI'", ",", "'GO'", ",", "'MESH'", "]", ":", "if", "n...
35.02439
18.439024
def create_tumor_bamdir(tumor, tumor_bam, normal_bam, work_dir): """Create expected input directory with tumor/normal BAMs in one place. """ bam_dir = utils.safe_makedir(os.path.join(work_dir, tumor, "in_bams")) normal_bam_ready = os.path.join(bam_dir, os.path.basename(normal_bam)) utils.symlink_plus(normal_bam, normal_bam_ready) tumor_bam_ready = os.path.join(bam_dir, os.path.basename(tumor_bam)) utils.symlink_plus(tumor_bam, tumor_bam_ready) return bam_dir, normal_bam_ready
[ "def", "create_tumor_bamdir", "(", "tumor", ",", "tumor_bam", ",", "normal_bam", ",", "work_dir", ")", ":", "bam_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "tumor", ",", "\"in_bams\"", ")", ")", "n...
56
16.666667
def cli(env, identifier, name, all, note):
    """Capture one or all disks from a virtual server to a SoftLayer image."""
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
    capture = vsi.capture(vs_id, name, all, note)

    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'

    # createDate is an ISO-style timestamp; slice out date and time parts.
    created = capture['createDate']
    rows = (
        ('vs_id', capture['guestId']),
        ('date', created[:10]),
        ('time', created[11:19]),
        ('transaction', formatting.transaction_status(capture)),
        ('transaction_id', capture['id']),
        ('all_disks', all),
    )
    for label, value in rows:
        table.add_row([label, value])

    env.fout(table)
[ "def", "cli", "(", "env", ",", "identifier", ",", "name", ",", "all", ",", "note", ")", ":", "vsi", "=", "SoftLayer", ".", "VSManager", "(", "env", ".", "client", ")", "vs_id", "=", "helpers", ".", "resolve_id", "(", "vsi", ".", "resolve_ids", ",", ...
38.473684
18.052632
def set_attributes(self, **attributes):
    """ Set group of attributes without calling set between attributes regardless of global auto_set.

    Set will be called only after all attributes are set based on global auto_set.

    :param attributes: dictionary of <attribute, value> to set.
    """
    # Remember the global flag, then suppress per-attribute sets while
    # assigning the whole group.
    previous_auto_set = IxeObject.get_auto_set()
    IxeObject.set_auto_set(False)
    for attr_name, attr_value in attributes.items():
        setattr(self, attr_name, attr_value)
    # Apply everything in one go only if auto_set was enabled, then restore
    # the previous global state.
    if previous_auto_set:
        self.ix_set()
    IxeObject.set_auto_set(previous_auto_set)
[ "def", "set_attributes", "(", "self", ",", "*", "*", "attributes", ")", ":", "auto_set", "=", "IxeObject", ".", "get_auto_set", "(", ")", "IxeObject", ".", "set_auto_set", "(", "False", ")", "for", "name", ",", "value", "in", "attributes", ".", "items", ...
37.133333
16.2
def str(self,local):
    """ Return the string representation of the time range

    :param local: if False [default] use UTC datetime. If True use localtz
    """
    endpoints = (self.start_time.str(local), self.end_time.str(local))
    return u" to ".join(endpoints)
[ "def", "str", "(", "self", ",", "local", ")", ":", "s", "=", "self", ".", "start_time", ".", "str", "(", "local", ")", "+", "u\" to \"", "+", "self", ".", "end_time", ".", "str", "(", "local", ")", "return", "s" ]
32.444444
16.111111
def show(self, ax:plt.Axes=None, figsize:tuple=(3,3), title:Optional[str]=None, hide_axis:bool=True,
        cmap:str=None, y:Any=None, **kwargs):
    "Show image on `ax` with `title`, using `cmap` if single-channel, overlaid with optional `y`"
    # Fall back to the library-wide default colormap when none is supplied.
    chosen_cmap = defaults.cmap if cmap is None else cmap
    target_ax = show_image(self, ax=ax, hide_axis=hide_axis, cmap=chosen_cmap, figsize=figsize)
    # Optional overlay (e.g. mask/points/boxes) drawn on the same axes.
    if y is not None:
        y.show(ax=target_ax, **kwargs)
    if title is not None:
        target_ax.set_title(title)
[ "def", "show", "(", "self", ",", "ax", ":", "plt", ".", "Axes", "=", "None", ",", "figsize", ":", "tuple", "=", "(", "3", ",", "3", ")", ",", "title", ":", "Optional", "[", "str", "]", "=", "None", ",", "hide_axis", ":", "bool", "=", "True", ...
68.142857
28.142857