text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def open(self): # type: () -> None """Connect to the TwinCAT message router.""" if self._open: return self._port = adsPortOpenEx() if linux: adsAddRoute(self._adr.netIdStruct(), self.ip_address) self._open = True
[ "def", "open", "(", "self", ")", ":", "# type: () -> None\r", "if", "self", ".", "_open", ":", "return", "self", ".", "_port", "=", "adsPortOpenEx", "(", ")", "if", "linux", ":", "adsAddRoute", "(", "self", ".", "_adr", ".", "netIdStruct", "(", ")", ",...
23.916667
21.5
def _decrypt(private_key, ciphertext, rsa_oaep_padding=False): """ Encrypts a value using an RSA private key :param private_key: A PrivateKey instance to decrypt with :param ciphertext: A byte string of the data to decrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext """ if not isinstance(private_key, PrivateKey): raise TypeError(pretty_message( ''' private_key must be an instance of the PrivateKey class, not %s ''', type_name(private_key) )) if not isinstance(ciphertext, byte_cls): raise TypeError(pretty_message( ''' ciphertext must be a byte string, not %s ''', type_name(ciphertext) )) if not isinstance(rsa_oaep_padding, bool): raise TypeError(pretty_message( ''' rsa_oaep_padding must be a bool, not %s ''', type_name(rsa_oaep_padding) )) if _backend == 'winlegacy': return _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding) return _bcrypt_decrypt(private_key, ciphertext, rsa_oaep_padding)
[ "def", "_decrypt", "(", "private_key", ",", "ciphertext", ",", "rsa_oaep_padding", "=", "False", ")", ":", "if", "not", "isinstance", "(", "private_key", ",", "PrivateKey", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n private_key m...
29.591837
20.653061
def dumps(obj): """Outputs json with formatting edits + object handling.""" return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)
[ "def", "dumps", "(", "obj", ")", ":", "return", "json", ".", "dumps", "(", "obj", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ",", "cls", "=", "CustomEncoder", ")" ]
49.666667
18.666667
def find_config_files( path=['~/.vcspull'], match=['*'], filetype=['json', 'yaml'], include_home=False ): """Return repos from a directory and match. Not recursive. :param path: list of paths to search :type path: list :param match: list of globs to search against :type match: list :param filetype: list of filetypes to search against :type filetype: list :param include_home: Include home configuration files :type include_home: bool :raises: - LoadConfigRepoConflict: There are two configs that have same path and name with different repo urls. :returns: list of absolute paths to config files. :rtype: list """ configs = [] if include_home is True: configs.extend(find_home_config_files()) if isinstance(path, list): for p in path: configs.extend(find_config_files(p, match, filetype)) return configs else: path = os.path.expanduser(path) if isinstance(match, list): for m in match: configs.extend(find_config_files(path, m, filetype)) else: if isinstance(filetype, list): for f in filetype: configs.extend(find_config_files(path, match, f)) else: match = os.path.join(path, match) match += ".{filetype}".format(filetype=filetype) configs = glob.glob(match) return configs
[ "def", "find_config_files", "(", "path", "=", "[", "'~/.vcspull'", "]", ",", "match", "=", "[", "'*'", "]", ",", "filetype", "=", "[", "'json'", ",", "'yaml'", "]", ",", "include_home", "=", "False", ")", ":", "configs", "=", "[", "]", "if", "include...
31.822222
19.666667
def get_all_resource_attributes(ref_key, network_id, template_id=None, **kwargs): """ Get all the resource attributes for a given resource type in the network. That includes all the resource attributes for a given type within the network. For example, if the ref_key is 'NODE', then it will return all the attirbutes of all nodes in the network. This function allows a front end to pre-load an entire network's resource attribute information to reduce on function calls. If type_id is specified, only return the resource attributes within the type. """ user_id = kwargs.get('user_id') resource_attr_qry = db.DBSession.query(ResourceAttr).\ outerjoin(Node, Node.id==ResourceAttr.node_id).\ outerjoin(Link, Link.id==ResourceAttr.link_id).\ outerjoin(ResourceGroup, ResourceGroup.id==ResourceAttr.group_id).filter( ResourceAttr.ref_key == ref_key, or_( and_(ResourceAttr.node_id != None, ResourceAttr.node_id == Node.id, Node.network_id==network_id), and_(ResourceAttr.link_id != None, ResourceAttr.link_id == Link.id, Link.network_id==network_id), and_(ResourceAttr.group_id != None, ResourceAttr.group_id == ResourceGroup.id, ResourceGroup.network_id==network_id) )) if template_id is not None: attr_ids = [] rs = db.DBSession.query(TypeAttr).join(TemplateType, TemplateType.id==TypeAttr.type_id).filter( TemplateType.template_id==template_id).all() for r in rs: attr_ids.append(r.attr_id) resource_attr_qry = resource_attr_qry.filter(ResourceAttr.attr_id.in_(attr_ids)) resource_attrs = resource_attr_qry.all() return resource_attrs
[ "def", "get_all_resource_attributes", "(", "ref_key", ",", "network_id", ",", "template_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "resource_attr_qry", "=", "db", ".", "DBSession", ".", ...
44.266667
25.777778
def get_top_tags(self, limit=None, cacheable=True): """ Returns a sequence of the top tags used by this user with their counts as TopItem objects. * limit: The limit of how many tags to return. * cacheable: Whether to cache results. """ params = self._get_params() if limit: params["limit"] = limit doc = self._request(self.ws_prefix + ".getTopTags", cacheable, params) seq = [] for node in doc.getElementsByTagName("tag"): seq.append( TopItem( Tag(_extract(node, "name"), self.network), _extract(node, "count") ) ) return seq
[ "def", "get_top_tags", "(", "self", ",", "limit", "=", "None", ",", "cacheable", "=", "True", ")", ":", "params", "=", "self", ".", "_get_params", "(", ")", "if", "limit", ":", "params", "[", "\"limit\"", "]", "=", "limit", "doc", "=", "self", ".", ...
30.086957
21.73913
def main(): """ The main loop for the commandline parser. """ DATABASE.load_contents() continue_flag = False while not continue_flag: DATABASE.print_contents() try: command = raw_input(">>> ") for stmnt_unformated in sqlparse.parse(command): statement = sqlparse.parse( sqlparse.format( str( stmnt_unformated ), reindent=True ) )[0] type = statement.tokens[0] if str(type).lower() == "drop": if str(statement.tokens[2]).lower() == "table": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) table.rows = [] table.store_contents() DATABASE.delete_table(tablename) DATABASE.store_contents() else: raise Exception( "Invalid Syntax of DROP TABLE t" ) elif str(type).lower() == "truncate": if str(statement.tokens[2]).lower() == "table": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) table.rows = [] table.store_contents() else: raise Exception( "Invalid Syntax of TRUNCATE TABLE t" ) elif str(type).lower() == "delete": if str(statement.tokens[2]).lower() == "from": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) whereclause = statement.tokens[6] if str(whereclause.tokens[0]).lower() == "where": comparison = whereclause.tokens[2] key = str(comparison.tokens[0]) value = int(str(comparison.tokens[4])) table.delete_row(key, value) table.store_contents() else: raise Exception( "Invalid Syntax of DELETE FROM t where k = v" ) else: raise Exception( "Invalid Syntax of DELETE FROM t WHERE k = v" ) elif str(type).lower() == "insert": if str(statement.tokens[2]).lower() == "into": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) if str(statement.tokens[6]).lower() == "values": parenthesis = statement.tokens[8] idlist = parenthesis.tokens[1] values_list = map( lambda x: int(str(x)), idlist.get_identifiers() ) table.put_row_raw(values_list) table.store_contents() else: raise Exception( "Invalid Syntax of INSERT INTO t VALUES (v...)" ) else: raise Exception( "Invalid Syntax of INSERT INTO t 
VALUES (v...)" ) elif str(type).lower() == "create": if str(statement.tokens[2]).lower() == "table": sublist = list(statement.tokens[4].get_sublists()) tablename = str(sublist[0]) garbage = str(sublist[1]) column_list = map( lambda x: x.strip(" ()",).split()[0], garbage.split(",") ) DATABASE.create_table_raw( tablename=tablename, columns=column_list[:], ) DATABASE.store_contents() elif str(type).lower() == "select": col_list_or_single = statement.tokens[2] if "," not in str(col_list_or_single): if str(col_list_or_single) == "*": column_list = ['*'] else: column_list = [str(col_list_or_single)] else: column_list = map( lambda x: str(x), col_list_or_single.get_identifiers() ) if str(statement.tokens[4]).lower() == "from": tab_list_or_single = statement.tokens[6] if "," not in str(tab_list_or_single): table_list = [str(tab_list_or_single)] else: table_list = map( lambda x: str(x), tab_list_or_single.get_identifiers() ) cross_columns = reduce( lambda x, y: x + y, map( lambda x: DATABASE.get_table( x ).get_column_list_prefixed(), table_list ) ) cross_table = parthsql.Table( name="temp", columns=cross_columns, rows=[] ) for i in itertools.product( *map( lambda x: DATABASE.get_table(x).get_all_rows(), table_list ) ): cross_table.put_row_raw( reduce( lambda x, y: x + y, i ) ) if len(statement.tokens) >= 9: whereclause = statement.tokens[8] if str(whereclause.tokens[0]).lower() == "where": comparison = whereclause.tokens[2] key = str(comparison.tokens[0]) try: value = int(str(comparison.tokens[4])) cross_table.invert_delete_row(key, value) except: value = str(comparison.tokens[4]) cross_table.invert_delete_row2(key, value) else: raise Exception( "Invalid Syntax of DELETE FROM t where k = v" ) if "*" in column_list: cross_table.print_contents() else: temp_list = [] for i in column_list: temp_list.append(cross_table.get_column(i)) print "\t\t\t".join(column_list) for i in zip(*(temp_list)): print "\t\t\t".join(map(str, i)) else: raise Exception( "Invalid Syntax of SELECT c... 
FROM t... WHERE k = v" ) else: raise Exception( "Unsupported Operation" ) except ValueError: print("¯\_(ツ)_/¯") except IOError: print("¯\_(ツ)_/¯") except IndexError: print("¯\_(ツ)_/¯") except AttributeError: print("¯\_(ツ)_/¯") except Exception, e: print e.message
[ "def", "main", "(", ")", ":", "DATABASE", ".", "load_contents", "(", ")", "continue_flag", "=", "False", "while", "not", "continue_flag", ":", "DATABASE", ".", "print_contents", "(", ")", "try", ":", "command", "=", "raw_input", "(", "\">>> \"", ")", "for"...
44.239583
15.614583
def key_to_path(self, key): """Return the fullpath to the file with sha1sum key.""" return os.path.join(self.cache_dir, key[:2], key[2:4], key[4:] + '.pkl')
[ "def", "key_to_path", "(", "self", ",", "key", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "key", "[", ":", "2", "]", ",", "key", "[", "2", ":", "4", "]", ",", "key", "[", "4", ":", "]", "+", "'.p...
49.25
10
def reset_network(message): """Resets the users network to make changes take effect""" for command in settings.RESTART_NETWORK: try: subprocess.check_call(command) except: pass print(message)
[ "def", "reset_network", "(", "message", ")", ":", "for", "command", "in", "settings", ".", "RESTART_NETWORK", ":", "try", ":", "subprocess", ".", "check_call", "(", "command", ")", "except", ":", "pass", "print", "(", "message", ")" ]
29.5
14.75
def loop(bot, config, interval, settings): """Schedule a BOT (by label) to run on an interval, e.g. 'MyBot -i 60'""" print_options(bot, config, settings) click.echo(f'- Interval: {interval}s') click.echo() bot_task = BotTask(bot, config) bot_task.run_loop(interval)
[ "def", "loop", "(", "bot", ",", "config", ",", "interval", ",", "settings", ")", ":", "print_options", "(", "bot", ",", "config", ",", "settings", ")", "click", ".", "echo", "(", "f'- Interval: {interval}s'", ")", "click", ".", "echo", "(", ")", "bot_tas...
40.428571
6
def repeat(self, time, function, args = []): """Repeat `function` every `time` milliseconds.""" callback_id = self.tk.after(time, self._call_wrapper, time, function, *args) self._callback[function] = [callback_id, True]
[ "def", "repeat", "(", "self", ",", "time", ",", "function", ",", "args", "=", "[", "]", ")", ":", "callback_id", "=", "self", ".", "tk", ".", "after", "(", "time", ",", "self", ".", "_call_wrapper", ",", "time", ",", "function", ",", "*", "args", ...
60
15.5
def _extension(modpath: str) -> setuptools.Extension: """Make setuptools.Extension.""" return setuptools.Extension(modpath, [modpath.replace(".", "/") + ".py"])
[ "def", "_extension", "(", "modpath", ":", "str", ")", "->", "setuptools", ".", "Extension", ":", "return", "setuptools", ".", "Extension", "(", "modpath", ",", "[", "modpath", ".", "replace", "(", "\".\"", ",", "\"/\"", ")", "+", "\".py\"", "]", ")" ]
55.333333
16.666667
def load_unicode(self, resource_path): """ Gets the content of a resource """ resource_content = pkg_resources.resource_string(self.module_name, resource_path) return resource_content.decode('utf-8')
[ "def", "load_unicode", "(", "self", ",", "resource_path", ")", ":", "resource_content", "=", "pkg_resources", ".", "resource_string", "(", "self", ".", "module_name", ",", "resource_path", ")", "return", "resource_content", ".", "decode", "(", "'utf-8'", ")" ]
39
10
def _find_monitor(monitors, handle): """Find all devices and events with a given monitor installed.""" found_devs = set() found_events = set() for conn_string, device in monitors.items(): for event, handles in device.items(): if handle in handles: found_events.add(event) found_devs.add(conn_string) return found_devs, found_events
[ "def", "_find_monitor", "(", "monitors", ",", "handle", ")", ":", "found_devs", "=", "set", "(", ")", "found_events", "=", "set", "(", ")", "for", "conn_string", ",", "device", "in", "monitors", ".", "items", "(", ")", ":", "for", "event", ",", "handle...
30.307692
14.384615
def gallery_image_versions(self): """Instance depends on the API version: * 2018-06-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2018_06_01.operations.GalleryImageVersionsOperations>` * 2019-03-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2019_03_01.operations.GalleryImageVersionsOperations>` """ api_version = self._get_api_version('gallery_image_versions') if api_version == '2018-06-01': from .v2018_06_01.operations import GalleryImageVersionsOperations as OperationClass elif api_version == '2019-03-01': from .v2019_03_01.operations import GalleryImageVersionsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "gallery_image_versions", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'gallery_image_versions'", ")", "if", "api_version", "==", "'2018-06-01'", ":", "from", ".", "v2018_06_01", ".", "operations", "import", "GalleryImageV...
68.428571
40.714286
def longest_increasing_subsequence(xs): '''Return a longest increasing subsequence of xs. (Note that there may be more than one such subsequence.) >>> longest_increasing_subsequence(range(3)) [0, 1, 2] >>> longest_increasing_subsequence([3, 1, 2, 0]) [1, 2] ''' # Patience sort xs, stacking (x, prev_ix) pairs on the piles. # Prev_ix indexes the element at the top of the previous pile, # which has a lower x value than the current x value. piles = [[]] # Create a dummy pile 0 for x, p in patience_sort(xs): if p + 1 == len(piles): piles.append([]) # backlink to the top of the previous pile piles[p + 1].append((x, len(piles[p]) - 1)) # Backtrack to find a longest increasing subsequence npiles = len(piles) - 1 prev = 0 lis = list() for pile in range(npiles, 0, -1): x, prev = piles[pile][prev] lis.append(x) lis.reverse() return lis
[ "def", "longest_increasing_subsequence", "(", "xs", ")", ":", "# Patience sort xs, stacking (x, prev_ix) pairs on the piles.", "# Prev_ix indexes the element at the top of the previous pile,", "# which has a lower x value than the current x value.", "piles", "=", "[", "[", "]", "]", "#...
34.814815
16.962963
def _dump_crawl_stats(self): ''' Dumps flattened crawling stats so the spiders do not have to ''' extras = {} spiders = {} spider_set = set() total_spider_count = 0 keys = self.redis_conn.keys('stats:crawler:*:*:*') for key in keys: # we only care about the spider elements = key.split(":") spider = elements[3] if spider not in spiders: spiders[spider] = 0 if len(elements) == 6: # got a time based stat response = elements[4] end = elements[5] final = '{s}_{r}_{e}'.format(s=spider, r=response, e=end) if end == 'lifetime': value = self.redis_conn.execute_command("PFCOUNT", key) else: value = self.redis_conn.zcard(key) extras[final] = value elif len(elements) == 5: # got a spider identifier spiders[spider] += 1 total_spider_count += 1 spider_set.add(spider) else: self.logger.warn("Unknown crawler stat key", {"key":key}) # simple counts extras['unique_spider_count'] = len(spider_set) extras['total_spider_count'] = total_spider_count for spider in spiders: extras['{k}_spider_count'.format(k=spider)] = spiders[spider] if not self.logger.json: self.logger.info('Crawler Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Crawler Stats Dump', extra=extras)
[ "def", "_dump_crawl_stats", "(", "self", ")", ":", "extras", "=", "{", "}", "spiders", "=", "{", "}", "spider_set", "=", "set", "(", ")", "total_spider_count", "=", "0", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'stats:crawler:*:*:*'", "...
31.037037
20.185185
def render_children(node: Node, **child_args): """Render node children""" for xml_node in node.xml_node.children: child = render(xml_node, **child_args) node.add_child(child)
[ "def", "render_children", "(", "node", ":", "Node", ",", "*", "*", "child_args", ")", ":", "for", "xml_node", "in", "node", ".", "xml_node", ".", "children", ":", "child", "=", "render", "(", "xml_node", ",", "*", "*", "child_args", ")", "node", ".", ...
38.8
5.2
def jd_to_datetime(jd): """ Convert a Julian Day to an `jdutil.datetime` object. Parameters ---------- jd : float Julian day. Returns ------- dt : `jdutil.datetime` object `jdutil.datetime` equivalent of Julian day. Examples -------- >>> jd_to_datetime(2446113.75) datetime(1985, 2, 17, 6, 0) """ year, month, day = jd_to_date(jd) frac_days,day = math.modf(day) day = int(day) hour,min,sec,micro = days_to_hmsm(frac_days) return datetime(year,month,day,hour,min,sec,micro)
[ "def", "jd_to_datetime", "(", "jd", ")", ":", "year", ",", "month", ",", "day", "=", "jd_to_date", "(", "jd", ")", "frac_days", ",", "day", "=", "math", ".", "modf", "(", "day", ")", "day", "=", "int", "(", "day", ")", "hour", ",", "min", ",", ...
19.285714
21.857143
def _get_center(self): '''Returns the center point of the path, disregarding transforms. ''' x = (self.x + self.width / 2) y = (self.y + self.height / 2) return (x, y)
[ "def", "_get_center", "(", "self", ")", ":", "x", "=", "(", "self", ".", "x", "+", "self", ".", "width", "/", "2", ")", "y", "=", "(", "self", ".", "y", "+", "self", ".", "height", "/", "2", ")", "return", "(", "x", ",", "y", ")" ]
33.666667
17.333333
def apply(self, *args: Any, **kwargs: Any) -> Any: """Called by workers to run the wrapped function. You may call it yourself if you want to run the task in current process without sending to the queue. If task has a `retry` property it will be retried on failure. If task has a `max_run_time` property the task will not be allowed to run more than that. """ def send_signal(sig: Signal, **extra: Any) -> None: self._send_signal(sig, args=args, kwargs=kwargs, **extra) logger.debug("Applying %r, args=%r, kwargs=%r", self, args, kwargs) send_signal(signals.task_preapply) try: tries = 1 + self.retry while 1: tries -= 1 send_signal(signals.task_prerun) try: with time_limit(self.max_run_time or 0): return self.f(*args, **kwargs) except Exception: send_signal(signals.task_error, exc_info=sys.exc_info()) if tries <= 0: raise else: break finally: send_signal(signals.task_postrun) except Exception: send_signal(signals.task_failure, exc_info=sys.exc_info()) raise else: send_signal(signals.task_success) finally: send_signal(signals.task_postapply)
[ "def", "apply", "(", "self", ",", "*", "args", ":", "Any", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "Any", ":", "def", "send_signal", "(", "sig", ":", "Signal", ",", "*", "*", "extra", ":", "Any", ")", "->", "None", ":", "self", ".", "...
37.282051
19.487179
def fit_sparse(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_iterations=None, maximum_full_sweeps_per_iteration=1, learning_rate=None, name=None): r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent. This function uses a L1- and L2-regularized, second-order quasi-Newton method to find maximum-likelihood parameters for the given model and observed data. The second-order approximations use negative Fisher information in place of the Hessian, that is, ```none FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, current value of model_coefficients)] ``` For large, sparse data sets, `model_matrix` should be supplied as a `SparseTensor`. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the tolerance for each optiization step; see the `tolerance` argument of `fit_sparse_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term. 
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term. Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying maximum number of iterations of the outer loop of the optimizer (i.e., maximum number of calls to `fit_sparse_one_step`). After this many iterations of the outer loop, the algorithm will terminate even if the return value `model_coefficients` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of coordinate descent sweeps allowed in each iteration. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse"`. Returns: model_coefficients: (Batch of) `Tensor` of the same shape and dtype as `model_coefficients_start`, representing the computed model coefficients which minimize the regularized negative log-likelihood. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged across all batches within the specified number of iterations. Here convergence means that an iteration of the inner loop (`fit_sparse_one_step`) returns `True` for its `is_converged` output value. iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `fit_sparse_one_step` before achieving convergence). #### Example ```python from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions def make_dataset(n, d, link, scale=1., dtype=np.float32): model_coefficients = tfd.Uniform( low=np.array(-1, dtype), high=np.array(1, dtype)).sample( d, seed=42) radius = np.sqrt(2.) 
model_coefficients *= radius / tf.linalg.norm(model_coefficients) mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d)) model_coefficients = tf.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( [n, d], seed=43) scale = tf.convert_to_tensor(scale, dtype) linear_response = tf.matmul(model_matrix, model_coefficients[..., tf.newaxis])[..., 0] if link == 'linear': response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) elif link == 'probit': response = tf.cast( tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0, dtype) elif link == 'logit': response = tfd.Bernoulli(logits=linear_response).sample(seed=44) else: raise ValueError('unrecognized true link: {}'.format(link)) return model_matrix, response, model_coefficients, mask with tf.Session() as sess: x_, y_, model_coefficients_true_, _ = sess.run(make_dataset( n=int(1e5), d=100, link='probit')) model = tfp.glm.Bernoulli() model_coefficients_start = tf.zeros(x_.shape[-1], np.float32) model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse( model_matrix=tf.convert_to_tensor(x_), response=tf.convert_to_tensor(y_), model=model, model_coefficients_start=model_coefficients_start, l1_regularizer=800., l2_regularizer=None, maximum_iterations=10, maximum_full_sweeps_per_iteration=10, tolerance=1e-6, learning_rate=None) model_coefficients_, is_converged_, num_iter_ = sess.run([ model_coefficients, is_converged, num_iter]) print("is_converged:", is_converged_) print(" num_iter:", num_iter_) print("\nLearned / True") print(np.concatenate( [[model_coefficients_], [model_coefficients_true_]], axis=0).T) # ==> # is_converged: True # num_iter: 1 # # Learned / True # [[ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.11195257 0.12484948] # [ 0. 0. ] # [ 0.05191106 0.06394956] # [-0.15090358 -0.15325639] # [-0.18187316 -0.18825999] # [-0.06140942 -0.07994166] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 
0. ] # [ 0.14474444 0.15810856] # [ 0. 0. ] # [-0.25249591 -0.24260855] # [ 0. 0. ] # [ 0. 0. ] # [-0.03888761 -0.06755984] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [-0.0192222 -0.04169233] # [ 0. 0. ] # [ 0. 0. ] # [ 0.01434913 0.03568212] # [-0.11336883 -0.12873614] # [ 0. 0. ] # [-0.24496339 -0.24048163] # [ 0. 0. ] # [ 0. 0. ] # [ 0.04088281 0.06565224] # [-0.12784363 -0.13359821] # [ 0.05618424 0.07396613] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0. -0.01719233] # [ 0. 0. ] # [ 0. 0. ] # [-0.00076072 -0.03607186] # [ 0.21801499 0.21146794] # [-0.02161094 -0.04031265] # [ 0.0918689 0.10487888] # [ 0.0106154 0.03233612] # [-0.07817317 -0.09725142] # [ 0. 0. ] # [ 0. 0. ] # [-0.23725343 -0.24194022] # [ 0. 0. ] # [-0.08725718 -0.1048776 ] # [ 0. 0. ] # [ 0. 0. ] # [-0.02114314 -0.04145789] # [ 0. 0. ] # [ 0. 0. ] # [-0.02710908 -0.04590397] # [ 0.15293184 0.15415154] # [ 0.2114463 0.2088728 ] # [-0.10969634 -0.12368613] # [ 0. -0.01505797] # [-0.01140458 -0.03234904] # [ 0.16051085 0.1680062 ] # [ 0.09816848 0.11094204] ``` #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. _Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_iterations, maximum_full_sweeps_per_iteration, # TODO(b/111925792): Replace `tolerance` arg with something like # `convergence_criteria_fn`. tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. 
def _grad_neg_log_likelihood_and_fim_fn(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return g, model_matrix, h_middle return tfp.optimizer.proximal_hessian_sparse_minimize( _grad_neg_log_likelihood_and_fim_fn, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_iterations=maximum_iterations, maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration, learning_rate=learning_rate, tolerance=tolerance, name=name)
[ "def", "fit_sparse", "(", "model_matrix", ",", "response", ",", "model", ",", "model_coefficients_start", ",", "tolerance", ",", "l1_regularizer", ",", "l2_regularizer", "=", "None", ",", "maximum_iterations", "=", "None", ",", "maximum_full_sweeps_per_iteration", "="...
39.173228
19.901575
def set_deferred_transfer(self, enable): """ Allow transfers to be delayed and buffered By default deferred transfers are turned off. All reads and writes will be completed by the time the function returns. When enabled packets are buffered and sent all at once, which increases speed. When memory is written to, the transfer might take place immediately, or might take place on a future memory write. This means that an invalid write could cause an exception to occur on a later, unrelated write. To guarantee that previous writes are complete call the flush() function. The behaviour of read operations is determined by the modes READ_START, READ_NOW and READ_END. The option READ_NOW is the default and will cause the read to flush all previous writes, and read the data immediately. To improve performance, multiple reads can be made using READ_START and finished later with READ_NOW. This allows the reads to be buffered and sent at once. Note - All READ_ENDs must be called before a call using READ_NOW can be made. """ if self._deferred_transfer and not enable: self.flush() self._deferred_transfer = enable
[ "def", "set_deferred_transfer", "(", "self", ",", "enable", ")", ":", "if", "self", ".", "_deferred_transfer", "and", "not", "enable", ":", "self", ".", "flush", "(", ")", "self", ".", "_deferred_transfer", "=", "enable" ]
50.88
24.08
def send_email(sender, subject, content, email_recipient_list, email_address_list, email_user=None, email_pass=None, email_server=None): '''This sends an email to addresses, informing them about events. The email account settings are retrieved from the settings file as described above. Parameters ---------- sender : str The name of the sender to use in the email header. subject : str Subject of the email. content : str Content of the email. email_recipient list : list of str This is a list of email recipient names of the form: `['Example Person 1', 'Example Person 1', ...]` email_recipient list : list of str This is a list of email recipient addresses of the form: `['example1@example.com', 'example2@example.org', ...]` email_user : str The username of the email server account that will send the emails. If this is None, the value of EMAIL_USER from the ~/.astrobase/.emailsettings file will be used. If that is None as well, this function won't work. email_pass : str The password of the email server account that will send the emails. If this is None, the value of EMAIL_PASS from the ~/.astrobase/.emailsettings file will be used. If that is None as well, this function won't work. email_server : str The address of the email server that will send the emails. If this is None, the value of EMAIL_USER from the ~/.astrobase/.emailsettings file will be used. If that is None as well, this function won't work. Returns ------- bool True if email sending succeeded. False if email sending failed. 
''' if not email_user: email_user = EMAIL_USER if not email_pass: email_pass = EMAIL_PASSWORD if not email_server: email_server = EMAIL_SERVER if not email_server and email_user and email_pass: raise ValueError("no email server address and " "credentials available, can't continue") msg_text = EMAIL_TEMPLATE.format( sender=sender, hostname=socket.gethostname(), activity_time='%sZ' % datetime.utcnow().isoformat(), activity_report=content ) email_sender = '%s <%s>' % (sender, EMAIL_USER) # put together the recipient and email lists email_recipients = [('%s <%s>' % (x,y)) for (x,y) in zip(email_recipient_list, email_address_list)] # put together the rest of the message email_msg = MIMEText(msg_text) email_msg['From'] = email_sender email_msg['To'] = ', '.join(email_recipients) email_msg['Message-Id'] = make_msgid() email_msg['Subject'] = '[%s on %s] %s' % ( sender, socket.gethostname(), subject ) email_msg['Date'] = formatdate(time.time()) # start the email process try: server = smtplib.SMTP(EMAIL_SERVER, 587) server_ehlo_response = server.ehlo() if server.has_extn('STARTTLS'): try: tls_start_response = server.starttls() tls_ehlo_response = server.ehlo() login_response = server.login(EMAIL_USER, EMAIL_PASSWORD) send_response = ( server.sendmail(email_sender, email_address_list, email_msg.as_string()) ) except Exception as e: print('script email sending failed with error: %s' % e) send_response = None if send_response is not None: print('script email sent successfully') quit_response = server.quit() return True else: quit_response = server.quit() return False else: print('email server does not support STARTTLS,' ' bailing out...') quit_response = server.quit() return False except Exception as e: print('sending email failed with error: %s' % e) returnval = False quit_response = server.quit() return returnval
[ "def", "send_email", "(", "sender", ",", "subject", ",", "content", ",", "email_recipient_list", ",", "email_address_list", ",", "email_user", "=", "None", ",", "email_pass", "=", "None", ",", "email_server", "=", "None", ")", ":", "if", "not", "email_user", ...
28.324503
22.97351
def FormatArtifacts(self, artifacts): """Formats artifacts to desired output format. Args: artifacts (list[ArtifactDefinition]): artifact definitions. Returns: str: formatted string of artifact definition. """ artifact_definitions = [artifact.AsDict() for artifact in artifacts] json_data = json.dumps(artifact_definitions) return json_data
[ "def", "FormatArtifacts", "(", "self", ",", "artifacts", ")", ":", "artifact_definitions", "=", "[", "artifact", ".", "AsDict", "(", ")", "for", "artifact", "in", "artifacts", "]", "json_data", "=", "json", ".", "dumps", "(", "artifact_definitions", ")", "re...
30.916667
19.833333
def fit(self, X, y=None): ''' Fit the transform. Does nothing, for compatibility with sklearn API. Parameters ---------- X : array-like, shape [n_series, ...] Time series data and (optionally) contextual data y : None There is no need of a target in a transformer, yet the pipeline API requires this parameter. Returns ------- self : object Returns self. ''' check_ts_data(X, y) if not X[0].ndim > 1: raise ValueError("X variable must have more than 1 channel") return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "check_ts_data", "(", "X", ",", "y", ")", "if", "not", "X", "[", "0", "]", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"X variable must have more than 1 channel\"", "...
28.136364
25.227273
def embeddedFileGet(self, id): """Retrieve embedded file content by name or by number.""" if self.isClosed or self.isEncrypted: raise ValueError("operation illegal for closed / encrypted doc") return _fitz.Document_embeddedFileGet(self, id)
[ "def", "embeddedFileGet", "(", "self", ",", "id", ")", ":", "if", "self", ".", "isClosed", "or", "self", ".", "isEncrypted", ":", "raise", "ValueError", "(", "\"operation illegal for closed / encrypted doc\"", ")", "return", "_fitz", ".", "Document_embeddedFileGet",...
45.333333
17.666667
def MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, otherPartFilePath = None): """ Archive all parts of multi-part compressed file. If file has been extracted (via part1) then move all subsequent parts directly to archive directory. If file has not been extracted then if part >1 add to other part skipped list and only archive when the first part is sent for archiving. Parameters ---------- firstPartExtractList : list File directory to search. otherPartSkippedList : list List which any file matches will be added to. archiveDir : list List of directories to ignore in recursive lookup (currently unused). otherPartFilePath : list [optional : default = None] List of supported file formats to search for. """ if otherPartFilePath is None: for filePath in list(otherPartSkippedList): MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, filePath) else: baseFileName = re.findall("(.+?)[.]part.+?rar", otherPartFilePath)[0] if baseFileName in firstPartExtractList: util.ArchiveProcessedFile(otherPartFilePath, archiveDir) if otherPartFilePath in otherPartSkippedList: otherPartSkippedList.remove(otherPartFilePath) elif otherPartFilePath not in otherPartSkippedList: otherPartSkippedList.append(otherPartFilePath)
[ "def", "MultipartArchiving", "(", "firstPartExtractList", ",", "otherPartSkippedList", ",", "archiveDir", ",", "otherPartFilePath", "=", "None", ")", ":", "if", "otherPartFilePath", "is", "None", ":", "for", "filePath", "in", "list", "(", "otherPartSkippedList", ")"...
39.529412
24.176471
def get_command(self, ctx, name): """Get a callable command object.""" if name not in self.daemon_class.list_actions(): return None # The context object is a Daemon object daemon = ctx.obj def subcommand(debug=False): """Call a daemonocle action.""" if daemon.detach and debug: daemon.detach = False daemon.do_action(name) # Override the docstring for the function so that it shows up # correctly in the help output subcommand.__doc__ = daemon.get_action(name).__doc__ if name == 'start': # Add a --debug option for start subcommand = click.option( '--debug', is_flag=True, help='Do NOT detach and run in the background.' )(subcommand) # Make it into a click command subcommand = click.command( name, options_metavar=self.options_metavar)(subcommand) return subcommand
[ "def", "get_command", "(", "self", ",", "ctx", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "daemon_class", ".", "list_actions", "(", ")", ":", "return", "None", "# The context object is a Daemon object", "daemon", "=", "ctx", ".", "obj", ...
31.774194
16.580645
def safeRmTree(rootPath): """ Deletes a tree and returns true if it was correctly deleted """ shutil.rmtree(rootPath, True) return not os.path.exists(rootPath)
[ "def", "safeRmTree", "(", "rootPath", ")", ":", "shutil", ".", "rmtree", "(", "rootPath", ",", "True", ")", "return", "not", "os", ".", "path", ".", "exists", "(", "rootPath", ")" ]
27.714286
12.571429
def get_identities(self, item): """Return the identities from an item""" user = self.get_sh_identity(item, self.get_field_author()) yield user
[ "def", "get_identities", "(", "self", ",", "item", ")", ":", "user", "=", "self", ".", "get_sh_identity", "(", "item", ",", "self", ".", "get_field_author", "(", ")", ")", "yield", "user" ]
32.6
19.4
def get_std_icon(name, size=None): """Get standard platform icon Call 'show_std_icons()' for details""" if not name.startswith('SP_'): name = 'SP_' + name icon = QWidget().style().standardIcon(getattr(QStyle, name)) if size is None: return icon else: return QIcon(icon.pixmap(size, size))
[ "def", "get_std_icon", "(", "name", ",", "size", "=", "None", ")", ":", "if", "not", "name", ".", "startswith", "(", "'SP_'", ")", ":", "name", "=", "'SP_'", "+", "name", "icon", "=", "QWidget", "(", ")", ".", "style", "(", ")", ".", "standardIcon"...
32.7
12.6
def _compute_mean(self, C, A1, A2, A3, A4, A5, A6, mag, hypo_depth, rrup, mean, idx): """ Compute mean for subduction interface events, as explained in table 2, page 67. """ mean[idx] = (A1 + A2 * mag + C['C1'] + C['C2'] * (A3 - mag) ** 3 + C['C3'] * np.log(rrup[idx] + A4 * np.exp(A5 * mag)) + A6 * hypo_depth)
[ "def", "_compute_mean", "(", "self", ",", "C", ",", "A1", ",", "A2", ",", "A3", ",", "A4", ",", "A5", ",", "A6", ",", "mag", ",", "hypo_depth", ",", "rrup", ",", "mean", ",", "idx", ")", ":", "mean", "[", "idx", "]", "=", "(", "A1", "+", "A...
45.222222
17.888889
def _set_collector_encoding(self, v, load=False): """ Setter method for collector_encoding, mapped from YANG variable /telemetry/collector/collector_encoding (collector-encoding-type) If this variable is read-only (config: false) in the source YANG file, then _set_collector_encoding is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_collector_encoding() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'json': {'value': 2}, u'gpb': {'value': 1}},), is_leaf=True, yang_name="collector-encoding", rest_name="encoding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Collector encoding format', u'alt-name': u'encoding', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='collector-encoding-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """collector_encoding must be of a type compatible with collector-encoding-type""", 'defined-type': "brocade-telemetry:collector-encoding-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'json': {'value': 2}, u'gpb': {'value': 1}},), is_leaf=True, yang_name="collector-encoding", rest_name="encoding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Collector encoding format', u'alt-name': u'encoding', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='collector-encoding-type', is_config=True)""", }) self.__collector_encoding = t if hasattr(self, '_set'): 
self._set()
[ "def", "_set_collector_encoding", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ","...
101.727273
48.818182
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'batches') and self.batches is not None: _dict['batches'] = [x._to_dict() for x in self.batches] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'batches'", ")", "and", "self", ".", "batches", "is", "not", "None", ":", "_dict", "[", "'batches'", "]", "=", "[", "x", ".", "_to_dict", "(", "...
42
19.166667
def write_config_file(config_instance, appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME): """ Write a ConfigParser instance to file at the correct location. Args: config_instance: Config instance to safe to file. appdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific path information. file_name (text_type, optional): Name of the config file. Defaults to ``DEFAULT_CONFIG_FILENAME``. Returns: SafeConfigParser: Instance written to file. """ path = get_config_path(appdirs, file_name) with open(path, 'w') as fobj: config_instance.write(fobj) return config_instance
[ "def", "write_config_file", "(", "config_instance", ",", "appdirs", "=", "DEFAULT_APPDIRS", ",", "file_name", "=", "DEFAULT_CONFIG_FILENAME", ")", ":", "path", "=", "get_config_path", "(", "appdirs", ",", "file_name", ")", "with", "open", "(", "path", ",", "'w'"...
34.7
20
def _get_systemd_services(root): ''' Use os.listdir() to get all the unit files ''' ret = set() for path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,): # Make sure user has access to the path, and if the path is a # link it's likely that another entry in SYSTEM_CONFIG_PATHS # or LOCAL_CONFIG_PATH points to it, so we can ignore it. path = _root(path, root) if os.access(path, os.R_OK) and not os.path.islink(path): for fullname in os.listdir(path): try: unit_name, unit_type = fullname.rsplit('.', 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == 'service' else fullname) return ret
[ "def", "_get_systemd_services", "(", "root", ")", ":", "ret", "=", "set", "(", ")", "for", "path", "in", "SYSTEM_CONFIG_PATHS", "+", "(", "LOCAL_CONFIG_PATH", ",", ")", ":", "# Make sure user has access to the path, and if the path is a", "# link it's likely that another ...
42.052632
20.052632
def load_pickle(file_path): """ Unpickle some data from a given path. Input: - file_path: Target file path. Output: - data: The python object that was serialized and stored in disk. """ pkl_file = open(file_path, 'rb') data = pickle.load(pkl_file) pkl_file.close() return data
[ "def", "load_pickle", "(", "file_path", ")", ":", "pkl_file", "=", "open", "(", "file_path", ",", "'rb'", ")", "data", "=", "pickle", ".", "load", "(", "pkl_file", ")", "pkl_file", ".", "close", "(", ")", "return", "data" ]
25.333333
15.833333
def load_commodities(self): """ Load the commodities for Amounts in this object. """ if isinstance(self.amount, Amount): self.amount = Amount("{0:.8f} {1}".format(self.amount.to_double(), self.currency)) else: self.amount = Amount("{0:.8f} {1}".format(self.amount, self.currency))
[ "def", "load_commodities", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "amount", ",", "Amount", ")", ":", "self", ".", "amount", "=", "Amount", "(", "\"{0:.8f} {1}\"", ".", "format", "(", "self", ".", "amount", ".", "to_double", "(", "...
42.125
19.375
def run_qpoints(self, q_points, with_eigenvectors=False, with_group_velocities=False, with_dynamical_matrices=False, nac_q_direction=None): """Phonon calculations on q-points. Parameters ---------- q_points: array_like or float, optional q-points in reduced coordinates. dtype='double', shape=(q-points, 3) with_eigenvectors: bool, optional Eigenvectors are stored by setting True. Default False. with_group_velocities : bool, optional Group velocities are calculated by setting True. Default is False. with_dynamical_matrices : bool, optional Calculated dynamical matrices are stored by setting True. Default is False. nac_q_direction : array_like q=(0,0,0) is replaced by q=epsilon * nac_q_direction where epsilon is infinitsimal for non-analytical term correction. This is used, e.g., to observe LO-TO splitting, """ if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) if with_group_velocities: if self._group_velocity is None: self._set_group_velocity() group_velocity = self._group_velocity else: group_velocity = None self._qpoints = QpointsPhonon( np.reshape(q_points, (-1, 3)), self._dynamical_matrix, nac_q_direction=nac_q_direction, with_eigenvectors=with_eigenvectors, group_velocity=group_velocity, with_dynamical_matrices=with_dynamical_matrices, factor=self._factor)
[ "def", "run_qpoints", "(", "self", ",", "q_points", ",", "with_eigenvectors", "=", "False", ",", "with_group_velocities", "=", "False", ",", "with_dynamical_matrices", "=", "False", ",", "nac_q_direction", "=", "None", ")", ":", "if", "self", ".", "_dynamical_ma...
38.369565
14.152174
def cmd_list(self): """List migrations.""" from peewee_migrate.router import Router, LOGGER LOGGER.setLevel('DEBUG') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) LOGGER.info('Migrations are done:') LOGGER.info('\n'.join(router.done)) LOGGER.info('') LOGGER.info('Migrations are undone:') LOGGER.info('\n'.join(router.diff))
[ "def", "cmd_list", "(", "self", ")", ":", "from", "peewee_migrate", ".", "router", "import", "Router", ",", "LOGGER", "LOGGER", ".", "setLevel", "(", "'DEBUG'", ")", "LOGGER", ".", "propagate", "=", "0", "router", "=", "Router", "(", "self", ".", "databa...
34.5
17.625
def readDataAndReshuffle(args, categoriesInOrderOfInterest=None): """ Read data file specified in args, optionally reshuffle categories, print out some statistics, and return various data structures. This routine is pretty specific and only used in some simple test scripts. categoriesInOrderOfInterest (list) Optional list of integers representing the priority order of various categories. The categories in the original data file will be reshuffled to the order in this array, up to args.numLabels, if specified. Returns the tuple: (dataset, labelRefs, documentCategoryMap, documentTextMap) Return format: dataset = [ ["fox eats carrots", [0], docId], ["fox eats peppers", [0], docId], ["carrots are healthy", [1], docId], ["peppers is healthy", [1], docId], ] labelRefs = [Category0Name, Category1Name, ...] documentCategoryMap = { docId: [categoryIndex0, categoryIndex1, ...], docId: [categoryIndex0, categoryIndex1, ...], : } documentTextMap = { docId: documentText, docId: documentText, : } """ # Read data dataDict = readCSV(args.dataPath, 1) labelRefs, dataDict = mapLabelRefs(dataDict) if "numLabels" in args: numLabels = args.numLabels else: numLabels = len(labelRefs) if categoriesInOrderOfInterest is None: categoriesInOrderOfInterest = range(0,numLabels) else: categoriesInOrderOfInterest=categoriesInOrderOfInterest[0:numLabels] # Select data based on categories of interest. 
Shift category indices down # so we go from 0 to numLabels-1 dataSet = [] documentTextMap = {} counts = numpy.zeros(len(labelRefs)) for document in dataDict.itervalues(): try: docId = int(document[2]) except: raise RuntimeError("docId "+str(docId)+" is not an integer") oldCategoryIndex = document[1][0] documentTextMap[docId] = document[0] if oldCategoryIndex in categoriesInOrderOfInterest: newIndex = categoriesInOrderOfInterest.index(oldCategoryIndex) dataSet.append([document[0], [newIndex], docId]) counts[newIndex] += 1 # For each document, figure out which categories it belongs to # Include the shifted category index documentCategoryMap = {} for doc in dataDict.iteritems(): docId = int(doc[1][2]) oldCategoryIndex = doc[1][1][0] if oldCategoryIndex in categoriesInOrderOfInterest: newIndex = categoriesInOrderOfInterest.index(oldCategoryIndex) v = documentCategoryMap.get(docId, []) v.append(newIndex) documentCategoryMap[docId] = v labelRefs = [labelRefs[i] for i in categoriesInOrderOfInterest] print "Total number of unique documents",len(documentCategoryMap) print "Category counts: ",counts print "Categories in training/test data:", labelRefs return dataSet, labelRefs, documentCategoryMap, documentTextMap
[ "def", "readDataAndReshuffle", "(", "args", ",", "categoriesInOrderOfInterest", "=", "None", ")", ":", "# Read data", "dataDict", "=", "readCSV", "(", "args", ".", "dataPath", ",", "1", ")", "labelRefs", ",", "dataDict", "=", "mapLabelRefs", "(", "dataDict", "...
34.413793
20.91954
def update(self): """ Draw the star. """ if not self._screen.is_visible(self._x, self._y): self._respawn() cur_char, _, _, _ = self._screen.get_from(self._x, self._y) if cur_char not in (ord(self._old_char), 32): self._respawn() self._cycle += 1 if self._cycle >= len(self._star_chars): self._cycle = 0 new_char = self._star_chars[self._cycle] if new_char == self._old_char: return self._screen.print_at(new_char, self._x, self._y) self._old_char = new_char
[ "def", "update", "(", "self", ")", ":", "if", "not", "self", ".", "_screen", ".", "is_visible", "(", "self", ".", "_x", ",", "self", ".", "_y", ")", ":", "self", ".", "_respawn", "(", ")", "cur_char", ",", "_", ",", "_", ",", "_", "=", "self", ...
27.857143
17.952381
def fast_forward_selection(scenarios, number_of_reduced_scenarios, probability=None): """Fast forward selection algorithm Parameters ---------- scenarios : numpy.array Contain the input scenarios. The columns representing the individual scenarios The rows are the vector of values in each scenario number_of_reduced_scenarios : int final number of scenarios that the reduced scenarios contain. If number of scenarios is equal to or greater than the input scenarios, then the original input scenario set is returned as the reduced set probability : numpy.array (default=None) probability is a numpy.array with length equal to number of scenarios. if probability is not defined, all scenarios get equal probabilities Returns ------- reduced_scenarios : numpy.array reduced set of scenarios reduced_probability : numpy.array probability of reduced set of scenarios reduced_scenario_set : list scenario numbers of reduced set of scenarios Example ------- Scenario reduction can be performed as shown below:: >>> import numpy as np >>> import random >>> scenarios = np.array([[random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)], >>> [random.randint(500,1000) for i in range(0,24)]]) >>> import psst.scenario >>> reduced_scenarios, reduced_probability, reduced_scenario_numbers = psst.scenario.fast_forward_selection(scenarios, probability, 2) """ print("Running fast forward selection algorithm") number_of_scenarios = scenarios.shape[1] logger.debug("Input number of scenarios = %d", number_of_scenarios) # if probability is not defined assign equal probability to all scenarios if 
probability is None: probability = np.array([1/number_of_scenarios for i in range(0, number_of_scenarios)]) # initialize z, c and J z = np.array([np.inf for i in range(0, number_of_scenarios)]) c = np.zeros((number_of_scenarios, number_of_scenarios)) J = range(0, number_of_scenarios) # no reduction necessary if number_of_reduced_scenarios >= number_of_scenarios: return(scenarios, probability, J) for scenario_k in range(0, number_of_scenarios): for scenario_u in range(0, number_of_scenarios): c[scenario_k, scenario_u] = distance(scenarios[:, scenario_k], scenarios[:, scenario_u]) for scenario_u in range(0, number_of_scenarios): summation = 0 for scenario_k in range(0, number_of_scenarios): if scenario_k != scenario_u: summation = summation + probability[scenario_k]*c[scenario_k, scenario_u] z[scenario_u] = summation U = [np.argmin(z)] for u in U: J.remove(u) for _ in range(0, number_of_scenarios - number_of_reduced_scenarios - 1): print("Running {}".format(_)) for scenario_u in J: for scenario_k in J: lowest_value = np.inf for scenario_number in U: lowest_value = min(c[scenario_k, scenario_u], c[scenario_k, scenario_number]) c[scenario_k, scenario_u] = lowest_value for scenario_u in J: summation = 0 for scenario_k in J: if scenario_k not in U: summation = summation + probability[scenario_k]*c[scenario_k, scenario_u] z[scenario_u] = summation u_i = np.argmin([item if i in J else np.inf for i, item in enumerate(z)]) J.remove(u_i) U.append(u_i) reduced_scenario_set = U reduced_probability = [] reduced_probability = copy.deepcopy(probability) for deleted_scenario_number in J: lowest_value = np.inf # find closest scenario_number for scenario_j in reduced_scenario_set: if c[deleted_scenario_number, scenario_j] < lowest_value: closest_scenario_number = scenario_j lowest_value = c[deleted_scenario_number, scenario_j] reduced_probability[closest_scenario_number] = reduced_probability[closest_scenario_number] + reduced_probability[deleted_scenario_number] reduced_scenarios = 
copy.deepcopy(scenarios[:, reduced_scenario_set]) reduced_probability = reduced_probability[reduced_scenario_set] return reduced_scenarios, reduced_probability, reduced_scenario_set
[ "def", "fast_forward_selection", "(", "scenarios", ",", "number_of_reduced_scenarios", ",", "probability", "=", "None", ")", ":", "print", "(", "\"Running fast forward selection algorithm\"", ")", "number_of_scenarios", "=", "scenarios", ".", "shape", "[", "1", "]", "...
38.433071
25.220472
def remember_order(self): """Verify that subsequent :func:`fudge.Fake.expects` are called in the right order. For example:: >>> import fudge >>> db = fudge.Fake('db').remember_order().expects('insert').expects('update') >>> db.update() Traceback (most recent call last): ... AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end >>> fudge.clear_expectations() When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added to the expected order of calls :: >>> import fudge >>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1) >>> sess = sess.expects("set_id").with_args(5) >>> sess = sess.next_call(for_method="get_id").returns(5) Multiple calls to ``get_id()`` are now expected :: >>> sess.get_id() 1 >>> sess.set_id(5) >>> sess.get_id() 5 >>> fudge.verify() >>> fudge.clear_expectations() """ if self._callable: raise FakeDeclarationError( "remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)") self._expected_call_order = ExpectedCallOrder(self) registry.remember_expected_call_order(self._expected_call_order) return self
[ "def", "remember_order", "(", "self", ")", ":", "if", "self", ".", "_callable", ":", "raise", "FakeDeclarationError", "(", "\"remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)\"", ")", "self", ".", "_expected_call_order", "=", "ExpectedCallOrde...
38.578947
24.736842
def build_recursive_gcs_delocalize_env(source, outputs): """Return a multi-line string with export statements for the variables. Arguments: source: Folder with the data. For example /mnt/data outputs: a list of OutputFileParam Returns: a multi-line string with a shell script that sets environment variables corresponding to the outputs. """ filtered_outs = [ var for var in outputs if var.recursive and var.file_provider == job_model.P_GCS ] return '\n'.join([ 'export {0}={1}/{2}'.format(var.name, source.rstrip('/'), var.docker_path.rstrip('/')) for var in filtered_outs ])
[ "def", "build_recursive_gcs_delocalize_env", "(", "source", ",", "outputs", ")", ":", "filtered_outs", "=", "[", "var", "for", "var", "in", "outputs", "if", "var", ".", "recursive", "and", "var", ".", "file_provider", "==", "job_model", ".", "P_GCS", "]", "r...
31.5
18.590909
def shutdown(self, force=False): """ Stop executing any further jobs. If the force argument is True, the function does not wait until any queued jobs are completed but stops immediately. After emptying the queue it is restarted, so you may still call run() after using this method. :type force: bool :param force: Whether to wait until all jobs were processed. """ if not force: self.join() self._dbg(2, 'Shutting down queue...') self.workqueue.shutdown(True) self._dbg(2, 'Queue shut down.') self._del_status_bar()
[ "def", "shutdown", "(", "self", ",", "force", "=", "False", ")", ":", "if", "not", "force", ":", "self", ".", "join", "(", ")", "self", ".", "_dbg", "(", "2", ",", "'Shutting down queue...'", ")", "self", ".", "workqueue", ".", "shutdown", "(", "True...
32.894737
18.368421
def from_transitions(cls, initial_state, accepting_states, transition_function): # type: (State, Set[State], NondeterministicTransitionFunction) -> NFA """ Initialize a DFA without explicitly specifying the set of states and the alphabet. :param initial_state: the initial state. :param accepting_states: the accepting state. :param transition_function: the (nondeterministic) transition function. :return: the NFA. """ states, alphabet = _extract_states_from_nondeterministic_transition_function(transition_function) return NFA(states, alphabet, initial_state, accepting_states, transition_function)
[ "def", "from_transitions", "(", "cls", ",", "initial_state", ",", "accepting_states", ",", "transition_function", ")", ":", "# type: (State, Set[State], NondeterministicTransitionFunction) -> NFA", "states", ",", "alphabet", "=", "_extract_states_from_nondeterministic_transition_fu...
51.538462
30.615385
def Rx(rads: Union[float, sympy.Basic]) -> XPowGate: """Returns a gate with the matrix e^{-i X rads / 2}.""" pi = sympy.pi if protocols.is_parameterized(rads) else np.pi return XPowGate(exponent=rads / pi, global_shift=-0.5)
[ "def", "Rx", "(", "rads", ":", "Union", "[", "float", ",", "sympy", ".", "Basic", "]", ")", "->", "XPowGate", ":", "pi", "=", "sympy", ".", "pi", "if", "protocols", ".", "is_parameterized", "(", "rads", ")", "else", "np", ".", "pi", "return", "XPow...
58.25
13.5
def _notify_create_process(self, event): """ Notify the creation of a new process. This is done automatically by the L{Debug} class, you shouldn't need to call it yourself. @type event: L{CreateProcessEvent} @param event: Create process event. @rtype: bool @return: C{True} to call the user-defined handle, C{False} otherwise. """ # Do not use super() here. bCallHandler = _ThreadContainer._notify_create_process(self, event) bCallHandler = bCallHandler and \ _ModuleContainer._notify_create_process(self, event) return bCallHandler
[ "def", "_notify_create_process", "(", "self", ",", "event", ")", ":", "# Do not use super() here.", "bCallHandler", "=", "_ThreadContainer", ".", "_notify_create_process", "(", "self", ",", "event", ")", "bCallHandler", "=", "bCallHandler", "and", "_ModuleContainer", ...
36.166667
18.277778
def _write(self, fp):
    """Write an .ini-format representation of the configuration state in
    git compatible format"""
    def write_section(name, section_dict):
        # Section header, e.g. "[core]".
        fp.write(("[%s]\n" % name).encode(defenc))
        for key, value in section_dict.items():
            if key == "__name__":
                continue
            # Continuation lines are tab-indented so multi-line values
            # round-trip through git's config parser.
            serialized = self._value_to_string(value).replace('\n', '\n\t')
            fp.write(("\t%s = %s\n" % (key, serialized)).encode(defenc))
    # END section writing

    if self._defaults:
        write_section(cp.DEFAULTSECT, self._defaults)
    for section_name, section_dict in self._sections.items():
        write_section(section_name, section_dict)
[ "def", "_write", "(", "self", ",", "fp", ")", ":", "def", "write_section", "(", "name", ",", "section_dict", ")", ":", "fp", ".", "write", "(", "(", "\"[%s]\\n\"", "%", "name", ")", ".", "encode", "(", "defenc", ")", ")", "for", "(", "key", ",", ...
45.533333
15.533333
def ratechangebase(self, ratefactor, current_base, new_base):
    """
    Local helper function for changing currency base, returns new rate in new base

    Defaults to ROUND_HALF_EVEN
    """
    if self._multiplier is None:
        self.log(logging.WARNING, "CurrencyHandler: changing base ourselves")
        # Converting bases only makes sense when the current base's own
        # rate is exactly 1.
        if self.get_ratefactor(current_base, current_base) != Decimal(1):
            raise RuntimeError("CurrencyHandler: current baserate: %s not 1" % current_base)
        self._multiplier = Decimal(1) / self.get_ratefactor(current_base, new_base)
    rebased = ratefactor * self._multiplier
    # Four decimal places, default (ROUND_HALF_EVEN) rounding.
    return rebased.quantize(Decimal(".0001"))
[ "def", "ratechangebase", "(", "self", ",", "ratefactor", ",", "current_base", ",", "new_base", ")", ":", "if", "self", ".", "_multiplier", "is", "None", ":", "self", ".", "log", "(", "logging", ".", "WARNING", ",", "\"CurrencyHandler: changing base ourselves\"",...
57.916667
24.25
def add_value_check(self, field_name, value_check,
                    code=VALUE_CHECK_FAILED,
                    message=MESSAGES[VALUE_CHECK_FAILED],
                    modulus=1):
    """
    Add a value check function for the specified field.

    Arguments
    ---------

    `field_name` - the name of the field to attach the value check function
    to

    `value_check` - a function that accepts a single argument (a value) and
    raises a `ValueError` if the value is not valid

    `code` - problem code to report if a value is not valid, defaults to
    `VALUE_CHECK_FAILED`

    `message` - problem message to report if a value is not valid

    `modulus` - apply the check to every nth record, defaults to 1 (check
    every record)
    """
    # guard conditions
    assert field_name in self._field_names, 'unexpected field name: %s' % field_name
    assert callable(value_check), 'value check must be a callable function'

    self._value_checks.append((field_name, value_check, code, message, modulus))
[ "def", "add_value_check", "(", "self", ",", "field_name", ",", "value_check", ",", "code", "=", "VALUE_CHECK_FAILED", ",", "message", "=", "MESSAGES", "[", "VALUE_CHECK_FAILED", "]", ",", "modulus", "=", "1", ")", ":", "# guard conditions", "assert", "field_name...
34.09375
26.53125
def find_all_pistacking_pairs(self):
    """Main analysis function. Analyses each frame in the trajectory in search for
    pi-pi interactions between previously defined rings on protein residues and
    ligand molecule.

    Results are stored per-trajectory in ``self.pistacking``,
    ``self.pistacking_by_time`` and ``self.pistacking_by_type`` and written
    to output files.  Interaction "type" is "P" (near-parallel stacking) or
    "T" (T-shaped / perpendicular stacking).
    """
    # One record per detected ring-ring contact in a single frame.
    data = namedtuple("pistacking","frame time proteinring ligandring distance angle offset type resname resid segid")
    i=0
    if self.trajectory==[]:
        # No trajectory given: fall back to the single topology file and
        # analyse all of its frames.
        self.trajectory = [self.topology_data.universe.filename]
        self.start_frame_num=[None]
        self.end_frame_num = [None]
        self.skip =[None]
    for traj in self.trajectory:
        self.timeseries=[]
        # Time stamps for the selected frame slice of this trajectory.
        self.timesteps=[frame.time for frame in self.topology_data.universe.trajectory[self.start_frame_num[i]:self.end_frame_num[i]:self.skip[i]]]
        start = timer()
        self.topology_data.load_trajectory(traj)
        for prot_ring in self.protein_rings:
            for ring in self.ligrings:
                l = self.get_ligand_ring_selection(ring)
                p = self.protein_rings[prot_ring]
                for frame in self.topology_data.universe.trajectory[self.start_frame_num[i]:self.end_frame_num[i]:self.skip[i]]:
                    # Plane normals of both rings and centroid-centroid distance.
                    lig_norm_vec = math.prepare_normal_vectors(l)
                    protein_norm_vec = math.prepare_normal_vectors(p)
                    dist = math.euclidean3d(l.center_of_geometry(),p.center_of_geometry())
                    a = math.vecangle(lig_norm_vec,protein_norm_vec)
                    # Fold the angle into [0, 90] so anti-parallel normals
                    # count the same as parallel ones.
                    angle = min(a, 180 - a if not 180 - a < 0 else a)
                    #Measure offset
                    # Offset = lateral displacement between ring centroids,
                    # measured by projecting each centroid onto the other
                    # ring's plane and taking the smaller distance.
                    proj1 = math.projection(lig_norm_vec,l.center_of_geometry(),p.center_of_geometry())
                    proj2 = math.projection(protein_norm_vec,p.center_of_geometry(),l.center_of_geometry())
                    offset = min(math.euclidean3d(proj1,l.center_of_geometry()), math.euclidean3d(proj2,p.center_of_geometry()))
                    if dist < self.max_distance:
                        if 0 < angle < self.max_angle_dev and offset < self.max_offset:
                            # Rings nearly parallel -> "P" stacking.
                            contacts = data(frame=frame.frame,
                                            time=frame.time,
                                            proteinring=tuple([a.id for a in p]),
                                            ligandring=tuple([a.id for a in l]),
                                            distance=dist,
                                            angle=angle,
                                            offset=offset,
                                            type="P",resname=self.protein_rings[prot_ring].residues.resnames[0],
                                            resid=self.protein_rings[prot_ring].residues.resids[0],
                                            segid=self.protein_rings[prot_ring].residues.segids[0])
                            self.timeseries.append(contacts)
                        if 90 - self.max_angle_dev < angle < 90 + self.max_angle_dev and offset < self.max_offset:
                            # Rings nearly perpendicular -> "T" stacking.
                            contacts = data(frame=frame.frame,
                                            time=frame.time,
                                            proteinring=tuple([a.id for a in p]),
                                            ligandring=tuple([a.id for a in l]),
                                            distance=dist,
                                            angle=angle,
                                            offset=offset,
                                            type="T",resname=self.protein_rings[prot_ring].residues.resnames[0],
                                            resid=self.protein_rings[prot_ring].residues.resids[0],
                                            segid=self.protein_rings[prot_ring].residues.segids[0])
                            self.timeseries.append(contacts)
        # Per-trajectory summaries and output.
        self.pistacking[i] = self.make_table()
        self.pistacking_by_time[i] = self.count_by_time()
        self.pistacking_by_type[i] = self.count_by_type()
        self.write_output_files(i)
        i+=1
        end = timer()
        # NOTE(review): Python 2 print statement; this module appears to
        # target Python 2 — confirm before porting.
        print "Pi-Stacking:"+str(end-start)
[ "def", "find_all_pistacking_pairs", "(", "self", ")", ":", "data", "=", "namedtuple", "(", "\"pistacking\"", ",", "\"frame time proteinring ligandring distance angle offset type resname resid segid\"", ")", "i", "=", "0", "if", "self", ".", "trajectory", "==", "[", "]",...
71.019231
41.480769
def format_name(self, format_name):
    """Set the default format name.

    :param str format_name: The display format name.
    :raises ValueError: if the format is not recognized.
    """
    # Guard clause: reject unknown formats before mutating state.
    if format_name not in self.supported_formats:
        raise ValueError('unrecognized format_name "{}"'.format(format_name))
    self._format_name = format_name
[ "def", "format_name", "(", "self", ",", "format_name", ")", ":", "if", "format_name", "in", "self", ".", "supported_formats", ":", "self", ".", "_format_name", "=", "format_name", "else", ":", "raise", "ValueError", "(", "'unrecognized format_name \"{}\"'", ".", ...
33.583333
16.583333
def generate_sub_codons_right(codons_dict):
    """Generate the sub_codons_right dictionary of codon suffixes.

    Parameters
    ----------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.

    Returns
    -------
    sub_codons_right : dict
        Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for each
        codon in an 'amino acid' grouping.
    """
    sub_codons_right = {}
    for symbol in codons_dict.keys():
        codons = codons_dict[symbol]
        # Collect the last nucleotide and last two nucleotides of every
        # codon, de-duplicated.
        suffixes = [c[-1] for c in codons] + [c[-2:] for c in codons]
        sub_codons_right[symbol] = list(set(suffixes))
    return sub_codons_right
[ "def", "generate_sub_codons_right", "(", "codons_dict", ")", ":", "sub_codons_right", "=", "{", "}", "for", "aa", "in", "codons_dict", ".", "keys", "(", ")", ":", "sub_codons_right", "[", "aa", "]", "=", "list", "(", "set", "(", "[", "x", "[", "-", "1"...
32.47619
23.285714
def BSF(cpu, dest, src):
    """
    Bit scan forward.

    Searches the source operand (second operand) for the least significant
    set bit (1 bit). If a least significant 1 bit is found, its bit index
    is stored in the destination operand (first operand). The source operand
    can be a register or a memory location; the destination operand is a register.
    The bit index is an unsigned offset from bit 0 of the source operand.
    If the contents source operand are 0, the contents of the destination
    operand is undefined::

            IF SRC  =  0
            THEN
                ZF  =  1;
                DEST is undefined;
            ELSE
                ZF  =  0;
                temp  =  0;
                WHILE Bit(SRC, temp)  =  0
                DO
                    temp  =  temp + 1;
                    DEST  =  temp;
                OD;
            FI;

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    value = src.read()
    # The scan is built as a symbolic expression (Operators.*) so it also
    # works on symbolic values: `flag` latches to true once any bit at or
    # below the current position is set; `res` keeps the index of the first
    # set bit because ITEBV stops updating it once `flag` is true.
    flag = Operators.EXTRACT(value, 0, 1) == 1
    res = 0
    for pos in range(1, src.size):
        # If a lower set bit was already found, keep `res`; otherwise
        # tentatively advance to `pos`.
        res = Operators.ITEBV(dest.size, flag, res, pos)
        flag = Operators.OR(flag, Operators.EXTRACT(value, pos, 1) == 1)
    cpu.ZF = value == 0
    # On SRC == 0 the manual leaves DEST undefined; this implementation
    # preserves DEST's previous contents in that case.
    dest.write(Operators.ITEBV(dest.size, cpu.ZF, dest.read(), res))
[ "def", "BSF", "(", "cpu", ",", "dest", ",", "src", ")", ":", "value", "=", "src", ".", "read", "(", ")", "flag", "=", "Operators", ".", "EXTRACT", "(", "value", ",", "0", ",", "1", ")", "==", "1", "res", "=", "0", "for", "pos", "in", "range",...
38.076923
18.589744
def set_label(self, value, callb=None):
    """Convenience method to set the label of the device

    This method will send a SetLabel message to the device, and request callb
    be executed when an ACK is received. The default callback will simply cache
    the value.

        :param value: The new label
        :type value: str
        :param callb: Callable to be used when the response is received. If not set,
                      self.resp_set_label will be used.
        :type callb: callable
        :returns: None
        :rtype: None
    """
    # Device labels are capped at 32 characters.
    if len(value) > 32:
        value = value[:32]
    cache_label = partial(self.resp_set_label, label=value)
    if callb:
        # Run both the caching callback and the caller's callback on ACK.
        self.req_with_ack(SetLabel, {"label": value},
                          lambda x, y: (cache_label(y), callb(x, y)))
    else:
        self.req_with_ack(SetLabel, {"label": value},
                          lambda x, y: cache_label(y))
[ "def", "set_label", "(", "self", ",", "value", ",", "callb", "=", "None", ")", ":", "if", "len", "(", "value", ")", ">", "32", ":", "value", "=", "value", "[", ":", "32", "]", "mypartial", "=", "partial", "(", "self", ".", "resp_set_label", ",", ...
42.904762
22.857143
def matches(self, address, name=None):
    """Check if this slot identifier matches the given tile.

    Matching can happen either by address or by module name (not currently
    implemented).

    Returns:
        bool: True if there is a match, otherwise False.
    """
    # The controller tile always answers at address 8, whatever its own
    # configured address is.
    if self.controller:
        return address == 8
    return address == self.address
[ "def", "matches", "(", "self", ",", "address", ",", "name", "=", "None", ")", ":", "if", "self", ".", "controller", ":", "return", "address", "==", "8", "return", "self", ".", "address", "==", "address" ]
29
21.692308
def check_config(config):
    '''
    Check the executor config file for consistency.

    :param config: parsed configuration (configparser-style) providing
        ``Server``/``url`` and ``Execution``/``directory`` entries.
    :return: True when the configuration looks usable, False otherwise.
    '''
    # Check server URL by actually trying to reach it.
    url = config.get("Server", "url")
    try:
        urlopen(url)
    except Exception as e:
        logger.error(
            "The configured OpenSubmit server URL ({0}) seems to be invalid: {1}".format(url, e))
        return False
    # Check directory specification
    targetdir = config.get("Execution", "directory")
    # BUG FIX: the original used `platform.system() is not "Windows"`, which
    # compares string *identity*, not equality, and is not guaranteed to
    # behave correctly (CPython 3.8+ even emits a SyntaxWarning for it).
    if platform.system() != "Windows" and not targetdir.startswith("/"):
        logger.error(
            "Please use absolute paths, starting with a /, in your Execution-directory setting.")
        return False
    if not targetdir.endswith(os.sep):
        logger.error(
            "Your Execution-directory setting must end with a " + os.sep)
        return False
    return True
[ "def", "check_config", "(", "config", ")", ":", "# Check server URL", "url", "=", "config", ".", "get", "(", "\"Server\"", ",", "\"url\"", ")", "try", ":", "urlopen", "(", "url", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", ...
35.347826
22.913043
def _hash(self): """Return a hash for the current query. This hash is _not_ a unique representation of the dataset! """ dump = dumps(self.query, sort_keys=True) if isinstance(dump, str): dump = dump.encode('utf-8') return md5(dump).hexdigest()
[ "def", "_hash", "(", "self", ")", ":", "dump", "=", "dumps", "(", "self", ".", "query", ",", "sort_keys", "=", "True", ")", "if", "isinstance", "(", "dump", ",", "str", ")", ":", "dump", "=", "dump", ".", "encode", "(", "'utf-8'", ")", "return", ...
32.888889
12.222222
def read(self, filename=None, read_detection_catalog=True):
    """
    Read a Party from a file.

    :type filename: str
    :param filename:
        File to read from - can be a list of files, and can contain
        wildcards.
    :type read_detection_catalog: bool
    :param read_detection_catalog:
        Whether to read the detection catalog or not, if False, catalog
        will be regenerated - for large catalogs this can be faster.

    :returns: self, with ``self.families`` populated.

    .. rubric:: Example

    >>> Party().read()
    Party of 4 Families.
    """
    tribe = Tribe()
    families = []
    if filename is None:
        # If there is no filename given, then read the example.
        filename = os.path.join(os.path.dirname(__file__), '..', 'tests',
                                'test_data', 'test_party.tgz')
    if isinstance(filename, list):
        filenames = []
        for _filename in filename:
            # Expand wildcards
            filenames.extend(glob.glob(_filename))
    else:
        # Expand wildcards
        filenames = glob.glob(filename)
    for _filename in filenames:
        with tarfile.open(_filename, "r:*") as arc:
            temp_dir = tempfile.mkdtemp()
            arc.extractall(path=temp_dir, members=_safemembers(arc))
        # Read in the detections first, this way, if we read from multiple
        # files then we can just read in extra templates as needed.
        # Read in families here!
        party_dir = glob.glob(temp_dir + os.sep + '*')[0]
        tribe._read_from_folder(dirname=party_dir)
        det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*"))
        if len(det_cat_file) != 0 and read_detection_catalog:
            try:
                all_cat = read_events(det_cat_file[0])
            except TypeError as e:
                # NOTE(review): if read_events raises TypeError here,
                # `all_cat` is left unbound and the _read_family call below
                # will raise NameError — confirm whether an empty Catalog()
                # fallback was intended.
                print(e)
                pass
        else:
            all_cat = Catalog()
        for family_file in glob.glob(join(party_dir, '*_detections.csv')):
            template = [
                t for t in tribe if _templates_match(t, family_file)]
            # NOTE(review): `template[0]` raises IndexError when no template
            # matches; the `or Template()` fallback only triggers on a falsy
            # first element, not on an empty list — verify intent.
            family = Family(template=template[0] or Template())
            new_family = True
            if family.template.name in [f.template.name for f in families]:
                # Merge detections into the already-known family.
                family = [
                    f for f in families
                    if f.template.name == family.template.name][0]
                new_family = False
            family.detections = _read_family(
                fname=family_file, all_cat=all_cat, template=template[0])
            if new_family:
                families.append(family)
        shutil.rmtree(temp_dir)
    self.families = families
    return self
[ "def", "read", "(", "self", ",", "filename", "=", "None", ",", "read_detection_catalog", "=", "True", ")", ":", "tribe", "=", "Tribe", "(", ")", "families", "=", "[", "]", "if", "filename", "is", "None", ":", "# If there is no filename given, then read the exa...
41.897059
17.044118
def remove(mode_id: str) -> bool:
    """
    Removes the specified mode identifier from the active modes and returns
    whether or not a remove operation was carried out. If the mode identifier
    is not in the currently active modes, it does need to be removed.
    """
    was_active = has(mode_id)
    if was_active:
        _current_modes.remove(mode_id)
    return was_active
[ "def", "remove", "(", "mode_id", ":", "str", ")", "->", "bool", ":", "had_mode", "=", "has", "(", "mode_id", ")", "if", "had_mode", ":", "_current_modes", ".", "remove", "(", "mode_id", ")", "return", "had_mode" ]
28.307692
22.153846
def make_anchor(file_path: pathlib.Path,
                offset: int,
                width: int,
                context_width: int,
                metadata,
                encoding: str = 'utf-8',
                handle=None):
    """Construct a new `Anchor`.

    Args:
        file_path: The absolute path to the target file for the anchor.
        offset: The offset of the anchored text in codepoints in
            `file_path`'s contents.
        width: The width in codepoints of the anchored text.
        context_width: The width in codepoints of context on either side of
            the anchor.
        metadata: The metadata to attach to the anchor. Must be
            json-serializeable.
        encoding: The encoding of the contents of `file_path`.
        handle: If not `None`, this is a file-like object the contents of
            which are used to calculate the context of the anchor. If
            `None`, then the file indicated by `file_path` is opened
            instead.

    Raises:
        ValueError: `width` characters can't be read at `offset`.
        ValueError: `file_path` is not absolute.
    """
    # Either use the caller's handle or open the file ourselves; in both
    # cases the context is computed from the resulting stream.
    if handle is None:
        with file_path.open(mode='rt', encoding=encoding) as fp:
            context = _make_context(fp, offset, width, context_width)
    else:
        context = _make_context(handle, offset, width, context_width)

    return Anchor(
        file_path=file_path,
        encoding=encoding,
        context=context,
        metadata=metadata)
[ "def", "make_anchor", "(", "file_path", ":", "pathlib", ".", "Path", ",", "offset", ":", "int", ",", "width", ":", "int", ",", "context_width", ":", "int", ",", "metadata", ",", "encoding", ":", "str", "=", "'utf-8'", ",", "handle", "=", "None", ")", ...
34.045455
22.204545
def apply_boundary_conditions(self, **kwargs):
    """Applies any boundary conditions to the given values (e.g., applying
    cyclic conditions, and/or reflecting values off of boundaries). This is
    done by running `apply_conditions` of each bounds in self on the
    corresponding value. See `boundaries.Bounds.apply_conditions` for
    details.

    Parameters
    ----------
    \**kwargs :
        The keyword args should be the name of a parameter and value to
        apply its boundary conditions to. The arguments need not include all
        of the parameters in self. Any unrecognized arguments are ignored.

    Returns
    -------
    dict
        A dictionary of the parameter names and the conditioned values.
    """
    # Only parameters that have registered bounds are conditioned; all
    # other keyword arguments are silently dropped.
    return {name: self._bounds[name].apply_conditions(value)
            for name, value in kwargs.items()
            if name in self._bounds}
[ "def", "apply_boundary_conditions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "dict", "(", "[", "[", "p", ",", "self", ".", "_bounds", "[", "p", "]", ".", "apply_conditions", "(", "val", ")", "]", "for", "p", ",", "val", "in", "kwar...
42.045455
25.909091
def tlog(x, th=1, r=_display_max, d=_l_mmax):
    """
    Truncated log10 transform.

    Parameters
    ----------
    x : num | num iterable
        values to be transformed.
    th : num
        values below th are transformed like th itself (0 for the default
        th=1). Must be positive.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value.
        tlog(10**d) = r

    Returns
    -------
    Array of transformed values.
    """
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    # BUG FIX: the original evaluated log10(x) for *every* element (numpy's
    # `where` computes both branches), emitting invalid-value warnings for
    # x <= 0.  Clamping to th first yields identical results warning-free.
    clamped = where(x <= th, th, x)
    return log10(clamped) * 1. * r / d
[ "def", "tlog", "(", "x", ",", "th", "=", "1", ",", "r", "=", "_display_max", ",", "d", "=", "_l_mmax", ")", ":", "if", "th", "<=", "0", ":", "raise", "ValueError", "(", "'Threshold value must be positive. %s given.'", "%", "th", ")", "return", "where", ...
26.875
17.541667
def GreaterThan(self, value):
    """Sets the type of the WHERE clause as "greater than".

    Args:
      value: The value to be used in the WHERE condition.

    Returns:
      The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, '>')
    self._awql = condition
    return self._query_builder
[ "def", "GreaterThan", "(", "self", ",", "value", ")", ":", "self", ".", "_awql", "=", "self", ".", "_CreateSingleValueCondition", "(", "value", ",", "'>'", ")", "return", "self", ".", "_query_builder" ]
29.181818
19.545455
def ipv6_prefix_to_mask(prefix):
    """
    ipv6 cidr prefix to net mask

    :param prefix: cidr prefix, range in [0, 128]
    :type prefix: int
    :return: colon separated ipv6 net mask code,
        eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
    :rtype: str
    :raises ValueError: if prefix is outside [0, 128]
    """
    # DOC FIX: the original docstring claimed a "comma separated" mask and a
    # range of (0, 128) although 0 and 128 are accepted; behavior unchanged.
    if prefix > 128 or prefix < 0:
        raise ValueError("invalid cidr prefix for ipv6")
    # Set the top `prefix` bits of a 128-bit value.
    mask = ((1 << 128) - 1) ^ ((1 << (128 - prefix)) - 1)
    # Render as 8 colon-separated groups of 4 hex digits (zero padded).
    digits = format(mask, '032x')
    return ':'.join(digits[i:i + 4] for i in range(0, 32, 4))
[ "def", "ipv6_prefix_to_mask", "(", "prefix", ")", ":", "if", "prefix", ">", "128", "or", "prefix", "<", "0", ":", "raise", "ValueError", "(", "\"invalid cidr prefix for ipv6\"", ")", "else", ":", "mask", "=", "(", "(", "1", "<<", "128", ")", "-", "1", ...
32.681818
13.681818
def extract(input, output):
    """Extract public key from private key.

    Given INPUT a private paillier key file as generated by generate, extract the
    public key portion to OUTPUT. Use "-" to output to stdout.
    """
    log("Loading paillier keypair")
    keypair = json.load(input)
    # Sanity-check the key material before extracting anything.
    error_msg = "Invalid private key"
    assert 'pub' in keypair, error_msg
    assert keypair['kty'] == 'DAJ', error_msg
    public_part = keypair['pub']
    json.dump(public_part, output)
    output.write('\n')
    log("Public key written to {}".format(output.name))
[ "def", "extract", "(", "input", ",", "output", ")", ":", "log", "(", "\"Loading paillier keypair\"", ")", "priv", "=", "json", ".", "load", "(", "input", ")", "error_msg", "=", "\"Invalid private key\"", "assert", "'pub'", "in", "priv", ",", "error_msg", "as...
31.875
13.5
def send_note(self, to, subject="", body="", noetid=""):
    """Send a note

    :param to: The username(s) that this note is to
    :param subject: The subject of the note
    :param body: The body of the note
    :param noetid: The UUID of the note that is being responded to
        (parameter name kept as-is for API compatibility; presumably a typo
        of "noteid" — confirm against callers before renaming)
    """
    # BUG FIX: the original compared strings with `is not`, which checks
    # object identity rather than equality and is unreliable; use `!=`.
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")

    response = self._req('/notes/send', post_data={
        'to[]': to,
        'subject': subject,
        'body': body,
        'noetid': noetid
    })

    sent_notes = []
    for item in response['results']:
        n = {}
        n['success'] = item['success']
        n['user'] = User()
        n['user'].from_dict(item['user'])
        sent_notes.append(n)
    return sent_notes
[ "def", "send_note", "(", "self", ",", "to", ",", "subject", "=", "\"\"", ",", "body", "=", "\"\"", ",", "noetid", "=", "\"\"", ")", ":", "if", "self", ".", "standard_grant_type", "is", "not", "\"authorization_code\"", ":", "raise", "DeviantartError", "(", ...
30.258065
22.16129
def import_gtfs(gtfs_sources, output, preserve_connection=False,
                print_progress=True, location_name=None, **kwargs):
    """Import a GTFS database

    gtfs_sources: str, dict, list
        Paths to the gtfs zip file or to the directory containing the GTFS data.
        Alternatively, a dict can be provide that maps gtfs filenames
        (like 'stops.txt' and 'agencies.txt') to their string presentations.
    output: str or sqlite3.Connection
        path to the new database to be created, or an existing
        sqlite3 connection
    preserve_connection: bool, optional
        Whether to close the connection in the end, or not.
    print_progress: bool, optional
        Whether to print progress output
    location_name: str, optional
        set the location of this database
    """
    if isinstance(output, sqlite3.Connection):
        conn = output
    else:
        # if os.path.isfile(output):
        #  raise RuntimeError('File already exists')
        conn = sqlite3.connect(output)
    if not isinstance(gtfs_sources, list):
        gtfs_sources = [gtfs_sources]
    cur = conn.cursor()
    time_import_start = time.time()

    # These are a bit unsafe, but make importing much faster,
    # especially on scratch.
    cur.execute('PRAGMA page_size = 4096;')
    cur.execute('PRAGMA mmap_size = 1073741824;')
    cur.execute('PRAGMA cache_size = -2000000;')
    cur.execute('PRAGMA temp_store=2;')
    # Changes of isolation level are python3.6 workarounds -
    # eventually will probably be fixed and this can be removed.
    conn.isolation_level = None  # change to autocommit mode (former default)
    cur.execute('PRAGMA journal_mode = OFF;')
    #cur.execute('PRAGMA journal_mode = WAL;')
    cur.execute('PRAGMA synchronous = OFF;')
    conn.isolation_level = ''  # change back to python default.
    # end python3.6 workaround

    # Do the actual importing.
    # One loader instance per GTFS table type (Loaders is module-level).
    loaders = [L(gtfssource=gtfs_sources, print_progress=print_progress, **kwargs) for L in Loaders]

    for loader in loaders:
        loader.assert_exists_if_required()

    # Do initial import.  This consists of making tables, raw insert
    # of the CSVs, and then indexing.
    for loader in loaders:
        loader.import_(conn)

    # Do any operations that require all tables present.
    for Loader in loaders:
        Loader.post_import_round2(conn)

    # Make any views
    for Loader in loaders:
        Loader.make_views(conn)

    # Run module-level post-processing hooks.
    for F in postprocessors:
        F(conn)

    # Set up same basic metadata.
    # Local import avoids a circular dependency between this module and
    # gtfspy.gtfs.
    from gtfspy import gtfs as mod_gtfs
    G = mod_gtfs.GTFS(output)
    G.meta['gen_time_ut'] = time.time()
    G.meta['gen_time'] = time.ctime()
    G.meta['import_seconds'] = time.time() - time_import_start
    G.meta['download_date'] = ''
    G.meta['location_name'] = ''
    G.meta['n_gtfs_sources'] = len(gtfs_sources)

    # Extract things from GTFS
    download_date_strs = []
    for i, source in enumerate(gtfs_sources):
        # Per-feed metadata keys are prefixed only when there are several
        # sources, so the single-source layout stays backward compatible.
        if len(gtfs_sources) == 1:
            prefix = ""
        else:
            prefix = "feed_" + str(i) + "_"
        if isinstance(source, string_types):
            G.meta[prefix + 'original_gtfs'] = decode_six(source) if source else None
            # Extract GTFS date.  Last date pattern in filename.
            filename_date_list = re.findall(r'\d{4}-\d{2}-\d{2}', source)
            if filename_date_list:
                date_str = filename_date_list[-1]
                G.meta[prefix + 'download_date'] = date_str
                download_date_strs.append(date_str)
            if location_name:
                G.meta['location_name'] = location_name
            else:
                # Fall back to guessing the location from the path layout
                # ".../<location>/<YYYY-MM-DD>...".
                location_name_list = re.findall(r'/([^/]+)/\d{4}-\d{2}-\d{2}', source)
                if location_name_list:
                    G.meta[prefix + 'location_name'] = location_name_list[-1]
                else:
                    try:
                        G.meta[prefix + 'location_name'] = source.split("/")[-4]
                    except:
                        G.meta[prefix + 'location_name'] = source
    if G.meta['download_date'] == "":
        # All feeds agree on one date -> promote it to the global key.
        unique_download_dates = list(set(download_date_strs))
        if len(unique_download_dates) == 1:
            G.meta['download_date'] = unique_download_dates[0]

    G.meta['timezone'] = cur.execute('SELECT timezone FROM agencies LIMIT 1').fetchone()[0]
    stats.update_stats(G)
    del G

    if print_progress:
        print("Vacuuming...")
    # Next 3 lines are python 3.6 work-arounds again.
    conn.isolation_level = None  # former default of autocommit mode
    cur.execute('VACUUM;')
    conn.isolation_level = ''  # back to python default
    # end python3.6 workaround
    if print_progress:
        print("Analyzing...")
    cur.execute('ANALYZE')
    if not (preserve_connection is True):
        conn.close()
[ "def", "import_gtfs", "(", "gtfs_sources", ",", "output", ",", "preserve_connection", "=", "False", ",", "print_progress", "=", "True", ",", "location_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "output", ",", "sqlite3", ...
37.370079
17.84252
def _cb_inform_sensor_status(self, msg): """Update received for an sensor.""" timestamp = msg.arguments[0] num_sensors = int(msg.arguments[1]) assert len(msg.arguments) == 2 + num_sensors * 3 for n in xrange(num_sensors): name = msg.arguments[2 + n * 3] status = msg.arguments[3 + n * 3] value = msg.arguments[4 + n * 3] self.update_sensor(name, timestamp, status, value)
[ "def", "_cb_inform_sensor_status", "(", "self", ",", "msg", ")", ":", "timestamp", "=", "msg", ".", "arguments", "[", "0", "]", "num_sensors", "=", "int", "(", "msg", ".", "arguments", "[", "1", "]", ")", "assert", "len", "(", "msg", ".", "arguments", ...
45
6
def sizeHint(self):
    """ Reimplemented to suggest a size that is 80 characters wide and 25 lines
        high.

        Returns a QtCore.QSize; doubled along one axis when the pager is a
        horizontal or vertical splitter, so both panes fit.
    """
    font_metrics = QtGui.QFontMetrics(self.font)
    # Frame plus document margin appear on both sides, hence * 2.
    margin = (self._control.frameWidth() +
              self._control.document().documentMargin()) * 2
    style = self.style()
    splitwidth = style.pixelMetric(QtGui.QStyle.PM_SplitterWidth)

    # Note 1: Despite my best efforts to take the various margins into
    # account, the width is still coming out a bit too small, so we include
    # a fudge factor of one character here.
    # Note 2: QFontMetrics.maxWidth is not used here or anywhere else due
    # to a Qt bug on certain Mac OS systems where it returns 0.
    width = font_metrics.width(' ') * 81 + margin
    width += style.pixelMetric(QtGui.QStyle.PM_ScrollBarExtent)
    if self.paging == 'hsplit':
        # Side-by-side pager: room for two panes plus the splitter handle.
        width = width * 2 + splitwidth

    height = font_metrics.height() * 25 + margin
    if self.paging == 'vsplit':
        # Stacked pager: room for two panes plus the splitter handle.
        height = height * 2 + splitwidth

    return QtCore.QSize(width, height)
[ "def", "sizeHint", "(", "self", ")", ":", "font_metrics", "=", "QtGui", ".", "QFontMetrics", "(", "self", ".", "font", ")", "margin", "=", "(", "self", ".", "_control", ".", "frameWidth", "(", ")", "+", "self", ".", "_control", ".", "document", "(", ...
44.12
18.08
def f_inv(self, z, max_iterations=250, y=None):
    """
    Calculate the numerical inverse of f using Newton-Raphson iteration.
    This should be overwritten for specific warping functions where the
    inverse can be found in closed form.

    :param z: target values whose preimage under f is sought.
    :param max_iterations: maximum number of N.R. iterations
    :param y: optional initial guess (same shape as z); defaults to ones.
    """
    z = z.copy()
    # BUG FIX: the original unconditionally overwrote `y` with ones,
    # silently discarding any caller-supplied initial guess.  Only default
    # it when it was not provided; copy it so the caller's array is not
    # mutated by the in-place update below.
    if y is None:
        y = np.ones_like(z)
    else:
        y = y.copy()
    it = 0
    update = np.inf
    # Iterate until the total step size is negligible or the budget runs out.
    while np.abs(update).sum() > 1e-10 and it < max_iterations:
        fy = self.f(y)
        fgrady = self.fgrad_y(y)
        update = (fy - z) / fgrady
        y -= self.rate * update
        it += 1
    #if it == max_iterations:
    #    print("WARNING!!! Maximum number of iterations reached in f_inv ")
    #    print("Sum of roots: %.4f" % np.sum(fy - z))
    return y
[ "def", "f_inv", "(", "self", ",", "z", ",", "max_iterations", "=", "250", ",", "y", "=", "None", ")", ":", "z", "=", "z", ".", "copy", "(", ")", "y", "=", "np", ".", "ones_like", "(", "z", ")", "it", "=", "0", "update", "=", "np", ".", "inf...
32.791667
17.958333
def send_result(self, additional_dict):
    '''
    Send a result to the RPC client

    :param additional_dict: the dictionary with the response
    '''
    self.send_response(200)
    self.send_header("Content-type", "application/json")
    # Standard JSON-RPC envelope, extended with the caller's payload.
    payload = {
        'jsonrpc': self.req_rpc_version,
        'id': self.req_id,
    }
    payload.update(additional_dict)
    body = json.dumps(payload)
    self.send_header("Content-length", len(body))
    self.end_headers()
    self.wfile.write(body.encode())
[ "def", "send_result", "(", "self", ",", "additional_dict", ")", ":", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "\"Content-type\"", ",", "\"application/json\"", ")", "response", "=", "{", "'jsonrpc'", ":", "self", ".", "r...
33.294118
14.941176
def write_recording(recording, save_path):
    '''
    Save recording extractor to MEArec format.

    Parameters
    ----------
    recording: RecordingExtractor
        Recording extractor object to be saved
    save_path: str
        .h5 or .hdf5 path
    '''
    assert HAVE_MREX, "To use the MEArec extractors, install MEArec: \n\n pip install MEArec\n\n"
    save_path = Path(save_path)
    if save_path.is_dir():
        # A directory was given: pick a default file name inside it.
        print("The file will be saved as recording.h5 in the provided folder")
        save_path = save_path / 'recording.h5'
    if save_path.suffix not in ('.h5', '.hdf5'):
        raise Exception("Provide a folder or an .h5/.hdf5 as 'save_path'")
    info = {'recordings': {'fs': recording.get_sampling_frequency()}}
    rec_dict = {'recordings': recording.get_traces()}
    if 'location' in recording.get_channel_property_names():
        positions = np.array([recording.get_channel_property(chan, 'location')
                              for chan in recording.get_channel_ids()])
        rec_dict['channel_positions'] = positions
    recgen = mr.RecordingGenerator(rec_dict=rec_dict, info=info)
    mr.save_recording_generator(recgen, str(save_path), verbose=False)
[ "def", "write_recording", "(", "recording", ",", "save_path", ")", ":", "assert", "HAVE_MREX", ",", "\"To use the MEArec extractors, install MEArec: \\n\\n pip install MEArec\\n\\n\"", "save_path", "=", "Path", "(", "save_path", ")", "if", "save_path", ".", "is_dir", "(",...
50.884615
24.423077
def exposure_notes(self):
    """Get the exposure specific notes defined in definitions.

    This method will do a lookup in definitions and return the
    exposure definition specific notes dictionary.

    This is a helper function to make it easy to get exposure specific
    notes from the definitions metadata.

    .. versionadded:: 3.5

    :returns: A list like e.g. safe.definitions.exposure_land_cover[
        'notes']
    :rtype: list, None
    """
    notes = []
    exposure = definition(self.exposure.keywords.get('exposure'))
    # Generic notes first, then the layer-mode-specific ones.
    notes.extend(exposure.get('notes', []))
    layer_mode = self.exposure.keywords['layer_mode']
    if layer_mode == 'classified':
        notes.extend(exposure.get('classified_notes', []))
    if layer_mode == 'continuous':
        notes.extend(exposure.get('continuous_notes', []))
    return notes
[ "def", "exposure_notes", "(", "self", ")", ":", "notes", "=", "[", "]", "exposure", "=", "definition", "(", "self", ".", "exposure", ".", "keywords", ".", "get", "(", "'exposure'", ")", ")", "if", "'notes'", "in", "exposure", ":", "notes", "+=", "expos...
38.038462
19.153846
def stop_notifications(self):
    """Stop the notifications thread.

    :returns:
    """
    # Everything happens under the lock, exactly as before: checking for an
    # active thread, detaching it, and tearing down the channel.
    with self._notifications_lock:
        if not self.has_active_notification_thread:
            return
        worker, self._notifications_thread = self._notifications_thread, None
        stopping = worker.stop()
        api = self._get_api(mds.NotificationsApi)
        api.delete_long_poll_channel()
        return stopping.wait()
[ "def", "stop_notifications", "(", "self", ")", ":", "with", "self", ".", "_notifications_lock", ":", "if", "not", "self", ".", "has_active_notification_thread", ":", "return", "thread", "=", "self", ".", "_notifications_thread", "self", ".", "_notifications_thread",...
33.571429
10.428571
def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False): """ Run filename args: command line arguments (string) wdir: working directory post_mortem: boolean, whether to enter post-mortem mode on error """ try: filename = filename.decode('utf-8') except (UnicodeError, TypeError, AttributeError): # UnicodeError, TypeError --> eventually raised in Python 2 # AttributeError --> systematically raised in Python 3 pass if __umr__.enabled: __umr__.run() if args is not None and not isinstance(args, basestring): raise TypeError("expected a character buffer object") if namespace is None: namespace = _get_globals() namespace['__file__'] = filename sys.argv = [filename] if args is not None: for arg in shlex.split(args): sys.argv.append(arg) if wdir is not None: try: wdir = wdir.decode('utf-8') except (UnicodeError, TypeError, AttributeError): # UnicodeError, TypeError --> eventually raised in Python 2 # AttributeError --> systematically raised in Python 3 pass os.chdir(wdir) if post_mortem: set_post_mortem() if __umr__.has_cython: # Cython files with io.open(filename, encoding='utf-8') as f: ipython_shell = get_ipython() ipython_shell.run_cell_magic('cython', '', f.read()) else: execfile(filename, namespace) clear_post_mortem() sys.argv = [''] # Avoid error when running `%reset -f` programmatically # See issue spyder-ide/spyder-kernels#91 try: namespace.pop('__file__') except KeyError: pass
[ "def", "runfile", "(", "filename", ",", "args", "=", "None", ",", "wdir", "=", "None", ",", "namespace", "=", "None", ",", "post_mortem", "=", "False", ")", ":", "try", ":", "filename", "=", "filename", ".", "decode", "(", "'utf-8'", ")", "except", "...
33.156863
17.745098
def grayify_cmap(cmap): """Return a grayscale version of the colormap. `Source`__ __ https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/ """ cmap = plt.cm.get_cmap(cmap) colors = cmap(np.arange(cmap.N)) # convert RGBA to perceived greyscale luminance # cf. http://alienryderflex.com/hsp.html RGB_weight = [0.299, 0.587, 0.114] luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight)) colors[:, :3] = luminance[:, np.newaxis] return mplcolors.LinearSegmentedColormap.from_list(cmap.name + "_grayscale", colors, cmap.N)
[ "def", "grayify_cmap", "(", "cmap", ")", ":", "cmap", "=", "plt", ".", "cm", ".", "get_cmap", "(", "cmap", ")", "colors", "=", "cmap", "(", "np", ".", "arange", "(", "cmap", ".", "N", ")", ")", "# convert RGBA to perceived greyscale luminance", "# cf. http...
38.133333
17.933333
def active_time(self): """ The length of time (in seconds) that the device has been active for. When the device is inactive, this is :data:`None`. """ if self._active_event.is_set(): return self.pin_factory.ticks_diff(self.pin_factory.ticks(), self._last_changed) else: return None
[ "def", "active_time", "(", "self", ")", ":", "if", "self", ".", "_active_event", ".", "is_set", "(", ")", ":", "return", "self", ".", "pin_factory", ".", "ticks_diff", "(", "self", ".", "pin_factory", ".", "ticks", "(", ")", ",", "self", ".", "_last_ch...
39.1
17.5
def check_guest_exist(check_index=0): """Check guest exist in database. :param check_index: The parameter index of userid(s), default as 0 """ def outer(f): @six.wraps(f) def inner(self, *args, **kw): userids = args[check_index] if isinstance(userids, list): # convert all userids to upper case userids = [uid.upper() for uid in userids] new_args = (args[:check_index] + (userids,) + args[check_index + 1:]) else: # convert the userid to upper case userids = userids.upper() new_args = (args[:check_index] + (userids,) + args[check_index + 1:]) userids = [userids] self._vmops.check_guests_exist_in_db(userids) return f(self, *new_args, **kw) return inner return outer
[ "def", "check_guest_exist", "(", "check_index", "=", "0", ")", ":", "def", "outer", "(", "f", ")", ":", "@", "six", ".", "wraps", "(", "f", ")", "def", "inner", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "userids", "=", "args...
31.655172
17.793103
def get_file(self, target, path, offset=None, length=None): """Get the contents of a file on the device :param target: The device(s) to be targeted with this request :type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances :param path: The path on the target to the file to retrieve :param offset: Start retrieving data from this byte position in the file, if None start from the beginning :param length: How many bytes to retrieve, if None retrieve until the end of the file :return: A dictionary with keys of device ids and values of the bytes of the file (or partial file if offset and/or length are specified) or an :class:`~.ErrorInfo` object if there was an error response :raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting """ command_block = FileSystemServiceCommandBlock() command_block.add_command(GetCommand(path, offset, length)) root = _parse_command_response( self._sci_api.send_sci("file_system", target, command_block.get_command_string())) out_dict = {} for device in root.findall('./file_system/device'): device_id = device.get('id') error = device.find('./error') if error is not None: out_dict[device_id] = _parse_error_tree(error) else: data = GetCommand.parse_response(device.find('./commands/get_file')) out_dict[device_id] = data return out_dict
[ "def", "get_file", "(", "self", ",", "target", ",", "path", ",", "offset", "=", "None", ",", "length", "=", "None", ")", ":", "command_block", "=", "FileSystemServiceCommandBlock", "(", ")", "command_block", ".", "add_command", "(", "GetCommand", "(", "path"...
60.346154
29.269231
def infer_typing_namedtuple_class(class_node, context=None): """Infer a subclass of typing.NamedTuple""" # Check if it has the corresponding bases annassigns_fields = [ annassign.target.name for annassign in class_node.body if isinstance(annassign, nodes.AnnAssign) ] code = dedent( """ from collections import namedtuple namedtuple({typename!r}, {fields!r}) """ ).format(typename=class_node.name, fields=",".join(annassigns_fields)) node = extract_node(code) generated_class_node = next(infer_named_tuple(node, context)) for method in class_node.mymethods(): generated_class_node.locals[method.name] = [method] return iter((generated_class_node,))
[ "def", "infer_typing_namedtuple_class", "(", "class_node", ",", "context", "=", "None", ")", ":", "# Check if it has the corresponding bases", "annassigns_fields", "=", "[", "annassign", ".", "target", ".", "name", "for", "annassign", "in", "class_node", ".", "body", ...
38
11
def makeFigFromFile(filename,*args,**kwargs): """ Renders an image in a matplotlib figure, so it can be added to reports args and kwargs are passed to plt.subplots """ import matplotlib.pyplot as plt img = plt.imread(filename) fig,ax = plt.subplots(*args,**kwargs) ax.axis('off') ax.imshow(img) return fig
[ "def", "makeFigFromFile", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "img", "=", "plt", ".", "imread", "(", "filename", ")", "fig", ",", "ax", "=", "plt", ".", "subplots",...
30.545455
12
def post(self): ''' Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: text POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: text HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0] ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return # verify that all lowstates are the correct client type for low in self.lowstate: # if you didn't specify, its fine if 'client' not in low: low['client'] = 'local_async' continue # if you specified something else, we don't do that if low.get('client') != 'local_async': self.set_status(400) self.write('We don\'t serve your kind here') self.finish() return self.disbatch()
[ "def", "post", "(", "self", ")", ":", "# if you aren't authenticated, redirect to login", "if", "not", "self", ".", "_verify_auth", "(", ")", ":", "self", ".", "redirect", "(", "'/login'", ")", "return", "# verify that all lowstates are the correct client type", "for", ...
29.114286
19.057143
def pull(self, url): """ Tries to pull changes from external location. """ url = self._get_url(url) try: pull(self.baseui, self._repo, url) except Abort, err: # Propagate error but with vcs's type raise RepositoryError(str(err))
[ "def", "pull", "(", "self", ",", "url", ")", ":", "url", "=", "self", ".", "_get_url", "(", "url", ")", "try", ":", "pull", "(", "self", ".", "baseui", ",", "self", ".", "_repo", ",", "url", ")", "except", "Abort", ",", "err", ":", "# Propagate e...
30.3
10.1
def between(self, left, right, inclusive=True): """ Return boolean Series equivalent to left <= series <= right. This function returns a boolean vector containing `True` wherever the corresponding Series element is between the boundary values `left` and `right`. NA values are treated as `False`. Parameters ---------- left : scalar Left boundary. right : scalar Right boundary. inclusive : bool, default True Include boundaries. Returns ------- Series Series representing whether each element is between left and right (inclusive). See Also -------- Series.gt : Greater than of series and other. Series.lt : Less than of series and other. Notes ----- This function is equivalent to ``(left <= ser) & (ser <= right)`` Examples -------- >>> s = pd.Series([2, 0, 4, 8, np.nan]) Boundary values are included by default: >>> s.between(1, 4) 0 True 1 False 2 True 3 False 4 False dtype: bool With `inclusive` set to ``False`` boundary values are excluded: >>> s.between(1, 4, inclusive=False) 0 True 1 False 2 False 3 False 4 False dtype: bool `left` and `right` can be any scalar value: >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve']) >>> s.between('Anna', 'Daniel') 0 False 1 True 2 True 3 False dtype: bool """ if inclusive: lmask = self >= left rmask = self <= right else: lmask = self > left rmask = self < right return lmask & rmask
[ "def", "between", "(", "self", ",", "left", ",", "right", ",", "inclusive", "=", "True", ")", ":", "if", "inclusive", ":", "lmask", "=", "self", ">=", "left", "rmask", "=", "self", "<=", "right", "else", ":", "lmask", "=", "self", ">", "left", "rma...
24.864865
22.135135
def flatten_tree(tree, nested_attr='replies', depth_first=False): """Return a flattened version of the passed in tree. :param nested_attr: The attribute name that contains the nested items. Defaults to ``replies`` which is suitable for comments. :param depth_first: When true, add to the list in a depth-first manner rather than the default breadth-first manner. """ stack = deque(tree) extend = stack.extend if depth_first else stack.extendleft retval = [] while stack: item = stack.popleft() nested = getattr(item, nested_attr, None) if nested: extend(nested) retval.append(item) return retval
[ "def", "flatten_tree", "(", "tree", ",", "nested_attr", "=", "'replies'", ",", "depth_first", "=", "False", ")", ":", "stack", "=", "deque", "(", "tree", ")", "extend", "=", "stack", ".", "extend", "if", "depth_first", "else", "stack", ".", "extendleft", ...
35.526316
20.421053
def _ge_from_lt(self, other): """Return a >= b. Computed by @total_ordering from (not a < b).""" op_result = self.__lt__(other) if op_result is NotImplemented: return NotImplemented return not op_result
[ "def", "_ge_from_lt", "(", "self", ",", "other", ")", ":", "op_result", "=", "self", ".", "__lt__", "(", "other", ")", "if", "op_result", "is", "NotImplemented", ":", "return", "NotImplemented", "return", "not", "op_result" ]
37
8.166667
def main(): """Main method that runs the build""" data = Common.open_file(F_INFO) config = Common.open_file(F_CONFIG) file_full_path = "" env = load_jinja2_env(config['p_template']) for index, page in data.iteritems(): logging.info('Creating ' + index + ' page:') template = env.get_template(page['f_template'] + \ config['f_template_ext']) for lang, content in page['content'].items(): if lang == "NaL": if page['f_directory'] != '': Common.make_dir(config['p_build'] + page['f_directory']) file_full_path = config['p_build'] + page['f_directory'] + \ page['f_name'] +page['f_endtype'] else: if page['f_directory'] != '': Common.make_dir(config['p_build'] + lang + '/' + \ page['f_directory']) file_full_path = config['p_build'] + lang + '/' + \ page['f_directory'] + page['f_name'] +page['f_endtype'] with open(file_full_path, 'w') as target_file: target_file.write(template.render(content)) logging.info('Page ' + index + ' created.')
[ "def", "main", "(", ")", ":", "data", "=", "Common", ".", "open_file", "(", "F_INFO", ")", "config", "=", "Common", ".", "open_file", "(", "F_CONFIG", ")", "file_full_path", "=", "\"\"", "env", "=", "load_jinja2_env", "(", "config", "[", "'p_template'", ...
36.393939
21.151515
def isServiceNameAvailable(self, name, serviceType): """ Checks to see if a given service name and type are available for publishing a new service. true indicates that the name and type is not found in the organization's services and is available for publishing. false means the requested name and type are not available. Inputs: name - requested name of service serviceType - type of service allowed values: Feature Service or Map Service """ _allowedTypes = ['Feature Service', "Map Service"] url = self._url + "/isServiceNameAvailable" params = { "f" : "json", "name" : name, "type" : serviceType } return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "isServiceNameAvailable", "(", "self", ",", "name", ",", "serviceType", ")", ":", "_allowedTypes", "=", "[", "'Feature Service'", ",", "\"Map Service\"", "]", "url", "=", "self", ".", "_url", "+", "\"/isServiceNameAvailable\"", "params", "=", "{", "\"f\""...
41.307692
17.230769
def fetch_open_data(cls, ifo, start, end, sample_rate=4096, tag=None, version=None, format='hdf5', host=GWOSC_DEFAULT_HOST, verbose=False, cache=None, **kwargs): """Fetch open-access data from the LIGO Open Science Center Parameters ---------- ifo : `str` the two-character prefix of the IFO in which you are interested, e.g. `'L1'` start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS start time of required data, defaults to start of data found; any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, defaults to end of data found; any input parseable by `~gwpy.time.to_gps` is fine sample_rate : `float`, optional, the sample rate of desired data; most data are stored by LOSC at 4096 Hz, however there may be event-related data releases with a 16384 Hz rate, default: `4096` tag : `str`, optional file tag, e.g. ``'CLN'`` to select cleaned data, or ``'C00'`` for 'raw' calibrated data. version : `int`, optional version of files to download, defaults to highest discovered version format : `str`, optional the data format to download and parse, default: ``'h5py'`` - ``'hdf5'`` - ``'gwf'`` - requires |LDAStools.frameCPP|_ host : `str`, optional HTTP host name of LOSC server to access verbose : `bool`, optional, default: `False` print verbose output while fetching data cache : `bool`, optional save/read a local copy of the remote URL, default: `False`; useful if the same remote data are to be accessed multiple times. Set `GWPY_CACHE=1` in the environment to auto-cache. 
**kwargs any other keyword arguments are passed to the `TimeSeries.read` method that parses the file that was downloaded Examples -------- >>> from gwpy.timeseries import (TimeSeries, StateVector) >>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)) TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19, ..., 3.55365541e-20, 6.33533516e-20, 7.58121195e-20] unit: Unit(dimensionless), t0: 1126259446.0 s, dt: 0.000244140625 s, name: Strain, channel: None) >>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478)) StateVector([127,127,127,127,127,127,127,127,127,127,127,127, 127,127,127,127,127,127,127,127,127,127,127,127, 127,127,127,127,127,127,127,127] unit: Unit(dimensionless), t0: 1126259446.0 s, dt: 1.0 s, name: Data quality, channel: None, bits: Bits(0: data present 1: passes cbc CAT1 test 2: passes cbc CAT2 test 3: passes cbc CAT3 test 4: passes burst CAT1 test 5: passes burst CAT2 test 6: passes burst CAT3 test, channel=None, epoch=1126259446.0)) For the `StateVector`, the naming of the bits will be ``format``-dependent, because they are recorded differently by LOSC in different formats. For events published in O2 and later, LOSC typically provides multiple data sets containing the original (``'C00'``) and cleaned (``'CLN'``) data. To select both data sets and plot a comparison, for example: >>> orig = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896, ... tag='C00') >>> cln = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896, ... tag='CLN') >>> origasd = orig.asd(fftlength=4, overlap=2) >>> clnasd = cln.asd(fftlength=4, overlap=2) >>> plot = origasd.plot(label='Un-cleaned') >>> ax = plot.gca() >>> ax.plot(clnasd, label='Cleaned') >>> ax.set_xlim(10, 1400) >>> ax.set_ylim(1e-24, 1e-20) >>> ax.legend() >>> plot.show() Notes ----- `StateVector` data are not available in ``txt.gz`` format. 
""" from .io.losc import fetch_losc_data return fetch_losc_data(ifo, start, end, sample_rate=sample_rate, tag=tag, version=version, format=format, verbose=verbose, cache=cache, host=host, cls=cls, **kwargs)
[ "def", "fetch_open_data", "(", "cls", ",", "ifo", ",", "start", ",", "end", ",", "sample_rate", "=", "4096", ",", "tag", "=", "None", ",", "version", "=", "None", ",", "format", "=", "'hdf5'", ",", "host", "=", "GWOSC_DEFAULT_HOST", ",", "verbose", "="...
42.589744
21.273504
def precmd(self, line): """Handle alias expansion and ';;' separator.""" if not line.strip(): return line args = line.split() while args[0] in self.aliases: line = self.aliases[args[0]] ii = 1 for tmpArg in args[1:]: line = line.replace("%" + str(ii), tmpArg) ii += 1 line = line.replace("%*", ' '.join(args[1:])) args = line.split() # split into ';;' separated commands # unless it's an alias command if args[0] != 'alias': marker = line.find(';;') if marker >= 0: # queue up everything after marker next = line[marker+2:].lstrip() self.cmdqueue.append(next) line = line[:marker].rstrip() return line
[ "def", "precmd", "(", "self", ",", "line", ")", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "return", "line", "args", "=", "line", ".", "split", "(", ")", "while", "args", "[", "0", "]", "in", "self", ".", "aliases", ":", "line", "="...
36.333333
9.333333
def warn_deprecated( since, message='', name='', alternative='', pending=False, obj_type='attribute', addendum='', removal=''): """ Used to display deprecation in a standard way. Parameters ---------- since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The format specifier `%(name)s` may be used for the name of the function, and `%(alternative)s` may be used in the deprecation message to insert the name of an alternative to the deprecated function. `%(obj_type)s` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated object. alternative : str, optional An alternative API that the user may use in place of the deprecated API. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. Cannot be used together with *removal*. removal : str, optional The expected removal version. With the default (an empty string), a removal version is automatically computed from *since*. Set to other Falsy values to not schedule a removal date. Cannot be used together with *pending*. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. Examples -------- Basic example:: # To warn of the deprecation of "matplotlib.name_of_module" warn_deprecated('1.4.0', name='matplotlib.name_of_module', obj_type='module') """ message = '\n' + _generate_deprecation_message( since, message, name, alternative, pending, obj_type, addendum, removal=removal) category = (PendingDeprecationWarning if pending else _projectWarning) warnings.warn(message, category, stacklevel=2)
[ "def", "warn_deprecated", "(", "since", ",", "message", "=", "''", ",", "name", "=", "''", ",", "alternative", "=", "''", ",", "pending", "=", "False", ",", "obj_type", "=", "'attribute'", ",", "addendum", "=", "''", ",", "removal", "=", "''", ")", "...
44.659574
19.382979
def parse_network_osm_query(data): """ Convert OSM query data to DataFrames of ways and way-nodes. Parameters ---------- data : dict Result of an OSM query. Returns ------- nodes, ways, waynodes : pandas.DataFrame """ if len(data['elements']) == 0: raise RuntimeError('OSM query results contain no data.') nodes = [] ways = [] waynodes = [] for e in data['elements']: if e['type'] == 'node': nodes.append(process_node(e)) elif e['type'] == 'way': w, wn = process_way(e) ways.append(w) waynodes.extend(wn) nodes = pd.DataFrame.from_records(nodes, index='id') ways = pd.DataFrame.from_records(ways, index='id') waynodes = pd.DataFrame.from_records(waynodes, index='way_id') return (nodes, ways, waynodes)
[ "def", "parse_network_osm_query", "(", "data", ")", ":", "if", "len", "(", "data", "[", "'elements'", "]", ")", "==", "0", ":", "raise", "RuntimeError", "(", "'OSM query results contain no data.'", ")", "nodes", "=", "[", "]", "ways", "=", "[", "]", "wayno...
24.352941
20.058824
def find_expired_nodes(self, node_ids=None): """ Detects connections that have held a reference for longer than its process_ttl without refreshing its session. This function does not actually removed them from the hash. (See remove_expired_nodes.) :param list node_ids: optional, a list of ids to check to see if they have expired. If node_ids is not passed in, all nodes in the hash will be checked. """ if node_ids: nodes = zip(node_ids, [int(t) for t in self.conn.client.hmget(self.nodelist_key, node_ids)]) else: nodes = self.get_all_nodes().items() expiration_delta = self.conn.PROCESS_TTL * 1000. now = int(time.time() * 1000.) return [node_id for (node_id, last_updated) in nodes if (now - last_updated) > expiration_delta]
[ "def", "find_expired_nodes", "(", "self", ",", "node_ids", "=", "None", ")", ":", "if", "node_ids", ":", "nodes", "=", "zip", "(", "node_ids", ",", "[", "int", "(", "t", ")", "for", "t", "in", "self", ".", "conn", ".", "client", ".", "hmget", "(", ...
47.333333
26.222222
def setHeight(self, personID, height): """setHeight(string, double) -> None Sets the height in m for this person. """ self._connection._sendDoubleCmd( tc.CMD_SET_PERSON_VARIABLE, tc.VAR_HEIGHT, personID, height)
[ "def", "setHeight", "(", "self", ",", "personID", ",", "height", ")", ":", "self", ".", "_connection", ".", "_sendDoubleCmd", "(", "tc", ".", "CMD_SET_PERSON_VARIABLE", ",", "tc", ".", "VAR_HEIGHT", ",", "personID", ",", "height", ")" ]
35.714286
11.285714
def derive(self, path): """ :param path: a path like "m/44'/0'/1'/0/10" if deriving from a master key, or a relative path like "./0/10" :return: the derived ExtendedPublicKey if deriving from an ExtendedPublicKey, the derived ExtendedPrivateKey if deriving from an ExtendedPrivateKey """ steps = path.split('/') if steps[0] not in {'m', '.'}: raise ValueError('Invalid derivation path: {}'.format(path)) if steps[0] == 'm' and not self.is_master(): raise ValueError('Trying to derive absolute path from non-master key') current = self for step in steps[1:]: hardened = False if step[-1] == "'": hardened = True step = step[:-1] index = int(step) current = current.get_child(index, hardened) return current
[ "def", "derive", "(", "self", ",", "path", ")", ":", "steps", "=", "path", ".", "split", "(", "'/'", ")", "if", "steps", "[", "0", "]", "not", "in", "{", "'m'", ",", "'.'", "}", ":", "raise", "ValueError", "(", "'Invalid derivation path: {}'", ".", ...
36.28
21.24
def all_network_files(): """All network files""" # TODO: list explicitly since some are missing? network_types = [ 'AND-circle', 'MAJ-specialized', 'MAJ-complete', 'iit-3.0-modular' ] network_sizes = range(5, 8) network_files = [] for n in network_sizes: for t in network_types: network_files.append('{}-{}'.format(n, t)) return network_files
[ "def", "all_network_files", "(", ")", ":", "# TODO: list explicitly since some are missing?", "network_types", "=", "[", "'AND-circle'", ",", "'MAJ-specialized'", ",", "'MAJ-complete'", ",", "'iit-3.0-modular'", "]", "network_sizes", "=", "range", "(", "5", ",", "8", ...
27.466667
15