text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def returner(ret):
    '''
    Parse the return data and return metrics to AppOptics.

    For each state that's provided in the configuration, return tagged
    metrics for the result of that state if it's present.
    '''
    opts = _get_options(ret)

    reportable_funs = ['state.highstate']
    if opts.get('sls_states'):
        reportable_funs.append('state.sls')

    # Only state runs we were configured to watch produce metrics.
    if ret['fun'] not in reportable_funs:
        return

    metric_tags = opts.get('tags', {}).copy()
    metric_tags['state_type'] = ret['fun']
    log.info("Tags for this run are %s", metric_tags)

    # What can I do if a run has multiple states that match?
    # In the mean time, find one matching state name and use it.
    matches = set(ret['fun_args']) & set(opts.get('sls_states', []))
    if matches:
        metric_tags['state_name'] = sorted(matches)[0]
        log.debug('Found returned data from %s.', metric_tags['state_name'])

    _state_metrics(ret, opts, metric_tags)
[ "def", "returner", "(", "ret", ")", ":", "options", "=", "_get_options", "(", "ret", ")", "states_to_report", "=", "[", "'state.highstate'", "]", "if", "options", ".", "get", "(", "'sls_states'", ")", ":", "states_to_report", ".", "append", "(", "'state.sls'", ")", "if", "ret", "[", "'fun'", "]", "in", "states_to_report", ":", "tags", "=", "options", ".", "get", "(", "'tags'", ",", "{", "}", ")", ".", "copy", "(", ")", "tags", "[", "'state_type'", "]", "=", "ret", "[", "'fun'", "]", "log", ".", "info", "(", "\"Tags for this run are %s\"", ",", "tags", ")", "matched_states", "=", "set", "(", "ret", "[", "'fun_args'", "]", ")", ".", "intersection", "(", "set", "(", "options", ".", "get", "(", "'sls_states'", ",", "[", "]", ")", ")", ")", "# What can I do if a run has multiple states that match?", "# In the mean time, find one matching state name and use it.", "if", "matched_states", ":", "tags", "[", "'state_name'", "]", "=", "sorted", "(", "matched_states", ")", "[", "0", "]", "log", ".", "debug", "(", "'Found returned data from %s.'", ",", "tags", "[", "'state_name'", "]", ")", "_state_metrics", "(", "ret", ",", "options", ",", "tags", ")" ]
40.666667
17.416667
def start_webhook(dispatcher, webhook_path, *, loop=None, skip_updates=None,
                  on_startup=None, on_shutdown=None, check_ip=False,
                  retry_after=None, route_name=DEFAULT_ROUTE_NAME, **kwargs):
    """
    Start bot in webhook mode

    :param dispatcher:
    :param webhook_path:
    :param loop:
    :param skip_updates:
    :param on_startup:
    :param on_shutdown:
    :param check_ip:
    :param route_name:
    :param kwargs:
    :return:
    """
    # Configure the webhook first; remaining kwargs go to the web app runner.
    app_executor = set_webhook(
        dispatcher=dispatcher,
        webhook_path=webhook_path,
        loop=loop,
        skip_updates=skip_updates,
        on_startup=on_startup,
        on_shutdown=on_shutdown,
        check_ip=check_ip,
        retry_after=retry_after,
        route_name=route_name,
    )
    app_executor.run_app(**kwargs)
[ "def", "start_webhook", "(", "dispatcher", ",", "webhook_path", ",", "*", ",", "loop", "=", "None", ",", "skip_updates", "=", "None", ",", "on_startup", "=", "None", ",", "on_shutdown", "=", "None", ",", "check_ip", "=", "False", ",", "retry_after", "=", "None", ",", "route_name", "=", "DEFAULT_ROUTE_NAME", ",", "*", "*", "kwargs", ")", ":", "executor", "=", "set_webhook", "(", "dispatcher", "=", "dispatcher", ",", "webhook_path", "=", "webhook_path", ",", "loop", "=", "loop", ",", "skip_updates", "=", "skip_updates", ",", "on_startup", "=", "on_startup", ",", "on_shutdown", "=", "on_shutdown", ",", "check_ip", "=", "check_ip", ",", "retry_after", "=", "retry_after", ",", "route_name", "=", "route_name", ")", "executor", ".", "run_app", "(", "*", "*", "kwargs", ")" ]
34.592593
17.259259
def apply_async(f, args=(), kwargs=None, callback=None):
    """Apply a function but emulate the API of an asynchronous call.

    Parameters
    ----------
    f : callable
        The function to call.
    args : tuple, optional
        The positional arguments.
    kwargs : dict, optional
        The keyword arguments.

    Returns
    -------
    future : ApplyAsyncResult
        The result of calling the function boxed in a future-like api.

    Notes
    -----
    This calls the function eagerly but wraps it so that ``SequentialPool``
    can be used where a :class:`multiprocessing.Pool` or
    :class:`gevent.pool.Pool` would be used.
    """
    try:
        # The post-processing step (identity or the user callback) runs
        # inside the try so a raising callback is boxed as a failure too.
        post = identity if callback is None else callback
        outcome = post(f(*args, **(kwargs or {})))
    except Exception as e:
        return ApplyAsyncResult(e, False)
    return ApplyAsyncResult(outcome, True)
[ "def", "apply_async", "(", "f", ",", "args", "=", "(", ")", ",", "kwargs", "=", "None", ",", "callback", "=", "None", ")", ":", "try", ":", "value", "=", "(", "identity", "if", "callback", "is", "None", "else", "callback", ")", "(", "f", "(", "*", "args", ",", "*", "*", "kwargs", "or", "{", "}", ")", ",", ")", "successful", "=", "True", "except", "Exception", "as", "e", ":", "value", "=", "e", "successful", "=", "False", "return", "ApplyAsyncResult", "(", "value", ",", "successful", ")" ]
30.333333
19.030303
def read_until_yieldable(self):
    """Read in additional chunks until it is yieldable."""
    # Keep pulling fixed-size chunks from the file until the buffer
    # holds enough data to yield.
    while not self.yieldable():
        chunk, position = _get_next_chunk(
            self.fp, self.read_position, self.chunk_size)
        self.add_to_buffer(chunk, position)
[ "def", "read_until_yieldable", "(", "self", ")", ":", "while", "not", "self", ".", "yieldable", "(", ")", ":", "read_content", ",", "read_position", "=", "_get_next_chunk", "(", "self", ".", "fp", ",", "self", ".", "read_position", ",", "self", ".", "chunk_size", ")", "self", ".", "add_to_buffer", "(", "read_content", ",", "read_position", ")" ]
58
19.2
def create_comment(self, comment, repository_id, pull_request_id, thread_id, project=None):
    """CreateComment.
    [Preview API] Create a comment on a specific thread in a pull request (up to 500 comments can be created per thread).
    :param :class:`<Comment> <azure.devops.v5_1.git.models.Comment>` comment: The comment to create. Comments can be up to 150,000 characters.
    :param str repository_id: The repository ID of the pull request's target branch.
    :param int pull_request_id: ID of the pull request.
    :param int thread_id: ID of the thread that the desired comment is in.
    :param str project: Project ID or project name
    :rtype: :class:`<Comment> <azure.devops.v5_1.git.models.Comment>`
    """
    # Serialize each non-None routing value under its REST route key.
    route_values = {}
    for route_key, param_name, value, value_type in (
            ('project', 'project', project, 'str'),
            ('repositoryId', 'repository_id', repository_id, 'str'),
            ('pullRequestId', 'pull_request_id', pull_request_id, 'int'),
            ('threadId', 'thread_id', thread_id, 'int')):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, value_type)
    content = self._serialize.body(comment, 'Comment')
    response = self._send(http_method='POST',
                          location_id='965a3ec7-5ed8-455a-bdcb-835a5ea7fe7b',
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Comment', response)
[ "def", "create_comment", "(", "self", ",", "comment", ",", "repository_id", ",", "pull_request_id", ",", "thread_id", ",", "project", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "repository_id", "is", "not", "None", ":", "route_values", "[", "'repositoryId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'repository_id'", ",", "repository_id", ",", "'str'", ")", "if", "pull_request_id", "is", "not", "None", ":", "route_values", "[", "'pullRequestId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'pull_request_id'", ",", "pull_request_id", ",", "'int'", ")", "if", "thread_id", "is", "not", "None", ":", "route_values", "[", "'threadId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'thread_id'", ",", "thread_id", ",", "'int'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "comment", ",", "'Comment'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'965a3ec7-5ed8-455a-bdcb-835a5ea7fe7b'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'Comment'", ",", "response", ")" ]
65.076923
29.461538
def filter_results(source, results, aggressive):
    """Filter out spurious reports from pycodestyle.

    Yields each result dict from ``results`` that survives the filters.

    If aggressive is True, we allow possibly unsafe fixes (E711, E712).
    """
    non_docstring_string_line_numbers = multiline_string_lines(
        source, include_docstrings=False)
    all_string_line_numbers = multiline_string_lines(
        source, include_docstrings=True)

    commented_out_code_line_numbers = commented_out_code_lines(source)

    # A token error (E901) means indentation info is unreliable below.
    has_e901 = any(result['id'].lower() == 'e901' for result in results)

    for r in results:
        issue_id = r['id'].lower()

        if r['line'] in non_docstring_string_line_numbers:
            if issue_id.startswith(('e1', 'e501', 'w191')):
                continue

        if r['line'] in all_string_line_numbers:
            if issue_id in ['e501']:
                continue

        # We must offset by 1 for lines that contain the trailing contents of
        # multiline strings.
        if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode. Removing
            # trailing whitespace could break doctests.
            if issue_id.startswith(('w29', 'w39')):
                continue

        if aggressive <= 0:
            if issue_id.startswith(('e711', 'e72', 'w6')):
                continue

        if aggressive <= 1:
            if issue_id.startswith(('e712', 'e713', 'e714')):
                continue

        if aggressive <= 2:
            # Fix: ('e704') was a plain string, not a one-element tuple;
            # it only worked because startswith() also accepts a string.
            if issue_id.startswith(('e704',)):
                continue

        if r['line'] in commented_out_code_line_numbers:
            if issue_id.startswith(('e26', 'e501')):
                continue

        # Do not touch indentation if there is a token error caused by
        # incomplete multi-line statement. Otherwise, we risk screwing up the
        # indentation.
        if has_e901:
            if issue_id.startswith(('e1', 'e7')):
                continue

        yield r
[ "def", "filter_results", "(", "source", ",", "results", ",", "aggressive", ")", ":", "non_docstring_string_line_numbers", "=", "multiline_string_lines", "(", "source", ",", "include_docstrings", "=", "False", ")", "all_string_line_numbers", "=", "multiline_string_lines", "(", "source", ",", "include_docstrings", "=", "True", ")", "commented_out_code_line_numbers", "=", "commented_out_code_lines", "(", "source", ")", "has_e901", "=", "any", "(", "result", "[", "'id'", "]", ".", "lower", "(", ")", "==", "'e901'", "for", "result", "in", "results", ")", "for", "r", "in", "results", ":", "issue_id", "=", "r", "[", "'id'", "]", ".", "lower", "(", ")", "if", "r", "[", "'line'", "]", "in", "non_docstring_string_line_numbers", ":", "if", "issue_id", ".", "startswith", "(", "(", "'e1'", ",", "'e501'", ",", "'w191'", ")", ")", ":", "continue", "if", "r", "[", "'line'", "]", "in", "all_string_line_numbers", ":", "if", "issue_id", "in", "[", "'e501'", "]", ":", "continue", "# We must offset by 1 for lines that contain the trailing contents of", "# multiline strings.", "if", "not", "aggressive", "and", "(", "r", "[", "'line'", "]", "+", "1", ")", "in", "all_string_line_numbers", ":", "# Do not modify multiline strings in non-aggressive mode. 
Remove", "# trailing whitespace could break doctests.", "if", "issue_id", ".", "startswith", "(", "(", "'w29'", ",", "'w39'", ")", ")", ":", "continue", "if", "aggressive", "<=", "0", ":", "if", "issue_id", ".", "startswith", "(", "(", "'e711'", ",", "'e72'", ",", "'w6'", ")", ")", ":", "continue", "if", "aggressive", "<=", "1", ":", "if", "issue_id", ".", "startswith", "(", "(", "'e712'", ",", "'e713'", ",", "'e714'", ")", ")", ":", "continue", "if", "aggressive", "<=", "2", ":", "if", "issue_id", ".", "startswith", "(", "(", "'e704'", ")", ")", ":", "continue", "if", "r", "[", "'line'", "]", "in", "commented_out_code_line_numbers", ":", "if", "issue_id", ".", "startswith", "(", "(", "'e26'", ",", "'e501'", ")", ")", ":", "continue", "# Do not touch indentation if there is a token error caused by", "# incomplete multi-line statement. Otherwise, we risk screwing up the", "# indentation.", "if", "has_e901", ":", "if", "issue_id", ".", "startswith", "(", "(", "'e1'", ",", "'e7'", ")", ")", ":", "continue", "yield", "r" ]
33.293103
22.310345
def get_contacts(self):
    """Use this method to get contacts from your Telegram address book.

    Returns:
        On success, a list of :obj:`User` objects is returned.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # Retry indefinitely: a FloodWait only tells us how long to back off.
    while True:
        try:
            fetched = self.send(functions.contacts.GetContacts(hash=0))
        except FloodWait as e:
            log.warning("get_contacts flood: waiting {} seconds".format(e.x))
            time.sleep(e.x)
            continue

        log.info("Total contacts: {}".format(len(self.peers_by_phone)))
        return [pyrogram.User._parse(self, user) for user in fetched.users]
[ "def", "get_contacts", "(", "self", ")", ":", "while", "True", ":", "try", ":", "contacts", "=", "self", ".", "send", "(", "functions", ".", "contacts", ".", "GetContacts", "(", "hash", "=", "0", ")", ")", "except", "FloodWait", "as", "e", ":", "log", ".", "warning", "(", "\"get_contacts flood: waiting {} seconds\"", ".", "format", "(", "e", ".", "x", ")", ")", "time", ".", "sleep", "(", "e", ".", "x", ")", "else", ":", "log", ".", "info", "(", "\"Total contacts: {}\"", ".", "format", "(", "len", "(", "self", ".", "peers_by_phone", ")", ")", ")", "return", "[", "pyrogram", ".", "User", ".", "_parse", "(", "self", ",", "user", ")", "for", "user", "in", "contacts", ".", "users", "]" ]
40.277778
25.388889
def _ParseCshVariables(self, lines):
    """Extract env_var and path values from csh derivative shells.

    Path attributes can be set several ways:
    - setenv takes the form "setenv PATH_NAME COLON:SEPARATED:LIST"
    - set takes the form "set path_name=(space separated list)" and is
      automatically exported for several types of files.

    The first entry in each stanza is used to decide what context to use.
    Other entries are used to identify the path name and any assigned values.

    Args:
      lines: A list of lines, each of which is a list of space separated words.

    Returns:
      a dictionary of path names and values.
    """
    paths = {}
    for words in lines:
        # A stanza needs at least a verb and a variable name.
        if len(words) < 2:
            continue
        verb = words[0]
        if verb == "setenv":
            var_name = words[1]
            values = words[2].split(":") if words[2:] else []
            self._ExpandPath(var_name, values, paths)
        elif verb == "set":
            match = self._CSH_SET_RE.search(" ".join(words[1:]))
            if not match:
                continue
            var_name, assigned = match.groups()
            # Automatically exported to ENV vars.
            if var_name in ("path", "term", "user"):
                var_name = var_name.upper()
            self._ExpandPath(var_name, assigned.split(), paths)
    return paths
[ "def", "_ParseCshVariables", "(", "self", ",", "lines", ")", ":", "paths", "=", "{", "}", "for", "line", "in", "lines", ":", "if", "len", "(", "line", ")", "<", "2", ":", "continue", "action", "=", "line", "[", "0", "]", "if", "action", "==", "\"setenv\"", ":", "target", "=", "line", "[", "1", "]", "path_vals", "=", "[", "]", "if", "line", "[", "2", ":", "]", ":", "path_vals", "=", "line", "[", "2", "]", ".", "split", "(", "\":\"", ")", "self", ".", "_ExpandPath", "(", "target", ",", "path_vals", ",", "paths", ")", "elif", "action", "==", "\"set\"", ":", "set_vals", "=", "self", ".", "_CSH_SET_RE", ".", "search", "(", "\" \"", ".", "join", "(", "line", "[", "1", ":", "]", ")", ")", "if", "set_vals", ":", "target", ",", "vals", "=", "set_vals", ".", "groups", "(", ")", "# Automatically exported to ENV vars.", "if", "target", "in", "(", "\"path\"", ",", "\"term\"", ",", "\"user\"", ")", ":", "target", "=", "target", ".", "upper", "(", ")", "path_vals", "=", "vals", ".", "split", "(", ")", "self", ".", "_ExpandPath", "(", "target", ",", "path_vals", ",", "paths", ")", "return", "paths" ]
34.315789
18.710526
def get_schemaloc_string(self, ns_uris=None, sort=False, delim="\n"):
    """Constructs and returns a schemalocation attribute.

    If no namespaces in this set have any schema locations defined,
    returns an empty string.

    Args:
        ns_uris (iterable): The namespaces to include in the constructed
            attribute value.  If None, all are included.
        sort (bool): Whether the sort the namespace URIs.
        delim (str): The delimiter to use between namespace/schemaloc
            *pairs*.

    Returns:
        str: A schemalocation attribute in the format:
            ``xsi:schemaLocation="nsuri schemaloc<delim>nsuri2 schemaloc2<delim>..."``
    """
    if not ns_uris:
        ns_uris = six.iterkeys(self.__ns_uri_map)

    if sort:
        ns_uris = sorted(ns_uris)

    # Only namespaces that actually carry a schema location contribute.
    pairs = [
        "{0.uri} {0.schema_location}".format(info)
        for info in (self.__lookup_uri(uri) for uri in ns_uris)
        if info.schema_location
    ]

    if not pairs:
        return ""
    return 'xsi:schemaLocation="{0}"'.format(delim.join(pairs))
[ "def", "get_schemaloc_string", "(", "self", ",", "ns_uris", "=", "None", ",", "sort", "=", "False", ",", "delim", "=", "\"\\n\"", ")", ":", "if", "not", "ns_uris", ":", "ns_uris", "=", "six", ".", "iterkeys", "(", "self", ".", "__ns_uri_map", ")", "if", "sort", ":", "ns_uris", "=", "sorted", "(", "ns_uris", ")", "schemalocs", "=", "[", "]", "for", "ns_uri", "in", "ns_uris", ":", "ni", "=", "self", ".", "__lookup_uri", "(", "ns_uri", ")", "if", "ni", ".", "schema_location", ":", "schemalocs", ".", "append", "(", "\"{0.uri} {0.schema_location}\"", ".", "format", "(", "ni", ")", ")", "if", "not", "schemalocs", ":", "return", "\"\"", "return", "'xsi:schemaLocation=\"{0}\"'", ".", "format", "(", "delim", ".", "join", "(", "schemalocs", ")", ")" ]
33.371429
25
def get_subject_version_ids(self, subject):
    """
    Return the list of schema version ids which have been registered
    under the given subject.
    """
    response = requests.get(self._url('/subjects/{}/versions', subject))
    raise_if_failed(response)
    return response.json()
[ "def", "get_subject_version_ids", "(", "self", ",", "subject", ")", ":", "res", "=", "requests", ".", "get", "(", "self", ".", "_url", "(", "'/subjects/{}/versions'", ",", "subject", ")", ")", "raise_if_failed", "(", "res", ")", "return", "res", ".", "json", "(", ")" ]
36.625
12.625
def df(unit = 'GB'):
    '''A wrapper for the df shell command.

    Runs ``df -TP``, parses its tabular output, and returns a dict mapping
    each filesystem name to a dict of its columns, with Size/Used/Available
    converted to the requested unit via the module-level df_conversions table.
    '''
    details = {}
    headers = ['Filesystem', 'Type', 'Size', 'Used', 'Available', 'Capacity', 'MountedOn']
    n = len(headers)
    # Divisor used to rescale the 1K-block figures df reports.
    unit = df_conversions[unit]
    p = subprocess.Popen(args = ['df', '-TP'], stdout = subprocess.PIPE) # -P prevents line wrapping on long filesystem names
    stdout, stderr = p.communicate()
    # NOTE(review): on Python 3, communicate() returns bytes, so
    # stdout.split("\n") would raise TypeError — this looks like Python 2
    # code; confirm the target interpreter.
    lines = stdout.split("\n")
    # Normalize the header row so it splits into exactly the expected tokens.
    lines[0] = lines[0].replace("Mounted on", "MountedOn").replace("1K-blocks", "Size").replace("1024-blocks", "Size")
    assert(lines[0].split() == headers)
    lines = [l.strip() for l in lines if l.strip()]
    for line in lines[1:]:
        tokens = line.split()
        if tokens[0] == 'none': # skip uninteresting entries
            continue
        assert(len(tokens) == n)
        # Map each data column (everything after the filesystem name) to its header.
        d = {}
        for x in range(1, len(headers)):
            d[headers[x]] = tokens[x]
        d['Size'] = float(d['Size']) / unit
        assert(d['Capacity'].endswith("%"))
        d['Use%'] = d['Capacity']
        d['Used'] = float(d['Used']) / unit
        d['Available'] = float(d['Available']) / unit
        d['Using'] = 100*(d['Used']/d['Size']) # same as Use% but with more precision
        if d['Type'].startswith('ext'):
            # NOTE(review): this `pass` is dead code; the increment below
            # always runs for ext* filesystems — confirm that is intended.
            pass
            d['Using'] += 5 # ext2, ext3, and ext4 reserve 5% by default
        else:
            # Known hosts whose exports are backed by ext3 even though df
            # reports a network filesystem type.
            ext3_filesystems = ['ganon:', 'kortemmelab:', 'albana:']
            for e3fs in ext3_filesystems:
                if tokens[0].find(e3fs) != -1:
                    d['Using'] += 5 # ext3 reserves 5%
                    break
        details[tokens[0]] = d
    return details
[ "def", "df", "(", "unit", "=", "'GB'", ")", ":", "details", "=", "{", "}", "headers", "=", "[", "'Filesystem'", ",", "'Type'", ",", "'Size'", ",", "'Used'", ",", "'Available'", ",", "'Capacity'", ",", "'MountedOn'", "]", "n", "=", "len", "(", "headers", ")", "unit", "=", "df_conversions", "[", "unit", "]", "p", "=", "subprocess", ".", "Popen", "(", "args", "=", "[", "'df'", ",", "'-TP'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "# -P prevents line wrapping on long filesystem names", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "lines", "=", "stdout", ".", "split", "(", "\"\\n\"", ")", "lines", "[", "0", "]", "=", "lines", "[", "0", "]", ".", "replace", "(", "\"Mounted on\"", ",", "\"MountedOn\"", ")", ".", "replace", "(", "\"1K-blocks\"", ",", "\"Size\"", ")", ".", "replace", "(", "\"1024-blocks\"", ",", "\"Size\"", ")", "assert", "(", "lines", "[", "0", "]", ".", "split", "(", ")", "==", "headers", ")", "lines", "=", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "lines", "if", "l", ".", "strip", "(", ")", "]", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "tokens", "=", "line", ".", "split", "(", ")", "if", "tokens", "[", "0", "]", "==", "'none'", ":", "# skip uninteresting entries", "continue", "assert", "(", "len", "(", "tokens", ")", "==", "n", ")", "d", "=", "{", "}", "for", "x", "in", "range", "(", "1", ",", "len", "(", "headers", ")", ")", ":", "d", "[", "headers", "[", "x", "]", "]", "=", "tokens", "[", "x", "]", "d", "[", "'Size'", "]", "=", "float", "(", "d", "[", "'Size'", "]", ")", "/", "unit", "assert", "(", "d", "[", "'Capacity'", "]", ".", "endswith", "(", "\"%\"", ")", ")", "d", "[", "'Use%'", "]", "=", "d", "[", "'Capacity'", "]", "d", "[", "'Used'", "]", "=", "float", "(", "d", "[", "'Used'", "]", ")", "/", "unit", "d", "[", "'Available'", "]", "=", "float", "(", "d", "[", "'Available'", "]", ")", "/", "unit", "d", "[", "'Using'", "]", "=", "100", "*", "(", "d", "[", "'Used'", "]", "/", "d", 
"[", "'Size'", "]", ")", "# same as Use% but with more precision", "if", "d", "[", "'Type'", "]", ".", "startswith", "(", "'ext'", ")", ":", "pass", "d", "[", "'Using'", "]", "+=", "5", "# ext2, ext3, and ext4 reserve 5% by default", "else", ":", "ext3_filesystems", "=", "[", "'ganon:'", ",", "'kortemmelab:'", ",", "'albana:'", "]", "for", "e3fs", "in", "ext3_filesystems", ":", "if", "tokens", "[", "0", "]", ".", "find", "(", "e3fs", ")", "!=", "-", "1", ":", "d", "[", "'Using'", "]", "+=", "5", "# ext3 reserves 5%", "break", "details", "[", "tokens", "[", "0", "]", "]", "=", "d", "return", "details" ]
36.386364
21.659091
def to_binary(self):
    """Convert N-ary operators to binary operators."""
    converted = self.node.to_binary()
    # If nothing changed, reuse this wrapper; otherwise wrap the new node.
    return self if converted is self.node else _expr(converted)
[ "def", "to_binary", "(", "self", ")", ":", "node", "=", "self", ".", "node", ".", "to_binary", "(", ")", "if", "node", "is", "self", ".", "node", ":", "return", "self", "else", ":", "return", "_expr", "(", "node", ")" ]
29.857143
12.714286
def separable_series(h, N=1):
    """ finds the first N rank 1 tensors such that their sum approximates
    the tensor h (2d or 3d) best

    returns (e.g. for 3d case)
    res = (hx,hy,hz)[i]
    s.t.
    h \\approx sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])

    Parameters
    ----------
    h: ndarray
        input array (2- or 3-dimensional)
    N: int
        order of approximation

    Returns
    -------
        res, the series of tensors
        res[i] = (hx,hy,hz)[i]
    """
    # Dispatch on dimensionality; anything other than 2d/3d is an error.
    if h.ndim == 2:
        return _separable_series2(h, N)
    if h.ndim == 3:
        return _separable_series3(h, N)
    raise ValueError("unsupported array dimension: %s (only 2d or 3d) " % h.ndim)
[ "def", "separable_series", "(", "h", ",", "N", "=", "1", ")", ":", "if", "h", ".", "ndim", "==", "2", ":", "return", "_separable_series2", "(", "h", ",", "N", ")", "elif", "h", ".", "ndim", "==", "3", ":", "return", "_separable_series3", "(", "h", ",", "N", ")", "else", ":", "raise", "ValueError", "(", "\"unsupported array dimension: %s (only 2d or 3d) \"", "%", "h", ".", "ndim", ")" ]
22.833333
22.033333
def isometric_remesh(script, SamplingRate=10):
    """Isometric parameterization: remeshing

    Writes an "Iso Parametrization Remeshing" filter stanza to the given
    MeshLab filter script.

    Args:
        script: the filter script object passed to util.write_filter.
        SamplingRate (int): sampling rate for the remeshing.

    Returns:
        None
    """
    # Fix: the joined pieces previously had no separating spaces, producing
    # invalid XML like name="SamplingRate"value="10"description=... ;
    # each attribute now ends with a trailing space.
    filter_xml = ''.join([
        '  <filter name="Iso Parametrization Remeshing">\n',
        '    <Param name="SamplingRate" ',
        'value="%d" ' % SamplingRate,
        'description="Sampling Rate" ',
        'type="RichInt" ',
        'tooltip="This specify the sampling rate for remeshing." ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
[ "def", "isometric_remesh", "(", "script", ",", "SamplingRate", "=", "10", ")", ":", "filter_xml", "=", "''", ".", "join", "(", "[", "' <filter name=\"Iso Parametrization Remeshing\">\\n'", ",", "' <Param name=\"SamplingRate\"'", ",", "'value=\"%d\"'", "%", "SamplingRate", ",", "'description=\"Sampling Rate\"'", ",", "'type=\"RichInt\"'", ",", "'tooltip=\"This specify the sampling rate for remeshing.\"'", ",", "'/>\\n'", ",", "' </filter>\\n'", "]", ")", "util", ".", "write_filter", "(", "script", ",", "filter_xml", ")", "return", "None" ]
32.333333
12.933333
def list_preferences_communication_channel_id(self, user_id, communication_channel_id):
    """
    List preferences.

    Fetch all preferences for the given communication channel
    """
    params = {}
    data = {}
    path = {}

    # REQUIRED - PATH - user_id
    """ID"""
    path["user_id"] = user_id

    # REQUIRED - PATH - communication_channel_id
    """ID"""
    path["communication_channel_id"] = communication_channel_id

    self.logger.debug("GET /api/v1/users/{user_id}/communication_channels/{communication_channel_id}/notification_preferences with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    # Paginated GET against the notification preferences endpoint.
    url = "/api/v1/users/{user_id}/communication_channels/{communication_channel_id}/notification_preferences".format(**path)
    return self.generic_request("GET", url, data=data, params=params, all_pages=True)
[ "def", "list_preferences_communication_channel_id", "(", "self", ",", "user_id", ",", "communication_channel_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user_id", "# REQUIRED - PATH - communication_channel_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"communication_channel_id\"", "]", "=", "communication_channel_id", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/users/{user_id}/communication_channels/{communication_channel_id}/notification_preferences with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/users/{user_id}/communication_channels/{communication_channel_id}/notification_preferences\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
45.65
35.15
def allows_not_principal(self):
    """Find allowed not-principals."""
    # Keep every Allow statement that declares a NotPrincipal element.
    return [
        stmt for stmt in self.statements
        if stmt.not_principal and stmt.effect == "Allow"
    ]
[ "def", "allows_not_principal", "(", "self", ")", ":", "not_principals", "=", "[", "]", "for", "statement", "in", "self", ".", "statements", ":", "if", "statement", ".", "not_principal", "and", "statement", ".", "effect", "==", "\"Allow\"", ":", "not_principals", ".", "append", "(", "statement", ")", "return", "not_principals" ]
36.125
14.125
def p_chr(p):
    """ string : CHR arg_list
    """
    # NOTE: the docstring above is a PLY grammar rule and must not change.
    args = p[2]
    if len(args) < 1:
        syntax_error(p.lineno(1), "CHR$ function need at less 1 parameter")
        p[0] = None
        return

    # Convert every argument to 8bit unsigned
    for arg in args:
        arg.value = make_typecast(TYPE.ubyte, arg.value, p.lineno(1))

    p[0] = make_builtin(p.lineno(1), 'CHR', args, type_=TYPE.string)
[ "def", "p_chr", "(", "p", ")", ":", "if", "len", "(", "p", "[", "2", "]", ")", "<", "1", ":", "syntax_error", "(", "p", ".", "lineno", "(", "1", ")", ",", "\"CHR$ function need at less 1 parameter\"", ")", "p", "[", "0", "]", "=", "None", "return", "for", "i", "in", "range", "(", "len", "(", "p", "[", "2", "]", ")", ")", ":", "# Convert every argument to 8bit unsigned", "p", "[", "2", "]", "[", "i", "]", ".", "value", "=", "make_typecast", "(", "TYPE", ".", "ubyte", ",", "p", "[", "2", "]", "[", "i", "]", ".", "value", ",", "p", ".", "lineno", "(", "1", ")", ")", "p", "[", "0", "]", "=", "make_builtin", "(", "p", ".", "lineno", "(", "1", ")", ",", "'CHR'", ",", "p", "[", "2", "]", ",", "type_", "=", "TYPE", ".", "string", ")" ]
33
25.5
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
    """Publish the site

    CLI entry point: pushes the built site to the hosting endpoint named in
    the Yass config (currently only "s3" is handled).

    Args:
        endpoint: hosting target name, e.g. "s3"; looked up under
            "hosting.<endpoint>" in the site config.
        purge_files: when True, delete remote files (minus any configured
            exclusions) before upload.
        rebuild_manifest: when True, regenerate the site manifest from the
            files currently on S3.
        skip_upload: when True, do everything except the actual upload.

    Raises:
        ValueError: if the site name or the endpoint config is missing.
    """
    print("Publishing site to %s ..." % endpoint.upper())
    yass = Yass(CWD)
    target = endpoint.lower()

    sitename = yass.sitename
    if not sitename:
        raise ValueError("Missing site name")

    # `endpoint` is rebound from the target name to its config mapping.
    endpoint = yass.config.get("hosting.%s" % target)
    if not endpoint:
        raise ValueError("%s endpoint is missing in the config" % target.upper())

    if target == "s3":
        p = publisher.S3Website(sitename=sitename,
                                aws_access_key_id=endpoint.get("aws_access_key_id"),
                                aws_secret_access_key=endpoint.get("aws_secret_access_key"),
                                region=endpoint.get("aws_region"))

        if not p.website_exists:
            print(">>>")
            print("Setting S3 site...")
            if p.create_website() is True:
                # Need to give it enough time to create it
                # Should be a one time thing
                time.sleep(10)
                p.create_www_website()
                print("New bucket created: %s" % p.sitename)

        if rebuild_manifest:
            print(">>>")
            print("Rebuilding site's manifest...")
            p.create_manifest_from_s3_files()

        # Purge when requested on the CLI or enabled in the config.
        if purge_files is True or endpoint.get("purge_files") is True:
            print(">>>")
            print("Purging files...")
            exclude_files = endpoint.get("purge_exclude_files", [])
            p.purge_files(exclude_files=exclude_files)

        if not skip_upload:
            print(">>>")
            print("Uploading your site...")
            p.upload(yass.build_dir)
        else:
            print(">>>")
            print("WARNING: files upload was skipped because of the use of --skip-upload")

        print("")
        print("Yass! Your site has been successfully published to: ")
        print(p.website_endpoint_url)

    footer()
[ "def", "publish", "(", "endpoint", ",", "purge_files", ",", "rebuild_manifest", ",", "skip_upload", ")", ":", "print", "(", "\"Publishing site to %s ...\"", "%", "endpoint", ".", "upper", "(", ")", ")", "yass", "=", "Yass", "(", "CWD", ")", "target", "=", "endpoint", ".", "lower", "(", ")", "sitename", "=", "yass", ".", "sitename", "if", "not", "sitename", ":", "raise", "ValueError", "(", "\"Missing site name\"", ")", "endpoint", "=", "yass", ".", "config", ".", "get", "(", "\"hosting.%s\"", "%", "target", ")", "if", "not", "endpoint", ":", "raise", "ValueError", "(", "\"%s endpoint is missing in the config\"", "%", "target", ".", "upper", "(", ")", ")", "if", "target", "==", "\"s3\"", ":", "p", "=", "publisher", ".", "S3Website", "(", "sitename", "=", "sitename", ",", "aws_access_key_id", "=", "endpoint", ".", "get", "(", "\"aws_access_key_id\"", ")", ",", "aws_secret_access_key", "=", "endpoint", ".", "get", "(", "\"aws_secret_access_key\"", ")", ",", "region", "=", "endpoint", ".", "get", "(", "\"aws_region\"", ")", ")", "if", "not", "p", ".", "website_exists", ":", "print", "(", "\">>>\"", ")", "print", "(", "\"Setting S3 site...\"", ")", "if", "p", ".", "create_website", "(", ")", "is", "True", ":", "# Need to give it enough time to create it", "# Should be a one time thing", "time", ".", "sleep", "(", "10", ")", "p", ".", "create_www_website", "(", ")", "print", "(", "\"New bucket created: %s\"", "%", "p", ".", "sitename", ")", "if", "rebuild_manifest", ":", "print", "(", "\">>>\"", ")", "print", "(", "\"Rebuilding site's manifest...\"", ")", "p", ".", "create_manifest_from_s3_files", "(", ")", "if", "purge_files", "is", "True", "or", "endpoint", ".", "get", "(", "\"purge_files\"", ")", "is", "True", ":", "print", "(", "\">>>\"", ")", "print", "(", "\"Purging files...\"", ")", "exclude_files", "=", "endpoint", ".", "get", "(", "\"purge_exclude_files\"", ",", "[", "]", ")", "p", ".", "purge_files", "(", "exclude_files", "=", "exclude_files", ")", "if", 
"not", "skip_upload", ":", "print", "(", "\">>>\"", ")", "print", "(", "\"Uploading your site...\"", ")", "p", ".", "upload", "(", "yass", ".", "build_dir", ")", "else", ":", "print", "(", "\">>>\"", ")", "print", "(", "\"WARNING: files upload was skipped because of the use of --skip-upload\"", ")", "print", "(", "\"\"", ")", "print", "(", "\"Yass! Your site has been successfully published to: \"", ")", "print", "(", "p", ".", "website_endpoint_url", ")", "footer", "(", ")" ]
34.854545
21.109091
def enqueue(self, name=None, action=None, method=None, wait_url=None, wait_url_method=None, workflow_sid=None, **kwargs): """ Create a <Enqueue> element :param name: Friendly name :param action: Action URL :param method: Action URL method :param wait_url: Wait URL :param wait_url_method: Wait URL method :param workflow_sid: TaskRouter Workflow SID :param kwargs: additional attributes :returns: <Enqueue> element """ return self.nest(Enqueue( name=name, action=action, method=method, wait_url=wait_url, wait_url_method=wait_url_method, workflow_sid=workflow_sid, **kwargs ))
[ "def", "enqueue", "(", "self", ",", "name", "=", "None", ",", "action", "=", "None", ",", "method", "=", "None", ",", "wait_url", "=", "None", ",", "wait_url_method", "=", "None", ",", "workflow_sid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "nest", "(", "Enqueue", "(", "name", "=", "name", ",", "action", "=", "action", ",", "method", "=", "method", ",", "wait_url", "=", "wait_url", ",", "wait_url_method", "=", "wait_url_method", ",", "workflow_sid", "=", "workflow_sid", ",", "*", "*", "kwargs", ")", ")" ]
31.625
12.875
def _checkout(self): """ check out the image filesystem on self.mount_point """ cmd = ["atomic", "mount", "--storage", "ostree", self.ref_image_name, self.mount_point] # self.mount_point has to be created by us self._run_and_log(cmd, self.ostree_path, "Failed to mount selected image as an ostree repo.")
[ "def", "_checkout", "(", "self", ")", ":", "cmd", "=", "[", "\"atomic\"", ",", "\"mount\"", ",", "\"--storage\"", ",", "\"ostree\"", ",", "self", ".", "ref_image_name", ",", "self", ".", "mount_point", "]", "# self.mount_point has to be created by us", "self", ".", "_run_and_log", "(", "cmd", ",", "self", ".", "ostree_path", ",", "\"Failed to mount selected image as an ostree repo.\"", ")" ]
59.5
21.833333
def _var(ins): """ Defines a memory variable. """ output = [] output.append('%s:' % ins.quad[1]) output.append('DEFB %s' % ((int(ins.quad[2]) - 1) * '00, ' + '00')) return output
[ "def", "_var", "(", "ins", ")", ":", "output", "=", "[", "]", "output", ".", "append", "(", "'%s:'", "%", "ins", ".", "quad", "[", "1", "]", ")", "output", ".", "append", "(", "'DEFB %s'", "%", "(", "(", "int", "(", "ins", ".", "quad", "[", "2", "]", ")", "-", "1", ")", "*", "'00, '", "+", "'00'", ")", ")", "return", "output" ]
24.5
18.375
def navigation(self, id=None): """Function decorator for navbar registration. Convenience function, calls :meth:`.register_element` with ``id`` and the decorated function as ``elem``. :param id: ID to pass on. If ``None``, uses the decorated functions name. """ def wrapper(f): self.register_element(id or f.__name__, f) return f return wrapper
[ "def", "navigation", "(", "self", ",", "id", "=", "None", ")", ":", "def", "wrapper", "(", "f", ")", ":", "self", ".", "register_element", "(", "id", "or", "f", ".", "__name__", ",", "f", ")", "return", "f", "return", "wrapper" ]
28.866667
22
def schedule_task(self): """ Schedules this publish action as a Celery task. """ from .tasks import publish_task publish_task.apply_async(kwargs={'pk': self.pk}, eta=self.scheduled_time)
[ "def", "schedule_task", "(", "self", ")", ":", "from", ".", "tasks", "import", "publish_task", "publish_task", ".", "apply_async", "(", "kwargs", "=", "{", "'pk'", ":", "self", ".", "pk", "}", ",", "eta", "=", "self", ".", "scheduled_time", ")" ]
31.571429
16.142857
def supplementary_material(soup): """ supplementary-material tags """ supplementary_material = [] supplementary_material_tags = raw_parser.supplementary_material(soup) position = 1 for tag in supplementary_material_tags: item = {} copy_attribute(tag.attrs, 'id', item) # Get the tag type nodenames = ["supplementary-material"] details = tag_details(tag, nodenames) copy_attribute(details, 'type', item) copy_attribute(details, 'asset', item) copy_attribute(details, 'component_doi', item) copy_attribute(details, 'sibling_ordinal', item) if raw_parser.label(tag): item['label'] = node_text(raw_parser.label(tag)) item['full_label'] = node_contents_str(raw_parser.label(tag)) # Increment the position item['position'] = position # Ordinal should be the same as position in this case but set it anyway item['ordinal'] = tag_ordinal(tag) supplementary_material.append(item) return supplementary_material
[ "def", "supplementary_material", "(", "soup", ")", ":", "supplementary_material", "=", "[", "]", "supplementary_material_tags", "=", "raw_parser", ".", "supplementary_material", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "supplementary_material_tags", ":", "item", "=", "{", "}", "copy_attribute", "(", "tag", ".", "attrs", ",", "'id'", ",", "item", ")", "# Get the tag type", "nodenames", "=", "[", "\"supplementary-material\"", "]", "details", "=", "tag_details", "(", "tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "item", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "item", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "item", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "item", ")", "if", "raw_parser", ".", "label", "(", "tag", ")", ":", "item", "[", "'label'", "]", "=", "node_text", "(", "raw_parser", ".", "label", "(", "tag", ")", ")", "item", "[", "'full_label'", "]", "=", "node_contents_str", "(", "raw_parser", ".", "label", "(", "tag", ")", ")", "# Increment the position", "item", "[", "'position'", "]", "=", "position", "# Ordinal should be the same as position in this case but set it anyway", "item", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "supplementary_material", ".", "append", "(", "item", ")", "return", "supplementary_material" ]
30.028571
18.942857
def levelize_smooth_or_improve_candidates(to_levelize, max_levels): """Turn parameter in to a list per level. Helper function to preprocess the smooth and improve_candidates parameters passed to smoothed_aggregation_solver and rootnode_solver. Parameters ---------- to_levelize : {string, tuple, list} Parameter to preprocess, i.e., levelize and convert to a level-by-level list such that entry i specifies the parameter at level i max_levels : int Defines the maximum number of levels considered Returns ------- to_levelize : list The parameter list such that entry i specifies the parameter choice at level i. Notes -------- This routine is needed because the user will pass in a parameter option such as smooth='jacobi', or smooth=['jacobi', None], and this option must be "levelized", or converted to a list of length max_levels such that entry [i] in that list is the parameter choice for level i. The parameter choice in to_levelize can be a string, tuple or list. If it is a string or tuple, then that option is assumed to be the parameter setting at every level. If to_levelize is inititally a list, if the length of the list is less than max_levels, the last entry in the list defines that parameter for all subsequent levels. Examples -------- >>> from pyamg.util.utils import levelize_smooth_or_improve_candidates >>> improve_candidates = ['gauss_seidel', None] >>> levelize_smooth_or_improve_candidates(improve_candidates, 4) ['gauss_seidel', None, None, None] """ if isinstance(to_levelize, tuple) or isinstance(to_levelize, str): to_levelize = [to_levelize for i in range(max_levels)] elif isinstance(to_levelize, list): if len(to_levelize) < max_levels: mlz = max_levels - len(to_levelize) toext = [to_levelize[-1] for i in range(mlz)] to_levelize.extend(toext) elif to_levelize is None: to_levelize = [(None, {}) for i in range(max_levels)] return to_levelize
[ "def", "levelize_smooth_or_improve_candidates", "(", "to_levelize", ",", "max_levels", ")", ":", "if", "isinstance", "(", "to_levelize", ",", "tuple", ")", "or", "isinstance", "(", "to_levelize", ",", "str", ")", ":", "to_levelize", "=", "[", "to_levelize", "for", "i", "in", "range", "(", "max_levels", ")", "]", "elif", "isinstance", "(", "to_levelize", ",", "list", ")", ":", "if", "len", "(", "to_levelize", ")", "<", "max_levels", ":", "mlz", "=", "max_levels", "-", "len", "(", "to_levelize", ")", "toext", "=", "[", "to_levelize", "[", "-", "1", "]", "for", "i", "in", "range", "(", "mlz", ")", "]", "to_levelize", ".", "extend", "(", "toext", ")", "elif", "to_levelize", "is", "None", ":", "to_levelize", "=", "[", "(", "None", ",", "{", "}", ")", "for", "i", "in", "range", "(", "max_levels", ")", "]", "return", "to_levelize" ]
39.653846
24.807692
def delete(name, profile="splunk"): ''' Delete a splunk search CLI Example: splunk_search.delete 'my search name' ''' client = _get_splunk(profile) try: client.saved_searches.delete(name) return True except KeyError: return None
[ "def", "delete", "(", "name", ",", "profile", "=", "\"splunk\"", ")", ":", "client", "=", "_get_splunk", "(", "profile", ")", "try", ":", "client", ".", "saved_searches", ".", "delete", "(", "name", ")", "return", "True", "except", "KeyError", ":", "return", "None" ]
19.714286
21.142857
def partition_dict(items, key): """ Given an ordered dictionary of items and a key in that dict, return an ordered dict of items before, the keyed item, and an ordered dict of items after. >>> od = collections.OrderedDict(zip(range(5), 'abcde')) >>> before, item, after = partition_dict(od, 3) >>> before OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')]) >>> item 'd' >>> after OrderedDict([(4, 'e')]) Like string.partition, if the key is not found in the items, the before will contain all items, item will be None, and after will be an empty iterable. >>> before, item, after = partition_dict(od, -1) >>> before OrderedDict([(0, 'a'), ..., (4, 'e')]) >>> item >>> list(after) [] """ def unmatched(pair): test_key, item, = pair return test_key != key items_iter = iter(items.items()) item = items.get(key) left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter)) right = collections.OrderedDict(items_iter) return left, item, right
[ "def", "partition_dict", "(", "items", ",", "key", ")", ":", "def", "unmatched", "(", "pair", ")", ":", "test_key", ",", "item", ",", "=", "pair", "return", "test_key", "!=", "key", "items_iter", "=", "iter", "(", "items", ".", "items", "(", ")", ")", "item", "=", "items", ".", "get", "(", "key", ")", "left", "=", "collections", ".", "OrderedDict", "(", "itertools", ".", "takewhile", "(", "unmatched", ",", "items_iter", ")", ")", "right", "=", "collections", ".", "OrderedDict", "(", "items_iter", ")", "return", "left", ",", "item", ",", "right" ]
27.028571
19.885714
def _detect_timezone(): ''' Get timezone as set by the system ''' default_timezone = 'America/New_York' locale_code = locale.getdefaultlocale() return default_timezone if not locale_code[0] else \ str(pytz.country_timezones[locale_code[0][-2:]][0])
[ "def", "_detect_timezone", "(", ")", ":", "default_timezone", "=", "'America/New_York'", "locale_code", "=", "locale", ".", "getdefaultlocale", "(", ")", "return", "default_timezone", "if", "not", "locale_code", "[", "0", "]", "else", "str", "(", "pytz", ".", "country_timezones", "[", "locale_code", "[", "0", "]", "[", "-", "2", ":", "]", "]", "[", "0", "]", ")" ]
34.125
15.625
def minimum_needs_parameter(field=None, parameter_name=None): """Get minimum needs parameter from a given field. :param field: Field provided :type field: dict :param parameter_name: Need parameter's name :type parameter_name: str :return: Need paramter :rtype: ResourceParameter """ try: if field['key']: for need_field in minimum_needs_fields: if need_field['key'] == field['key']: return need_field['need_parameter'] except (TypeError, KeyError): # in case field is None or field doesn't contain key. # just pass pass if parameter_name: for need_field in minimum_needs_fields: if need_field['need_parameter'].name == parameter_name: return need_field['need_parameter'] return None
[ "def", "minimum_needs_parameter", "(", "field", "=", "None", ",", "parameter_name", "=", "None", ")", ":", "try", ":", "if", "field", "[", "'key'", "]", ":", "for", "need_field", "in", "minimum_needs_fields", ":", "if", "need_field", "[", "'key'", "]", "==", "field", "[", "'key'", "]", ":", "return", "need_field", "[", "'need_parameter'", "]", "except", "(", "TypeError", ",", "KeyError", ")", ":", "# in case field is None or field doesn't contain key.", "# just pass", "pass", "if", "parameter_name", ":", "for", "need_field", "in", "minimum_needs_fields", ":", "if", "need_field", "[", "'need_parameter'", "]", ".", "name", "==", "parameter_name", ":", "return", "need_field", "[", "'need_parameter'", "]", "return", "None" ]
30.555556
18.666667
def for_all(*generators): """ Takes a list of generators and returns a closure which takes a property, then tests the property against arbitrary instances of the generators. """ # Pass in n as an argument n = 100 def test_property(property_function): """ A closure which takes a property and tests it against arbitrary instances of the generators in the closure. """ for generator in generators: assert (issubclass(generator, ArbitraryInterface) or generator in arbitrary) def test_once(): """ Tests the property against a single set of instances of the generators. """ instances = [arbitrary(generator) for generator in generators] try: assert property_function(*instances) except AssertionError: generator_names = ', '.join( generator.__name__ for generator in generators) stringed_instances = ', '.join( str(instance) for instance in instances) error_message = ' '.join([ 'Instances <', stringed_instances, '> of <', generator_names, '> do not satisfy a property.' ]) raise AssertionError(error_message) for _ in range(n): test_once() return test_property
[ "def", "for_all", "(", "*", "generators", ")", ":", "# Pass in n as an argument", "n", "=", "100", "def", "test_property", "(", "property_function", ")", ":", "\"\"\"\n A closure which takes a property and tests it against arbitrary\n instances of the generators in the closure.\n \"\"\"", "for", "generator", "in", "generators", ":", "assert", "(", "issubclass", "(", "generator", ",", "ArbitraryInterface", ")", "or", "generator", "in", "arbitrary", ")", "def", "test_once", "(", ")", ":", "\"\"\"\n Tests the property against a single set of instances of the\n generators.\n \"\"\"", "instances", "=", "[", "arbitrary", "(", "generator", ")", "for", "generator", "in", "generators", "]", "try", ":", "assert", "property_function", "(", "*", "instances", ")", "except", "AssertionError", ":", "generator_names", "=", "', '", ".", "join", "(", "generator", ".", "__name__", "for", "generator", "in", "generators", ")", "stringed_instances", "=", "', '", ".", "join", "(", "str", "(", "instance", ")", "for", "instance", "in", "instances", ")", "error_message", "=", "' '", ".", "join", "(", "[", "'Instances <'", ",", "stringed_instances", ",", "'> of <'", ",", "generator_names", ",", "'> do not satisfy a property.'", "]", ")", "raise", "AssertionError", "(", "error_message", ")", "for", "_", "in", "range", "(", "n", ")", ":", "test_once", "(", ")", "return", "test_property" ]
35.375
17.225
def listDataTiers(self, data_tier_name=""): """ API to list data tiers known to DBS. :param data_tier_name: List details on that data tier (Optional) :type data_tier_name: str :returns: List of dictionaries containing the following keys (data_tier_id, data_tier_name, create_by, creation_date) """ data_tier_name = data_tier_name.replace("*", "%") try: conn = self.dbi.connection() return self.dbsDataTierListDAO.execute(conn, data_tier_name.upper()) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except ValueError as ve: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input Data", self.logger.exception, ve.message) except TypeError as te: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input DataType", self.logger.exception, te.message) except NameError as ne: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input Searching Key", self.logger.exception, ne.message) except Exception as ex: sError = "DBSReaderModel/listDataTiers. %s\n. Exception trace: \n %s" \ % ( ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) finally: if conn: conn.close()
[ "def", "listDataTiers", "(", "self", ",", "data_tier_name", "=", "\"\"", ")", ":", "data_tier_name", "=", "data_tier_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "try", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "return", "self", ".", "dbsDataTierListDAO", ".", "execute", "(", "conn", ",", "data_tier_name", ".", "upper", "(", ")", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "message", ")", "except", "ValueError", "as", "ve", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input Data\"", ",", "self", ".", "logger", ".", "exception", ",", "ve", ".", "message", ")", "except", "TypeError", "as", "te", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input DataType\"", ",", "self", ".", "logger", ".", "exception", ",", "te", ".", "message", ")", "except", "NameError", "as", "ne", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input Searching Key\"", ",", "self", ".", "logger", ".", "exception", ",", "ne", ".", "message", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listDataTiers. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
51.206897
30.586207
def set_assets(self, asset_ids): """Sets the assets. arg: asset_ids (osid.id.Id[]): the asset ``Ids`` raise: InvalidArgument - ``asset_ids`` is invalid raise: NullArgument - ``asset_ids`` is ``null`` raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.ActivityForm.set_assets_template if not isinstance(asset_ids, list): raise errors.InvalidArgument() if self.get_assets_metadata().is_read_only(): raise errors.NoAccess() idstr_list = [] for object_id in asset_ids: if not self._is_valid_id(object_id): raise errors.InvalidArgument() idstr_list.append(str(object_id)) self._my_map['assetIds'] = idstr_list
[ "def", "set_assets", "(", "self", ",", "asset_ids", ")", ":", "# Implemented from template for osid.learning.ActivityForm.set_assets_template", "if", "not", "isinstance", "(", "asset_ids", ",", "list", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "if", "self", ".", "get_assets_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "idstr_list", "=", "[", "]", "for", "object_id", "in", "asset_ids", ":", "if", "not", "self", ".", "_is_valid_id", "(", "object_id", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "idstr_list", ".", "append", "(", "str", "(", "object_id", ")", ")", "self", ".", "_my_map", "[", "'assetIds'", "]", "=", "idstr_list" ]
41.666667
14.619048
def add_static_path(prefix: str, path: str, no_watch: bool = False) -> None: """Add directory to serve static files. First argument ``prefix`` is a URL prefix for the ``path``. ``path`` must be a directory. If ``no_watch`` is True, any change of the files in the path do not trigger restart if ``--autoreload`` is enabled. """ app = get_app() app.add_static_path(prefix, path) if not no_watch: watch_dir(path)
[ "def", "add_static_path", "(", "prefix", ":", "str", ",", "path", ":", "str", ",", "no_watch", ":", "bool", "=", "False", ")", "->", "None", ":", "app", "=", "get_app", "(", ")", "app", ".", "add_static_path", "(", "prefix", ",", "path", ")", "if", "not", "no_watch", ":", "watch_dir", "(", "path", ")" ]
40
21.090909
def balance_ions(anions, cations, anion_zs=None, cation_zs=None, anion_concs=None, cation_concs=None, rho_w=997.1, method='increase dominant', selected_ion=None): r'''Performs an ion balance to adjust measured experimental ion compositions to electroneutrality. Can accept either the actual mole fractions of the ions, or their concentrations in units of [mg/L] as well for convinience. The default method will locate the most prevalent ion in the type of ion not in excess - and increase it until the two ion types balance. Parameters ---------- anions : list(ChemicalMetadata) List of all negatively charged ions measured as being in the solution; ChemicalMetadata instances or simply objects with the attributes `MW` and `charge`, [-] cations : list(ChemicalMetadata) List of all positively charged ions measured as being in the solution; ChemicalMetadata instances or simply objects with the attributes `MW` and `charge`, [-] anion_zs : list, optional Mole fractions of each anion as measured in the aqueous solution, [-] cation_zs : list, optional Mole fractions of each cation as measured in the aqueous solution, [-] anion_concs : list, optional Concentrations of each anion in the aqueous solution in the units often reported (for convinience only) [mg/L] cation_concs : list, optional Concentrations of each cation in the aqueous solution in the units often reported (for convinience only) [mg/L] rho_w : float, optional Density of the aqueous solutionr at the temperature and pressure the anion and cation concentrations were measured (if specified), [kg/m^3] method : str, optional The method to use to balance the ionimbalance; one of 'dominant', 'decrease dominant', 'increase dominant', 'proportional insufficient ions increase', 'proportional excess ions decrease', 'proportional cation adjustment', 'proportional anion adjustment', 'Na or Cl increase', 'Na or Cl decrease', 'adjust', 'increase', 'decrease', 'makeup']. 
selected_ion : ChemicalMetadata, optional Some methods adjust only one user-specified ion; this is that input. For the case of the 'makeup' method, this is a tuple of (anion, cation) ChemicalMetadata instances and only the ion type not in excess will be used. Returns ------- anions : list(ChemicalMetadata) List of all negatively charged ions measured as being in the solution; ChemicalMetadata instances after potentially adding in an ion which was not present but specified by the user, [-] cations : list(ChemicalMetadata) List of all positively charged ions measured as being in the solution; ChemicalMetadata instances after potentially adding in an ion which was not present but specified by the user, [-] anion_zs : list, Mole fractions of each anion in the aqueous solution after the charge balance, [-] cation_zs : list Mole fractions of each cation in the aqueous solution after the charge balance, [-] z_water : float Mole fraction of the water in the solution, [-] Notes ----- The methods perform the charge balance as follows: * 'dominant' : The ion with the largest mole fraction in solution has its concentration adjusted up or down as necessary to balance the solution. * 'decrease dominant' : The ion with the largest mole fraction in the type of ion with *excess* charge has its own mole fraction decreased to balance the solution. * 'increase dominant' : The ion with the largest mole fraction in the type of ion with *insufficient* charge has its own mole fraction decreased to balance the solution. * 'proportional insufficient ions increase' : The ion charge type which is present insufficiently has each of the ions mole fractions *increased* proportionally until the solution is balanced. * 'proportional excess ions decrease' : The ion charge type which is present in excess has each of the ions mole fractions *decreased* proportionally until the solution is balanced. 
* 'proportional cation adjustment' : All *cations* have their mole fractions increased or decreased proportionally as necessary to balance the solution. * 'proportional anion adjustment' : All *anions* have their mole fractions increased or decreased proportionally as necessary to balance the solution. * 'Na or Cl increase' : Either Na+ or Cl- is *added* to the solution until the solution is balanced; the species will be added if they were not present initially as well. * 'Na or Cl decrease' : Either Na+ or Cl- is *removed* from the solution until the solution is balanced; the species will be added if they were not present initially as well. * 'adjust' : An ion specified with the parameter `selected_ion` has its mole fraction *increased or decreased* as necessary to balance the solution. An exception is raised if the specified ion alone cannot balance the solution. * 'increase' : An ion specified with the parameter `selected_ion` has its mole fraction *increased* as necessary to balance the solution. An exception is raised if the specified ion alone cannot balance the solution. * 'decrease' : An ion specified with the parameter `selected_ion` has its mole fraction *decreased* as necessary to balance the solution. An exception is raised if the specified ion alone cannot balance the solution. * 'makeup' : Two ions ase specified as a tuple with the parameter `selected_ion`. Whichever ion type is present in the solution insufficiently is added; i.e. if the ions were Mg+2 and Cl-, and there was too much negative charge in the solution, Mg+2 would be added until the solution was balanced. Examples -------- >>> anions_n = ['Cl-', 'HCO3-', 'SO4-2'] >>> cations_n = ['Na+', 'K+', 'Ca+2', 'Mg+2'] >>> cations = [pubchem_db.search_name(i) for i in cations_n] >>> anions = [pubchem_db.search_name(i) for i in anions_n] >>> an_res, cat_res, an_zs, cat_zs, z_water = balance_ions(anions, cations, ... anion_zs=[0.02557, 0.00039, 0.00026], cation_zs=[0.0233, 0.00075, ... 
0.00262, 0.00119], method='proportional excess ions decrease') >>> an_zs [0.02557, 0.00039, 0.00026] >>> cat_zs [0.01948165456267761, 0.0006270918850647299, 0.0021906409851594564, 0.0009949857909693717] >>> z_water 0.9504856267761288 References ---------- ''' anions = list(anions) cations = list(cations) n_anions = len(anions) n_cations = len(cations) ions = anions + cations anion_charges = [i.charge for i in anions] cation_charges = [i.charge for i in cations] charges = anion_charges + cation_charges + [0] MW_water = [18.01528] rho_w = rho_w/1000 # Convert to kg/liter if anion_concs is not None and cation_concs is not None: anion_ws = [i*1E-6/rho_w for i in anion_concs] cation_ws = [i*1E-6/rho_w for i in cation_concs] w_water = 1 - sum(anion_ws) - sum(cation_ws) anion_MWs = [i.MW for i in anions] cation_MWs = [i.MW for i in cations] MWs = anion_MWs + cation_MWs + MW_water zs = ws_to_zs(anion_ws + cation_ws + [w_water], MWs) else: if anion_zs is None or cation_zs is None: raise Exception('Either both of anion_concs and cation_concs or ' 'anion_zs and cation_zs must be specified.') else: zs = anion_zs + cation_zs zs = zs + [1 - sum(zs)] impacts = [zi*ci for zi, ci in zip(zs, charges)] balance_error = sum(impacts) if abs(balance_error) < 1E-7: anion_zs = zs[0:n_anions] cation_zs = zs[n_anions:n_cations+n_anions] z_water = zs[-1] return anions, cations, anion_zs, cation_zs, z_water if 'dominant' in method: anion_zs, cation_zs, z_water = ion_balance_dominant(impacts, balance_error, charges, zs, n_anions, n_cations, method) return anions, cations, anion_zs, cation_zs, z_water elif 'proportional' in method: anion_zs, cation_zs, z_water = ion_balance_proportional( anion_charges, cation_charges, zs, n_anions, n_cations, balance_error, method) return anions, cations, anion_zs, cation_zs, z_water elif method == 'Na or Cl increase': increase = True if balance_error < 0: selected_ion = pubchem_db.search_name('Na+') else: selected_ion = pubchem_db.search_name('Cl-') elif method 
== 'Na or Cl decrease': increase = False if balance_error > 0: selected_ion = pubchem_db.search_name('Na+') else: selected_ion = pubchem_db.search_name('Cl-') # All of the below work with the variable selected_ion elif method == 'adjust': # A single ion will be increase or decreased to fix the balance automatically increase = None elif method == 'increase': increase = True # Raise exception if approach doesn't work elif method == 'decrease': increase = False # Raise exception if approach doesn't work elif method == 'makeup': # selected ion starts out as a tuple in this case; always adding the compound increase = True if balance_error < 0: selected_ion = selected_ion[1] else: selected_ion = selected_ion[0] else: raise Exception('Method not recognized') if selected_ion is None: raise Exception("For methods 'adjust', 'increase', 'decrease', and " "'makeup', an ion must be specified with the " "`selected_ion` parameter") anion_zs, cation_zs, z_water = ion_balance_adjust_wrapper(charges, zs, n_anions, n_cations, anions, cations, selected_ion, increase=increase) return anions, cations, anion_zs, cation_zs, z_water
[ "def", "balance_ions", "(", "anions", ",", "cations", ",", "anion_zs", "=", "None", ",", "cation_zs", "=", "None", ",", "anion_concs", "=", "None", ",", "cation_concs", "=", "None", ",", "rho_w", "=", "997.1", ",", "method", "=", "'increase dominant'", ",", "selected_ion", "=", "None", ")", ":", "anions", "=", "list", "(", "anions", ")", "cations", "=", "list", "(", "cations", ")", "n_anions", "=", "len", "(", "anions", ")", "n_cations", "=", "len", "(", "cations", ")", "ions", "=", "anions", "+", "cations", "anion_charges", "=", "[", "i", ".", "charge", "for", "i", "in", "anions", "]", "cation_charges", "=", "[", "i", ".", "charge", "for", "i", "in", "cations", "]", "charges", "=", "anion_charges", "+", "cation_charges", "+", "[", "0", "]", "MW_water", "=", "[", "18.01528", "]", "rho_w", "=", "rho_w", "/", "1000", "# Convert to kg/liter", "if", "anion_concs", "is", "not", "None", "and", "cation_concs", "is", "not", "None", ":", "anion_ws", "=", "[", "i", "*", "1E-6", "/", "rho_w", "for", "i", "in", "anion_concs", "]", "cation_ws", "=", "[", "i", "*", "1E-6", "/", "rho_w", "for", "i", "in", "cation_concs", "]", "w_water", "=", "1", "-", "sum", "(", "anion_ws", ")", "-", "sum", "(", "cation_ws", ")", "anion_MWs", "=", "[", "i", ".", "MW", "for", "i", "in", "anions", "]", "cation_MWs", "=", "[", "i", ".", "MW", "for", "i", "in", "cations", "]", "MWs", "=", "anion_MWs", "+", "cation_MWs", "+", "MW_water", "zs", "=", "ws_to_zs", "(", "anion_ws", "+", "cation_ws", "+", "[", "w_water", "]", ",", "MWs", ")", "else", ":", "if", "anion_zs", "is", "None", "or", "cation_zs", "is", "None", ":", "raise", "Exception", "(", "'Either both of anion_concs and cation_concs or '", "'anion_zs and cation_zs must be specified.'", ")", "else", ":", "zs", "=", "anion_zs", "+", "cation_zs", "zs", "=", "zs", "+", "[", "1", "-", "sum", "(", "zs", ")", "]", "impacts", "=", "[", "zi", "*", "ci", "for", "zi", ",", "ci", "in", "zip", "(", "zs", ",", "charges", ")", "]", "balance_error", "=", 
"sum", "(", "impacts", ")", "if", "abs", "(", "balance_error", ")", "<", "1E-7", ":", "anion_zs", "=", "zs", "[", "0", ":", "n_anions", "]", "cation_zs", "=", "zs", "[", "n_anions", ":", "n_cations", "+", "n_anions", "]", "z_water", "=", "zs", "[", "-", "1", "]", "return", "anions", ",", "cations", ",", "anion_zs", ",", "cation_zs", ",", "z_water", "if", "'dominant'", "in", "method", ":", "anion_zs", ",", "cation_zs", ",", "z_water", "=", "ion_balance_dominant", "(", "impacts", ",", "balance_error", ",", "charges", ",", "zs", ",", "n_anions", ",", "n_cations", ",", "method", ")", "return", "anions", ",", "cations", ",", "anion_zs", ",", "cation_zs", ",", "z_water", "elif", "'proportional'", "in", "method", ":", "anion_zs", ",", "cation_zs", ",", "z_water", "=", "ion_balance_proportional", "(", "anion_charges", ",", "cation_charges", ",", "zs", ",", "n_anions", ",", "n_cations", ",", "balance_error", ",", "method", ")", "return", "anions", ",", "cations", ",", "anion_zs", ",", "cation_zs", ",", "z_water", "elif", "method", "==", "'Na or Cl increase'", ":", "increase", "=", "True", "if", "balance_error", "<", "0", ":", "selected_ion", "=", "pubchem_db", ".", "search_name", "(", "'Na+'", ")", "else", ":", "selected_ion", "=", "pubchem_db", ".", "search_name", "(", "'Cl-'", ")", "elif", "method", "==", "'Na or Cl decrease'", ":", "increase", "=", "False", "if", "balance_error", ">", "0", ":", "selected_ion", "=", "pubchem_db", ".", "search_name", "(", "'Na+'", ")", "else", ":", "selected_ion", "=", "pubchem_db", ".", "search_name", "(", "'Cl-'", ")", "# All of the below work with the variable selected_ion", "elif", "method", "==", "'adjust'", ":", "# A single ion will be increase or decreased to fix the balance automatically", "increase", "=", "None", "elif", "method", "==", "'increase'", ":", "increase", "=", "True", "# Raise exception if approach doesn't work", "elif", "method", "==", "'decrease'", ":", "increase", "=", "False", "# Raise exception if approach doesn't 
work", "elif", "method", "==", "'makeup'", ":", "# selected ion starts out as a tuple in this case; always adding the compound", "increase", "=", "True", "if", "balance_error", "<", "0", ":", "selected_ion", "=", "selected_ion", "[", "1", "]", "else", ":", "selected_ion", "=", "selected_ion", "[", "0", "]", "else", ":", "raise", "Exception", "(", "'Method not recognized'", ")", "if", "selected_ion", "is", "None", ":", "raise", "Exception", "(", "\"For methods 'adjust', 'increase', 'decrease', and \"", "\"'makeup', an ion must be specified with the \"", "\"`selected_ion` parameter\"", ")", "anion_zs", ",", "cation_zs", ",", "z_water", "=", "ion_balance_adjust_wrapper", "(", "charges", ",", "zs", ",", "n_anions", ",", "n_cations", ",", "anions", ",", "cations", ",", "selected_ion", ",", "increase", "=", "increase", ")", "return", "anions", ",", "cations", ",", "anion_zs", ",", "cation_zs", ",", "z_water" ]
46.468182
23.295455
def run( self ): """ Interact with the blockchain peer, until we get a socket error or we exit the loop explicitly. The order of operations is: * send version * receive version * send verack * send getdata * receive blocks * for each block: * for each transaction with nulldata: * for each input: * get the transaction that produced the consumed input Return True on success Return False on error """ log.debug("Segwit support: {}".format(get_features('segwit'))) self.begin() try: self.loop() except socket.error, se: if not self.finished: # unexpected log.exception(se) return False # fetch remaining sender transactions try: self.fetch_sender_txs() except Exception, e: log.exception(e) return False # should be done now try: self.block_data_sanity_checks() except AssertionError, ae: log.exception(ae) return False return True
[ "def", "run", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Segwit support: {}\"", ".", "format", "(", "get_features", "(", "'segwit'", ")", ")", ")", "self", ".", "begin", "(", ")", "try", ":", "self", ".", "loop", "(", ")", "except", "socket", ".", "error", ",", "se", ":", "if", "not", "self", ".", "finished", ":", "# unexpected", "log", ".", "exception", "(", "se", ")", "return", "False", "# fetch remaining sender transactions", "try", ":", "self", ".", "fetch_sender_txs", "(", ")", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "return", "False", "# should be done now", "try", ":", "self", ".", "block_data_sanity_checks", "(", ")", "except", "AssertionError", ",", "ae", ":", "log", ".", "exception", "(", "ae", ")", "return", "False", "return", "True" ]
24.625
17.416667
def get_tan_mechanisms(self): """ Get the available TAN mechanisms. Note: Only checks for HITANS versions listed in IMPLEMENTED_HKTAN_VERSIONS. :return: Dictionary of security_function: TwoStepParameters objects. """ retval = OrderedDict() for version in sorted(IMPLEMENTED_HKTAN_VERSIONS.keys()): for seg in self.bpd.find_segments('HITANS', version): for parameter in seg.parameter.twostep_parameters: if parameter.security_function in self.allowed_security_functions: retval[parameter.security_function] = parameter return retval
[ "def", "get_tan_mechanisms", "(", "self", ")", ":", "retval", "=", "OrderedDict", "(", ")", "for", "version", "in", "sorted", "(", "IMPLEMENTED_HKTAN_VERSIONS", ".", "keys", "(", ")", ")", ":", "for", "seg", "in", "self", ".", "bpd", ".", "find_segments", "(", "'HITANS'", ",", "version", ")", ":", "for", "parameter", "in", "seg", ".", "parameter", ".", "twostep_parameters", ":", "if", "parameter", ".", "security_function", "in", "self", ".", "allowed_security_functions", ":", "retval", "[", "parameter", ".", "security_function", "]", "=", "parameter", "return", "retval" ]
36.388889
26.277778
def keys_to_snake_case(camel_case_dict): """ Make a copy of a dictionary with all keys converted to snake case. This is just calls to_snake_case on each of the keys in the dictionary and returns a new dictionary. :param camel_case_dict: Dictionary with the keys to convert. :type camel_case_dict: Dictionary. :return: Dictionary with the keys converted to snake case. """ return dict((to_snake_case(key), value) for (key, value) in camel_case_dict.items())
[ "def", "keys_to_snake_case", "(", "camel_case_dict", ")", ":", "return", "dict", "(", "(", "to_snake_case", "(", "key", ")", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "camel_case_dict", ".", "items", "(", ")", ")" ]
43.636364
24.545455
def start(self, *args): """ Start the daemon """ if os.path.exists(self.pidfile): msg = "pidfile exists. Exit.\n" sys.stderr.write(msg % self.pidfile) sys.exit(1) self.daemonize() self.run(*args)
[ "def", "start", "(", "self", ",", "*", "args", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "pidfile", ")", ":", "msg", "=", "\"pidfile exists. Exit.\\n\"", "sys", ".", "stderr", ".", "write", "(", "msg", "%", "self", ".", "pidfile", ")", "sys", ".", "exit", "(", "1", ")", "self", ".", "daemonize", "(", ")", "self", ".", "run", "(", "*", "args", ")" ]
22.583333
14.416667
def prepare(self, node): """ Initialise arguments effects as this analysis in inter-procedural. Initialisation done for Pythonic functions and default values set for user defined functions. """ super(ArgumentReadOnce, self).prepare(node) # global functions init for n in self.global_declarations.values(): fe = ArgumentReadOnce.FunctionEffects(n) self.node_to_functioneffect[n] = fe self.result.add(fe) # Pythonic functions init def save_effect(module): """ Recursively save read once effect for Pythonic functions. """ for intr in module.values(): if isinstance(intr, dict): # Submodule case save_effect(intr) else: fe = ArgumentReadOnce.FunctionEffects(intr) self.node_to_functioneffect[intr] = fe self.result.add(fe) if isinstance(intr, intrinsic.Class): # Class case save_effect(intr.fields) for module in MODULES.values(): save_effect(module)
[ "def", "prepare", "(", "self", ",", "node", ")", ":", "super", "(", "ArgumentReadOnce", ",", "self", ")", ".", "prepare", "(", "node", ")", "# global functions init", "for", "n", "in", "self", ".", "global_declarations", ".", "values", "(", ")", ":", "fe", "=", "ArgumentReadOnce", ".", "FunctionEffects", "(", "n", ")", "self", ".", "node_to_functioneffect", "[", "n", "]", "=", "fe", "self", ".", "result", ".", "add", "(", "fe", ")", "# Pythonic functions init", "def", "save_effect", "(", "module", ")", ":", "\"\"\" Recursively save read once effect for Pythonic functions. \"\"\"", "for", "intr", "in", "module", ".", "values", "(", ")", ":", "if", "isinstance", "(", "intr", ",", "dict", ")", ":", "# Submodule case", "save_effect", "(", "intr", ")", "else", ":", "fe", "=", "ArgumentReadOnce", ".", "FunctionEffects", "(", "intr", ")", "self", ".", "node_to_functioneffect", "[", "intr", "]", "=", "fe", "self", ".", "result", ".", "add", "(", "fe", ")", "if", "isinstance", "(", "intr", ",", "intrinsic", ".", "Class", ")", ":", "# Class case", "save_effect", "(", "intr", ".", "fields", ")", "for", "module", "in", "MODULES", ".", "values", "(", ")", ":", "save_effect", "(", "module", ")" ]
39.310345
14.586207
def f_load(self, name=None, index=None, as_new=False, load_parameters=pypetconstants.LOAD_DATA, load_derived_parameters=pypetconstants.LOAD_SKELETON, load_results=pypetconstants.LOAD_SKELETON, load_other_data=pypetconstants.LOAD_SKELETON, recursive=True, load_data=None, max_depth=None, force=False, dynamic_imports=None, with_run_information=True, with_meta_data=True, storage_service=None, **kwargs): """Loads a trajectory via the storage service. If you want to load individual results or parameters manually, you can take a look at :func:`~pypet.trajectory.Trajectory.f_load_items`. To only load subtrees check out :func:`~pypet.naturalnaming.NNGroupNode.f_load_child`. For `f_load` you can pass the following arguments: :param name: Name of the trajectory to be loaded. If no name or index is specified the current name of the trajectory is used. :param index: If you don't specify a name you can specify an integer index instead. The corresponding trajectory in the hdf5 file at the index position is loaded (counting starts with 0). Negative indices are also allowed counting in reverse order. For instance, `-1` refers to the last trajectory in the file, `-2` to the second last, and so on. :param as_new: Whether you want to rerun the experiments. So the trajectory is loaded only with parameters. The current trajectory name is kept in this case, which should be different from the trajectory name specified in the input parameter `name`. If you load `as_new=True` all parameters are unlocked. If you load `as_new=False` the current trajectory is replaced by the one on disk, i.e. name, timestamp, formatted time etc. are all taken from disk. :param load_parameters: How parameters and config items are loaded :param load_derived_parameters: How derived parameters are loaded :param load_results: How results are loaded You can specify how to load the parameters, derived parameters and results as follows: * :const:`pypet.pypetconstants.LOAD_NOTHING`: (0) Nothing is loaded. 
* :const:`pypet.pypetconstants.LOAD_SKELETON`: (1) The skeleton is loaded including annotations (See :ref:`more-on-annotations`). This means that only empty *parameter* and *result* objects will be created and you can manually load the data into them afterwards. Note that :class:`pypet.annotations.Annotations` do not count as data and they will be loaded because they are assumed to be small. * :const:`pypet.pypetconstants.LOAD_DATA`: (2) The whole data is loaded. Note in case you have non-empty leaves already in RAM, these are left untouched. * :const:`pypet.pypetconstants.OVERWRITE_DATA`: (3) As before, but non-empty nodes are emptied and reloaded. Note that in all cases except :const:`pypet.pypetconstants.LOAD_NOTHING`, annotations will be reloaded if the corresponding instance is created or the annotations of an existing instance were emptied before. :param recursive: If data should be loaded recursively. If set to `None`, this is equivalent to set all data loading to `:const:`pypet.pypetconstants.LOAD_NOTHING`. :param load_data: As the above, per default set to `None`. If not `None` the setting of `load_data` will overwrite the settings of `load_parameters`, `load_derived_parameters`, `load_results`, and `load_other_data`. This is more or less or shortcut if all types should be loaded the same. :param max_depth: Maximum depth to load nodes (inclusive). :param force: *pypet* will refuse to load trajectories that have been created using *pypet* with a different version number or a different python version. To force the load of a trajectory from a previous version simply set ``force = True``. Note that it is not checked if other versions of packages differ from previous experiments, i.e. numpy, scipy, etc. But you can check for this manually. The versions of other packages can be found under ``'config.environment.name_of_environment.versions.package_name'``. 
:param dynamic_imports: If you've written a custom parameter that needs to be loaded dynamically during runtime, this needs to be specified here as a list of classes or strings naming classes and there module paths. For example: `dynamic_imports = ['pypet.parameter.PickleParameter',MyCustomParameter]` If you only have a single class to import, you do not need the list brackets: `dynamic_imports = 'pypet.parameter.PickleParameter'` The classes passed here are added for good and will be kept by the trajectory. Please add your dynamically imported classes only once. :param with_run_information: If information about the individual runs should be loaded. If you have many runs, like 1,000,000 or more you can spare time by setting `with_run_information=False`. Note that `f_get_run_information` and `f_idx_to_run` do not work in such a case. Moreover, setting `v_idx` does not work either. If you load the trajectory without this information, be careful, this is not recommended. :param wiht_meta_data: If meta data should be loaded. :param storage_service: Pass a storage service used by the trajectory. Alternatively pass a constructor and other ``**kwargs`` are passed onto the constructor. Leave `None` in combination with using no other kwargs, if you don't want to change the service the trajectory is currently using. :param kwargs: Other arguments passed to the storage service constructor. Don't pass any other kwargs and ``storage_service=None``, if you don't want to change the current service. 
""" # Do some argument validity checks first if name is None and index is None: name = self.v_name if as_new: load_parameters = pypetconstants.LOAD_DATA load_derived_parameters = pypetconstants.LOAD_NOTHING load_results = pypetconstants.LOAD_NOTHING load_other_data = pypetconstants.LOAD_NOTHING unused_kwargs = set(kwargs.keys()) if self.v_storage_service is None or storage_service is not None or len(kwargs) > 0: self._storage_service, unused_kwargs = storage_factory(storage_service=storage_service, trajectory=self, **kwargs) if len(unused_kwargs) > 0: raise ValueError('The following keyword arguments were not used: `%s`' % str(unused_kwargs)) if dynamic_imports is not None: self.f_add_to_dynamic_imports(dynamic_imports) if load_data is not None: load_parameters = load_data load_derived_parameters = load_data load_results = load_data load_other_data = load_data self._storage_service.load(pypetconstants.TRAJECTORY, self, trajectory_name=name, trajectory_index=index, as_new=as_new, load_parameters=load_parameters, load_derived_parameters=load_derived_parameters, load_results=load_results, load_other_data=load_other_data, recursive=recursive, max_depth=max_depth, with_run_information=with_run_information, with_meta_data=with_meta_data, force=force) # If a trajectory is newly loaded, all parameters are unlocked. if as_new: for param in self._parameters.values(): param.f_unlock()
[ "def", "f_load", "(", "self", ",", "name", "=", "None", ",", "index", "=", "None", ",", "as_new", "=", "False", ",", "load_parameters", "=", "pypetconstants", ".", "LOAD_DATA", ",", "load_derived_parameters", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "load_results", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "load_other_data", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "recursive", "=", "True", ",", "load_data", "=", "None", ",", "max_depth", "=", "None", ",", "force", "=", "False", ",", "dynamic_imports", "=", "None", ",", "with_run_information", "=", "True", ",", "with_meta_data", "=", "True", ",", "storage_service", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Do some argument validity checks first", "if", "name", "is", "None", "and", "index", "is", "None", ":", "name", "=", "self", ".", "v_name", "if", "as_new", ":", "load_parameters", "=", "pypetconstants", ".", "LOAD_DATA", "load_derived_parameters", "=", "pypetconstants", ".", "LOAD_NOTHING", "load_results", "=", "pypetconstants", ".", "LOAD_NOTHING", "load_other_data", "=", "pypetconstants", ".", "LOAD_NOTHING", "unused_kwargs", "=", "set", "(", "kwargs", ".", "keys", "(", ")", ")", "if", "self", ".", "v_storage_service", "is", "None", "or", "storage_service", "is", "not", "None", "or", "len", "(", "kwargs", ")", ">", "0", ":", "self", ".", "_storage_service", ",", "unused_kwargs", "=", "storage_factory", "(", "storage_service", "=", "storage_service", ",", "trajectory", "=", "self", ",", "*", "*", "kwargs", ")", "if", "len", "(", "unused_kwargs", ")", ">", "0", ":", "raise", "ValueError", "(", "'The following keyword arguments were not used: `%s`'", "%", "str", "(", "unused_kwargs", ")", ")", "if", "dynamic_imports", "is", "not", "None", ":", "self", ".", "f_add_to_dynamic_imports", "(", "dynamic_imports", ")", "if", "load_data", "is", "not", "None", ":", "load_parameters", "=", "load_data", "load_derived_parameters", "=", "load_data", "load_results", "=", "load_data", 
"load_other_data", "=", "load_data", "self", ".", "_storage_service", ".", "load", "(", "pypetconstants", ".", "TRAJECTORY", ",", "self", ",", "trajectory_name", "=", "name", ",", "trajectory_index", "=", "index", ",", "as_new", "=", "as_new", ",", "load_parameters", "=", "load_parameters", ",", "load_derived_parameters", "=", "load_derived_parameters", ",", "load_results", "=", "load_results", ",", "load_other_data", "=", "load_other_data", ",", "recursive", "=", "recursive", ",", "max_depth", "=", "max_depth", ",", "with_run_information", "=", "with_run_information", ",", "with_meta_data", "=", "with_meta_data", ",", "force", "=", "force", ")", "# If a trajectory is newly loaded, all parameters are unlocked.", "if", "as_new", ":", "for", "param", "in", "self", ".", "_parameters", ".", "values", "(", ")", ":", "param", ".", "f_unlock", "(", ")" ]
45.797872
30.601064
async def get_supported_playback_functions( self, uri="" ) -> List[SupportedFunctions]: """Return list of inputs and their supported functions.""" return [ SupportedFunctions.make(**x) for x in await self.services["avContent"]["getSupportedPlaybackFunction"]( uri=uri ) ]
[ "async", "def", "get_supported_playback_functions", "(", "self", ",", "uri", "=", "\"\"", ")", "->", "List", "[", "SupportedFunctions", "]", ":", "return", "[", "SupportedFunctions", ".", "make", "(", "*", "*", "x", ")", "for", "x", "in", "await", "self", ".", "services", "[", "\"avContent\"", "]", "[", "\"getSupportedPlaybackFunction\"", "]", "(", "uri", "=", "uri", ")", "]" ]
35
17.4
def create(self, handle=None, handle_type=None, **args): """ Creates an ontology based on a handle Handle is one of the following - `FILENAME.json` : creates an ontology from an obographs json file - `obo:ONTID` : E.g. obo:pato - creates an ontology from obolibrary PURL (requires owltools) - `ONTID` : E.g. 'pato' - creates an ontology from a remote SPARQL query Arguments --------- handle : str specifies how to retrieve the ontology info """ if handle is None: self.test = self.test+1 logging.info("T: "+str(self.test)) global default_ontology if default_ontology is None: logging.info("Creating new instance of default ontology") default_ontology = create_ontology(default_ontology_handle) logging.info("Using default_ontology") return default_ontology return create_ontology(handle, **args)
[ "def", "create", "(", "self", ",", "handle", "=", "None", ",", "handle_type", "=", "None", ",", "*", "*", "args", ")", ":", "if", "handle", "is", "None", ":", "self", ".", "test", "=", "self", ".", "test", "+", "1", "logging", ".", "info", "(", "\"T: \"", "+", "str", "(", "self", ".", "test", ")", ")", "global", "default_ontology", "if", "default_ontology", "is", "None", ":", "logging", ".", "info", "(", "\"Creating new instance of default ontology\"", ")", "default_ontology", "=", "create_ontology", "(", "default_ontology_handle", ")", "logging", ".", "info", "(", "\"Using default_ontology\"", ")", "return", "default_ontology", "return", "create_ontology", "(", "handle", ",", "*", "*", "args", ")" ]
38.384615
20.384615
def subscribe_to_quorum_channel(self): """In case the experiment enforces a quorum, listen for notifications before creating Partipant objects. """ from dallinger.experiment_server.sockets import chat_backend self.log("Bot subscribing to quorum channel.") chat_backend.subscribe(self, "quorum")
[ "def", "subscribe_to_quorum_channel", "(", "self", ")", ":", "from", "dallinger", ".", "experiment_server", ".", "sockets", "import", "chat_backend", "self", ".", "log", "(", "\"Bot subscribing to quorum channel.\"", ")", "chat_backend", ".", "subscribe", "(", "self", ",", "\"quorum\"", ")" ]
42
11.5
def _build_dictionary(self, results): """ Build model dictionary keyed by the relation's foreign key. :param results: The results :type results: Collection :rtype: dict """ foreign = self._foreign_key dictionary = {} for result in results: key = getattr(result.pivot, foreign) if key not in dictionary: dictionary[key] = [] dictionary[key].append(result) return dictionary
[ "def", "_build_dictionary", "(", "self", ",", "results", ")", ":", "foreign", "=", "self", ".", "_foreign_key", "dictionary", "=", "{", "}", "for", "result", "in", "results", ":", "key", "=", "getattr", "(", "result", ".", "pivot", ",", "foreign", ")", "if", "key", "not", "in", "dictionary", ":", "dictionary", "[", "key", "]", "=", "[", "]", "dictionary", "[", "key", "]", ".", "append", "(", "result", ")", "return", "dictionary" ]
23.333333
17.428571
def WheelUp(wheelTimes: int = 1, interval: float = 0.05, waitTime: float = OPERATION_WAIT_TIME) -> None: """ Simulate mouse wheel up. wheelTimes: int. interval: float. waitTime: float. """ for i in range(wheelTimes): mouse_event(MouseEventFlag.Wheel, 0, 0, 120, 0) #WHEEL_DELTA=120 time.sleep(interval) time.sleep(waitTime)
[ "def", "WheelUp", "(", "wheelTimes", ":", "int", "=", "1", ",", "interval", ":", "float", "=", "0.05", ",", "waitTime", ":", "float", "=", "OPERATION_WAIT_TIME", ")", "->", "None", ":", "for", "i", "in", "range", "(", "wheelTimes", ")", ":", "mouse_event", "(", "MouseEventFlag", ".", "Wheel", ",", "0", ",", "0", ",", "120", ",", "0", ")", "#WHEEL_DELTA=120", "time", ".", "sleep", "(", "interval", ")", "time", ".", "sleep", "(", "waitTime", ")" ]
32.818182
18.636364
def read_queue(self): """ This is responsible for reading events off the queue, and sending notifications to users, via email or SMS. The following are necessary for AWS access: RESTCLIENTS_AMAZON_AWS_ACCESS_KEY RESTCLIENTS_AMAZON_AWS_SECRET_KEY RESTCLIENTS_AMAZON_QUEUE - the AWS Queue name you want to read events from RESTCLIENTS_AMAZON_SQS_DAO_CLASS """ queue = AmazonSQS().create_queue(settings.RESTCLIENTS_AMAZON_QUEUE) queue.set_message_class(RawMessage) message = queue.read() if message is None: return body = message.get_body() queue.delete_message(message) return body
[ "def", "read_queue", "(", "self", ")", ":", "queue", "=", "AmazonSQS", "(", ")", ".", "create_queue", "(", "settings", ".", "RESTCLIENTS_AMAZON_QUEUE", ")", "queue", ".", "set_message_class", "(", "RawMessage", ")", "message", "=", "queue", ".", "read", "(", ")", "if", "message", "is", "None", ":", "return", "body", "=", "message", ".", "get_body", "(", ")", "queue", ".", "delete_message", "(", "message", ")", "return", "body" ]
35.45
14.65
def get_converted(self, symbol, units='CAD', system=None, tag=None): """ Uses a Symbol's Dataframe, to build a new Dataframe, with the data converted to the new units Parameters ---------- symbol : str or tuple of the form (Dataframe, str) String representing a symbol's name, or a dataframe with the data required to be converted. If supplying a dataframe, units must be passed. units : str, optional Specify the units to convert the symbol to, default to CAD system : str, optional If None, the default system specified at instantiation is used. System defines which conversion approach to take. tag : str, optional Tags define which set of conversion data is used. If None, the default tag specified at instantiation is used. """ if isinstance(symbol, (str, unicode)): sym = self.get(symbol) df = sym.df curu = sym.units requ = units elif isinstance(symbol, tuple): df = symbol[0] curu = symbol[1] requ = units else: raise TypeError("Expected str or (DataFrame, str), found {}".format(type(symbol))) system = system or self.default_system tag = tag or self.default_tag conv = self.converters[system][tag] newdf = conv.convert(df, curu, requ) newdf = pd.merge(df, newdf, left_index=True, right_index=True) newdf = newdf[df.columns[0] + "_y"].to_frame() newdf.columns = df.columns return newdf
[ "def", "get_converted", "(", "self", ",", "symbol", ",", "units", "=", "'CAD'", ",", "system", "=", "None", ",", "tag", "=", "None", ")", ":", "if", "isinstance", "(", "symbol", ",", "(", "str", ",", "unicode", ")", ")", ":", "sym", "=", "self", ".", "get", "(", "symbol", ")", "df", "=", "sym", ".", "df", "curu", "=", "sym", ".", "units", "requ", "=", "units", "elif", "isinstance", "(", "symbol", ",", "tuple", ")", ":", "df", "=", "symbol", "[", "0", "]", "curu", "=", "symbol", "[", "1", "]", "requ", "=", "units", "else", ":", "raise", "TypeError", "(", "\"Expected str or (DataFrame, str), found {}\"", ".", "format", "(", "type", "(", "symbol", ")", ")", ")", "system", "=", "system", "or", "self", ".", "default_system", "tag", "=", "tag", "or", "self", ".", "default_tag", "conv", "=", "self", ".", "converters", "[", "system", "]", "[", "tag", "]", "newdf", "=", "conv", ".", "convert", "(", "df", ",", "curu", ",", "requ", ")", "newdf", "=", "pd", ".", "merge", "(", "df", ",", "newdf", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ")", "newdf", "=", "newdf", "[", "df", ".", "columns", "[", "0", "]", "+", "\"_y\"", "]", ".", "to_frame", "(", ")", "newdf", ".", "columns", "=", "df", ".", "columns", "return", "newdf" ]
40.214286
17.928571
def register(workflow_id, workflow_version): """Register an (empty) workflow definition in the database.""" name = "workflow_definitions:{}:{}".format(workflow_id, workflow_version) workflow_definition = dict(id=workflow_id, version=workflow_version, stages=[]) # DB.set_hash_values(name, workflow_definition) DB.save_dict(name, workflow_definition, hierarchical=False)
[ "def", "register", "(", "workflow_id", ",", "workflow_version", ")", ":", "name", "=", "\"workflow_definitions:{}:{}\"", ".", "format", "(", "workflow_id", ",", "workflow_version", ")", "workflow_definition", "=", "dict", "(", "id", "=", "workflow_id", ",", "version", "=", "workflow_version", ",", "stages", "=", "[", "]", ")", "# DB.set_hash_values(name, workflow_definition)", "DB", ".", "save_dict", "(", "name", ",", "workflow_definition", ",", "hierarchical", "=", "False", ")" ]
59.142857
15.428571
def remove_all_listeners(self, event=None): """Remove all functions for all events, or one event if one is specifed. :param event: Optional event you wish to remove all functions from """ if event is not None: self._registered_events[event] = OrderedDict() else: self._registered_events = defaultdict(OrderedDict)
[ "def", "remove_all_listeners", "(", "self", ",", "event", "=", "None", ")", ":", "if", "event", "is", "not", "None", ":", "self", ".", "_registered_events", "[", "event", "]", "=", "OrderedDict", "(", ")", "else", ":", "self", ".", "_registered_events", "=", "defaultdict", "(", "OrderedDict", ")" ]
41.111111
17.222222
def convertSequenceMachineSequence(generatedSequences): """ Convert a sequence from the SequenceMachine into a list of sequences, such that each sequence is a list of set of SDRs. """ sequenceList = [] currentSequence = [] for s in generatedSequences: if s is None: sequenceList.append(currentSequence) currentSequence = [] else: currentSequence.append(s) return sequenceList
[ "def", "convertSequenceMachineSequence", "(", "generatedSequences", ")", ":", "sequenceList", "=", "[", "]", "currentSequence", "=", "[", "]", "for", "s", "in", "generatedSequences", ":", "if", "s", "is", "None", ":", "sequenceList", ".", "append", "(", "currentSequence", ")", "currentSequence", "=", "[", "]", "else", ":", "currentSequence", ".", "append", "(", "s", ")", "return", "sequenceList" ]
26.933333
16.266667
def cb_histogram(fastq, umi_histogram): ''' Counts the number of reads for each cellular barcode Expects formatted fastq files. ''' annotations = detect_fastq_annotations(fastq) re_string = construct_transformed_regex(annotations) parser_re = re.compile(re_string) cb_counter = collections.Counter() umi_counter = collections.Counter() for read in read_fastq(fastq): match = parser_re.search(read).groupdict() cb = match['CB'] cb_counter[cb] += 1 if umi_histogram: umi = match['MB'] umi_counter[(cb, umi)] += 1 for bc, count in cb_counter.most_common(): sys.stdout.write('{}\t{}\n'.format(bc, count)) if umi_histogram: with open(umi_histogram, "w") as umi_handle: for cbumi, count in umi_counter.most_common(): umi_handle.write('{}\t{}\t{}\n'.format(cbumi[0], cbumi[1], count))
[ "def", "cb_histogram", "(", "fastq", ",", "umi_histogram", ")", ":", "annotations", "=", "detect_fastq_annotations", "(", "fastq", ")", "re_string", "=", "construct_transformed_regex", "(", "annotations", ")", "parser_re", "=", "re", ".", "compile", "(", "re_string", ")", "cb_counter", "=", "collections", ".", "Counter", "(", ")", "umi_counter", "=", "collections", ".", "Counter", "(", ")", "for", "read", "in", "read_fastq", "(", "fastq", ")", ":", "match", "=", "parser_re", ".", "search", "(", "read", ")", ".", "groupdict", "(", ")", "cb", "=", "match", "[", "'CB'", "]", "cb_counter", "[", "cb", "]", "+=", "1", "if", "umi_histogram", ":", "umi", "=", "match", "[", "'MB'", "]", "umi_counter", "[", "(", "cb", ",", "umi", ")", "]", "+=", "1", "for", "bc", ",", "count", "in", "cb_counter", ".", "most_common", "(", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'{}\\t{}\\n'", ".", "format", "(", "bc", ",", "count", ")", ")", "if", "umi_histogram", ":", "with", "open", "(", "umi_histogram", ",", "\"w\"", ")", "as", "umi_handle", ":", "for", "cbumi", ",", "count", "in", "umi_counter", ".", "most_common", "(", ")", ":", "umi_handle", ".", "write", "(", "'{}\\t{}\\t{}\\n'", ".", "format", "(", "cbumi", "[", "0", "]", ",", "cbumi", "[", "1", "]", ",", "count", ")", ")" ]
33.333333
17.555556
def runSharedFeatures(noiseLevel=None, profile=False): """ Runs a simple experiment where three objects share a number of location, feature pairs. Parameters: ---------------------------- @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference """ exp = L4L2Experiment( "shared_features", enableLateralSP=True, enableFeedForwardSP=True ) pairs = createThreeObjects() objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024 ) for object in pairs: objects.addObject(object) exp.learnObjects(objects.provideObjectsToLearn()) if profile: exp.printProfile() inferConfig = { "numSteps": 10, "noiseLevel": noiseLevel, "pairs": { 0: zip(range(10), range(10)) } } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0) if profile: exp.printProfile() exp.plotInferenceStats( fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"], )
[ "def", "runSharedFeatures", "(", "noiseLevel", "=", "None", ",", "profile", "=", "False", ")", ":", "exp", "=", "L4L2Experiment", "(", "\"shared_features\"", ",", "enableLateralSP", "=", "True", ",", "enableFeedForwardSP", "=", "True", ")", "pairs", "=", "createThreeObjects", "(", ")", "objects", "=", "createObjectMachine", "(", "machineType", "=", "\"simple\"", ",", "numInputBits", "=", "20", ",", "sensorInputSize", "=", "1024", ",", "externalInputSize", "=", "1024", ")", "for", "object", "in", "pairs", ":", "objects", ".", "addObject", "(", "object", ")", "exp", ".", "learnObjects", "(", "objects", ".", "provideObjectsToLearn", "(", ")", ")", "if", "profile", ":", "exp", ".", "printProfile", "(", ")", "inferConfig", "=", "{", "\"numSteps\"", ":", "10", ",", "\"noiseLevel\"", ":", "noiseLevel", ",", "\"pairs\"", ":", "{", "0", ":", "zip", "(", "range", "(", "10", ")", ",", "range", "(", "10", ")", ")", "}", "}", "exp", ".", "infer", "(", "objects", ".", "provideObjectToInfer", "(", "inferConfig", ")", ",", "objectName", "=", "0", ")", "if", "profile", ":", "exp", ".", "printProfile", "(", ")", "exp", ".", "plotInferenceStats", "(", "fields", "=", "[", "\"L2 Representation\"", ",", "\"Overlap L2 with object\"", ",", "\"L4 Representation\"", "]", ",", ")" ]
22.764706
22.215686
def indentsize(line): """Return the indent size, in spaces, at the start of a line of text.""" expline = string.expandtabs(line) return len(expline) - len(string.lstrip(expline))
[ "def", "indentsize", "(", "line", ")", ":", "expline", "=", "string", ".", "expandtabs", "(", "line", ")", "return", "len", "(", "expline", ")", "-", "len", "(", "string", ".", "lstrip", "(", "expline", ")", ")" ]
46.75
8.75
def uninstall(self): ''' Uninstall the module finder. If not installed, this will do nothing. After uninstallation, none of the newly loaded modules will be decorated (that is, everything will be back to normal). ''' if self.installed: sys.meta_path.remove(self) # Reload all decorated items import_list = [] for name in self.__loaded_modules: del sys.modules[name] import_list.append(name) for name in import_list: __import__(name) self.__reset()
[ "def", "uninstall", "(", "self", ")", ":", "if", "self", ".", "installed", ":", "sys", ".", "meta_path", ".", "remove", "(", "self", ")", "# Reload all decorated items", "import_list", "=", "[", "]", "for", "name", "in", "self", ".", "__loaded_modules", ":", "del", "sys", ".", "modules", "[", "name", "]", "import_list", ".", "append", "(", "name", ")", "for", "name", "in", "import_list", ":", "__import__", "(", "name", ")", "self", ".", "__reset", "(", ")" ]
28.4
20.7
def dummyListOfDicts(size=100): """ returns a list (of the given size) of dicts with fake data. some dictionary keys are missing for some of the items. """ titles="ahp,halfwidth,peak,expT,expI,sweep".split(",") ld=[] #list of dicts for i in range(size): d={} for t in titles: if int(np.random.random(1)*100)>5: #5% of values are missing d[t]=float(np.random.random(1)*100) #random number 0-100 if t=="sweep" and "sweep" in d.keys(): d[t]=int(d[t]) ld.append(d) return ld
[ "def", "dummyListOfDicts", "(", "size", "=", "100", ")", ":", "titles", "=", "\"ahp,halfwidth,peak,expT,expI,sweep\"", ".", "split", "(", "\",\"", ")", "ld", "=", "[", "]", "#list of dicts", "for", "i", "in", "range", "(", "size", ")", ":", "d", "=", "{", "}", "for", "t", "in", "titles", ":", "if", "int", "(", "np", ".", "random", ".", "random", "(", "1", ")", "*", "100", ")", ">", "5", ":", "#5% of values are missing", "d", "[", "t", "]", "=", "float", "(", "np", ".", "random", ".", "random", "(", "1", ")", "*", "100", ")", "#random number 0-100", "if", "t", "==", "\"sweep\"", "and", "\"sweep\"", "in", "d", ".", "keys", "(", ")", ":", "d", "[", "t", "]", "=", "int", "(", "d", "[", "t", "]", ")", "ld", ".", "append", "(", "d", ")", "return", "ld" ]
35.4375
17.1875
def delete(self, value, key): """ Delete a value from a tree. """ # Base case: The empty tree cannot possibly have the desired value. if self is NULL: raise KeyError(value) direction = cmp(key(value), key(self.value)) # Because we lean to the left, the left case stands alone. if direction < 0: if (not self.left.red and self.left is not NULL and not self.left.left.red): self = self.move_red_left() # Delete towards the left. left = self.left.delete(value, key) self = self._replace(left=left) else: # If we currently lean to the left, lean to the right for now. if self.left.red: self = self.rotate_right() # Best case: The node on our right (which we just rotated there) is a # red link and also we were just holding the node to delete. In that # case, we just rotated NULL into our current node, and the node to # the right is the lone matching node to delete. if direction == 0 and self.right is NULL: return NULL # No? Okay. Move more reds to the right so that we can continue to # traverse in that direction. At *this* spot, we do have to confirm # that node.right is not NULL... if (not self.right.red and self.right is not NULL and not self.right.left.red): self = self.move_red_right() if direction > 0: # Delete towards the right. right = self.right.delete(value, key) self = self._replace(right=right) else: # Annoying case: The current node was the node to delete all # along! Use a right-handed minimum deletion. First find the # replacement value to rebuild the current node with, then delete # the replacement value from the right-side tree. Finally, create # the new node with the old value replaced and the replaced value # deleted. rnode = self.right while rnode is not NULL: rnode = rnode.left right, replacement = self.right.delete_min() self = self._replace(value=replacement, right=right) return self.balance()
[ "def", "delete", "(", "self", ",", "value", ",", "key", ")", ":", "# Base case: The empty tree cannot possibly have the desired value.", "if", "self", "is", "NULL", ":", "raise", "KeyError", "(", "value", ")", "direction", "=", "cmp", "(", "key", "(", "value", ")", ",", "key", "(", "self", ".", "value", ")", ")", "# Because we lean to the left, the left case stands alone.", "if", "direction", "<", "0", ":", "if", "(", "not", "self", ".", "left", ".", "red", "and", "self", ".", "left", "is", "not", "NULL", "and", "not", "self", ".", "left", ".", "left", ".", "red", ")", ":", "self", "=", "self", ".", "move_red_left", "(", ")", "# Delete towards the left.", "left", "=", "self", ".", "left", ".", "delete", "(", "value", ",", "key", ")", "self", "=", "self", ".", "_replace", "(", "left", "=", "left", ")", "else", ":", "# If we currently lean to the left, lean to the right for now.", "if", "self", ".", "left", ".", "red", ":", "self", "=", "self", ".", "rotate_right", "(", ")", "# Best case: The node on our right (which we just rotated there) is a", "# red link and also we were just holding the node to delete. In that", "# case, we just rotated NULL into our current node, and the node to", "# the right is the lone matching node to delete.", "if", "direction", "==", "0", "and", "self", ".", "right", "is", "NULL", ":", "return", "NULL", "# No? Okay. Move more reds to the right so that we can continue to", "# traverse in that direction. 
At *this* spot, we do have to confirm", "# that node.right is not NULL...", "if", "(", "not", "self", ".", "right", ".", "red", "and", "self", ".", "right", "is", "not", "NULL", "and", "not", "self", ".", "right", ".", "left", ".", "red", ")", ":", "self", "=", "self", ".", "move_red_right", "(", ")", "if", "direction", ">", "0", ":", "# Delete towards the right.", "right", "=", "self", ".", "right", ".", "delete", "(", "value", ",", "key", ")", "self", "=", "self", ".", "_replace", "(", "right", "=", "right", ")", "else", ":", "# Annoying case: The current node was the node to delete all", "# along! Use a right-handed minimum deletion. First find the", "# replacement value to rebuild the current node with, then delete", "# the replacement value from the right-side tree. Finally, create", "# the new node with the old value replaced and the replaced value", "# deleted.", "rnode", "=", "self", ".", "right", "while", "rnode", "is", "not", "NULL", ":", "rnode", "=", "rnode", ".", "left", "right", ",", "replacement", "=", "self", ".", "right", ".", "delete_min", "(", ")", "self", "=", "self", ".", "_replace", "(", "value", "=", "replacement", ",", "right", "=", "right", ")", "return", "self", ".", "balance", "(", ")" ]
41.271186
19.169492
def print_prediction (self, ptup, precision=2): """Print a summary of a predicted position. The argument *ptup* is a tuple returned by :meth:`predict`. It is printed to :data:`sys.stdout` in a reasonable format that uses Unicode characters. """ from . import ellipses bestra, bestdec, maj, min, pa = ptup f = ellipses.sigmascale (1) maj *= R2A min *= R2A pa *= R2D print_ ('position =', fmtradec (bestra, bestdec, precision=precision)) print_ ('err(1σ) = %.*f" × %.*f" @ %.0f°' % (precision, maj * f, precision, min * f, pa))
[ "def", "print_prediction", "(", "self", ",", "ptup", ",", "precision", "=", "2", ")", ":", "from", ".", "import", "ellipses", "bestra", ",", "bestdec", ",", "maj", ",", "min", ",", "pa", "=", "ptup", "f", "=", "ellipses", ".", "sigmascale", "(", "1", ")", "maj", "*=", "R2A", "min", "*=", "R2A", "pa", "*=", "R2D", "print_", "(", "'position ='", ",", "fmtradec", "(", "bestra", ",", "bestdec", ",", "precision", "=", "precision", ")", ")", "print_", "(", "'err(1σ) = %.*f\" × %.*f\" @ %.0f°' % ", "p", "e", "cision, m", "a", " * ", ",", "p", "r", "cision,", "", "min", "*", "f", ",", "pa", ")", ")" ]
35.263158
23.894737
def remove_selected_classification(self): """Remove selected item on hazard class form.""" removed_classes = self.hazard_class_form.selectedItems() current_item = self.hazard_class_form.currentItem() removed_index = self.hazard_class_form.indexFromItem(current_item) del self.classification[removed_index.row()] for item in removed_classes: self.hazard_class_form.takeItem( self.hazard_class_form.row(item))
[ "def", "remove_selected_classification", "(", "self", ")", ":", "removed_classes", "=", "self", ".", "hazard_class_form", ".", "selectedItems", "(", ")", "current_item", "=", "self", ".", "hazard_class_form", ".", "currentItem", "(", ")", "removed_index", "=", "self", ".", "hazard_class_form", ".", "indexFromItem", "(", "current_item", ")", "del", "self", ".", "classification", "[", "removed_index", ".", "row", "(", ")", "]", "for", "item", "in", "removed_classes", ":", "self", ".", "hazard_class_form", ".", "takeItem", "(", "self", ".", "hazard_class_form", ".", "row", "(", "item", ")", ")" ]
52.777778
11.888889
def getScriptLanguageSystems(feaFile): """Return dictionary keyed by Unicode script code containing lists of (OT_SCRIPT_TAG, [OT_LANGUAGE_TAG, ...]) tuples (excluding "DFLT"). """ languagesByScript = collections.OrderedDict() for ls in [ st for st in feaFile.statements if isinstance(st, ast.LanguageSystemStatement) ]: if ls.script == "DFLT": continue languagesByScript.setdefault(ls.script, []).append(ls.language) langSysMap = collections.OrderedDict() for script, languages in languagesByScript.items(): sc = unicodedata.ot_tag_to_script(script) langSysMap.setdefault(sc, []).append((script, languages)) return langSysMap
[ "def", "getScriptLanguageSystems", "(", "feaFile", ")", ":", "languagesByScript", "=", "collections", ".", "OrderedDict", "(", ")", "for", "ls", "in", "[", "st", "for", "st", "in", "feaFile", ".", "statements", "if", "isinstance", "(", "st", ",", "ast", ".", "LanguageSystemStatement", ")", "]", ":", "if", "ls", ".", "script", "==", "\"DFLT\"", ":", "continue", "languagesByScript", ".", "setdefault", "(", "ls", ".", "script", ",", "[", "]", ")", ".", "append", "(", "ls", ".", "language", ")", "langSysMap", "=", "collections", ".", "OrderedDict", "(", ")", "for", "script", ",", "languages", "in", "languagesByScript", ".", "items", "(", ")", ":", "sc", "=", "unicodedata", ".", "ot_tag_to_script", "(", "script", ")", "langSysMap", ".", "setdefault", "(", "sc", ",", "[", "]", ")", ".", "append", "(", "(", "script", ",", "languages", ")", ")", "return", "langSysMap" ]
37.473684
16.736842
def get(cls, name, raise_exc=True): """ Get the element by name. Does an exact match by element type. :param str name: name of element :param bool raise_exc: optionally disable exception. :raises ElementNotFound: if element does not exist :rtype: Element """ element = cls.objects.filter(name, exact_match=True).first() if \ name is not None else None if not element and raise_exc: raise ElementNotFound('Cannot find specified element: %s, type: ' '%s' % (name, cls.__name__)) return element
[ "def", "get", "(", "cls", ",", "name", ",", "raise_exc", "=", "True", ")", ":", "element", "=", "cls", ".", "objects", ".", "filter", "(", "name", ",", "exact_match", "=", "True", ")", ".", "first", "(", ")", "if", "name", "is", "not", "None", "else", "None", "if", "not", "element", "and", "raise_exc", ":", "raise", "ElementNotFound", "(", "'Cannot find specified element: %s, type: '", "'%s'", "%", "(", "name", ",", "cls", ".", "__name__", ")", ")", "return", "element" ]
40.466667
14.6
def _dispatch_cmd(self, handler, argv): """Introspect sub-command handler signature to determine how to dispatch the command. The raw handler provided by the base 'RawCmdln' class is still supported: def do_foo(self, argv): # 'argv' is the vector of command line args, argv[0] is # the command name itself (i.e. "foo" or an alias) pass In addition, if the handler has more than 2 arguments option processing is automatically done (using optparse): @cmdln.option('-v', '--verbose', action='store_true') def do_bar(self, subcmd, opts, *args): # subcmd = <"bar" or an alias> # opts = <an optparse.Values instance> if opts.verbose: print "lots of debugging output..." # args = <tuple of arguments> for arg in args: bar(arg) TODO: explain that "*args" can be other signatures as well. The `cmdln.option` decorator corresponds to an `add_option()` method call on an `optparse.OptionParser` instance. You can declare a specific number of arguments: @cmdln.option('-v', '--verbose', action='store_true') def do_bar2(self, subcmd, opts, bar_one, bar_two): #... and an appropriate error message will be raised/printed if the command is called with a different number of args. """ co_argcount = handler.__func__.__code__.co_argcount if co_argcount == 2: # handler ::= do_foo(self, argv) return handler(argv) elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...) try: optparser = handler.optparser except AttributeError: optparser = handler.__func__.optparser = SubCmdOptionParser() assert isinstance(optparser, SubCmdOptionParser) # apply subcommand options' defaults from config files, if any. subcmd = handler.__name__.split('do_', 1)[1] optparser.set_defaults(**self.get_option_defaults(subcmd)) optparser.set_cmdln_info(self, argv[0]) try: opts, args = optparser.parse_args(argv[1:]) except StopOptionProcessing: #TODO: this doesn't really fly for a replacement of # optparse.py behaviour, does it? 
return 0 # Normal command termination try: return handler(argv[0], opts, *args) except TypeError: _, ex, _ = sys.exc_info() # Some TypeError's are user errors: # do_foo() takes at least 4 arguments (3 given) # do_foo() takes at most 5 arguments (6 given) # do_foo() takes exactly 5 arguments (6 given) # do_foo() takes exactly 5 positional arguments (6 given) # Raise CmdlnUserError for these with a suitably # massaged error message. tb = sys.exc_info()[2] # the traceback object if tb.tb_next is not None: # If the traceback is more than one level deep, then the # TypeError do *not* happen on the "handler(...)" call # above. In that we don't want to handle it specially # here: it would falsely mask deeper code errors. raise msg = ex.args[0] match = _INCORRECT_NUM_ARGS_RE.search(msg) if match: msg = list(match.groups()) msg[1] = int(msg[1]) - 3 if msg[1] == 1: msg[2] = msg[2].replace("arguments", "argument") msg[3] = int(msg[3]) - 3 msg = ''.join(map(str, msg)) raise CmdlnUserError(msg) else: raise else: raise CmdlnError("incorrect argcount for %s(): takes %d, must " "take 2 for 'argv' signature or 3+ for 'opts' " "signature" % (handler.__name__, co_argcount))
[ "def", "_dispatch_cmd", "(", "self", ",", "handler", ",", "argv", ")", ":", "co_argcount", "=", "handler", ".", "__func__", ".", "__code__", ".", "co_argcount", "if", "co_argcount", "==", "2", ":", "# handler ::= do_foo(self, argv)", "return", "handler", "(", "argv", ")", "elif", "co_argcount", ">=", "3", ":", "# handler ::= do_foo(self, subcmd, opts, ...)", "try", ":", "optparser", "=", "handler", ".", "optparser", "except", "AttributeError", ":", "optparser", "=", "handler", ".", "__func__", ".", "optparser", "=", "SubCmdOptionParser", "(", ")", "assert", "isinstance", "(", "optparser", ",", "SubCmdOptionParser", ")", "# apply subcommand options' defaults from config files, if any.", "subcmd", "=", "handler", ".", "__name__", ".", "split", "(", "'do_'", ",", "1", ")", "[", "1", "]", "optparser", ".", "set_defaults", "(", "*", "*", "self", ".", "get_option_defaults", "(", "subcmd", ")", ")", "optparser", ".", "set_cmdln_info", "(", "self", ",", "argv", "[", "0", "]", ")", "try", ":", "opts", ",", "args", "=", "optparser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "except", "StopOptionProcessing", ":", "#TODO: this doesn't really fly for a replacement of", "# optparse.py behaviour, does it?", "return", "0", "# Normal command termination", "try", ":", "return", "handler", "(", "argv", "[", "0", "]", ",", "opts", ",", "*", "args", ")", "except", "TypeError", ":", "_", ",", "ex", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "# Some TypeError's are user errors:", "# do_foo() takes at least 4 arguments (3 given)", "# do_foo() takes at most 5 arguments (6 given)", "# do_foo() takes exactly 5 arguments (6 given)", "# do_foo() takes exactly 5 positional arguments (6 given)", "# Raise CmdlnUserError for these with a suitably", "# massaged error message.", "tb", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "# the traceback object", "if", "tb", ".", "tb_next", "is", "not", "None", ":", "# If the traceback is more than one level deep, then the", 
"# TypeError do *not* happen on the \"handler(...)\" call", "# above. In that we don't want to handle it specially", "# here: it would falsely mask deeper code errors.", "raise", "msg", "=", "ex", ".", "args", "[", "0", "]", "match", "=", "_INCORRECT_NUM_ARGS_RE", ".", "search", "(", "msg", ")", "if", "match", ":", "msg", "=", "list", "(", "match", ".", "groups", "(", ")", ")", "msg", "[", "1", "]", "=", "int", "(", "msg", "[", "1", "]", ")", "-", "3", "if", "msg", "[", "1", "]", "==", "1", ":", "msg", "[", "2", "]", "=", "msg", "[", "2", "]", ".", "replace", "(", "\"arguments\"", ",", "\"argument\"", ")", "msg", "[", "3", "]", "=", "int", "(", "msg", "[", "3", "]", ")", "-", "3", "msg", "=", "''", ".", "join", "(", "map", "(", "str", ",", "msg", ")", ")", "raise", "CmdlnUserError", "(", "msg", ")", "else", ":", "raise", "else", ":", "raise", "CmdlnError", "(", "\"incorrect argcount for %s(): takes %d, must \"", "\"take 2 for 'argv' signature or 3+ for 'opts' \"", "\"signature\"", "%", "(", "handler", ".", "__name__", ",", "co_argcount", ")", ")" ]
45.290323
20.387097
def interpretBitEncoding(bitEncoding): """Returns a floattype string and a numpy array type. :param bitEncoding: Must be either '64' or '32' :returns: (floattype, numpyType) """ if bitEncoding == '64': floattype = 'd' # 64-bit numpyType = numpy.float64 elif bitEncoding == '32': floattype = 'f' # 32-bit numpyType = numpy.float32 else: errorText = ''.join(['bitEncoding \'', bitEncoding, '\' not defined. ', 'Must be \'64\' or \'32\'' ]) raise TypeError(errorText) return (floattype, numpyType)
[ "def", "interpretBitEncoding", "(", "bitEncoding", ")", ":", "if", "bitEncoding", "==", "'64'", ":", "floattype", "=", "'d'", "# 64-bit", "numpyType", "=", "numpy", ".", "float64", "elif", "bitEncoding", "==", "'32'", ":", "floattype", "=", "'f'", "# 32-bit", "numpyType", "=", "numpy", ".", "float32", "else", ":", "errorText", "=", "''", ".", "join", "(", "[", "'bitEncoding \\''", ",", "bitEncoding", ",", "'\\' not defined. '", ",", "'Must be \\'64\\' or \\'32\\''", "]", ")", "raise", "TypeError", "(", "errorText", ")", "return", "(", "floattype", ",", "numpyType", ")" ]
32.421053
13.578947
def _create_translation_file(feature_folder, dataset_name, translation, formula_id2index): """ Write a loop-up file that contains the direct (record-wise) lookup information. Parameters ---------- feature_folder : Path to the feature files. dataset_name : 'traindata', 'validdata' or 'testdata'. translation : list of triples (raw data id, formula in latex, formula id) """ translationfilename = "%s/translation-%s.csv" % (feature_folder, dataset_name) with open(translationfilename, "w") as f: f.write("index,raw_data_id,latex,formula_id\n") for el in translation: f.write("%i,%i,%s,%i\n" % (formula_id2index[el[2]], el[0], el[1], el[2]))
[ "def", "_create_translation_file", "(", "feature_folder", ",", "dataset_name", ",", "translation", ",", "formula_id2index", ")", ":", "translationfilename", "=", "\"%s/translation-%s.csv\"", "%", "(", "feature_folder", ",", "dataset_name", ")", "with", "open", "(", "translationfilename", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"index,raw_data_id,latex,formula_id\\n\"", ")", "for", "el", "in", "translation", ":", "f", ".", "write", "(", "\"%i,%i,%s,%i\\n\"", "%", "(", "formula_id2index", "[", "el", "[", "2", "]", "]", ",", "el", "[", "0", "]", ",", "el", "[", "1", "]", ",", "el", "[", "2", "]", ")", ")" ]
37.166667
15
def _add_debugging_fields(gelf_dict, record): """Add debugging fields to the given ``gelf_dict`` :param gelf_dict: dictionary representation of a GELF log. :type gelf_dict: dict :param record: :class:`logging.LogRecord` to extract debugging fields from to insert into the given ``gelf_dict``. :type record: logging.LogRecord """ gelf_dict.update({ 'file': record.pathname, 'line': record.lineno, '_function': record.funcName, '_pid': record.process, '_thread_name': record.threadName, }) # record.processName was added in Python 2.6.2 pn = getattr(record, 'processName', None) if pn is not None: gelf_dict['_process_name'] = pn
[ "def", "_add_debugging_fields", "(", "gelf_dict", ",", "record", ")", ":", "gelf_dict", ".", "update", "(", "{", "'file'", ":", "record", ".", "pathname", ",", "'line'", ":", "record", ".", "lineno", ",", "'_function'", ":", "record", ".", "funcName", ",", "'_pid'", ":", "record", ".", "process", ",", "'_thread_name'", ":", "record", ".", "threadName", ",", "}", ")", "# record.processName was added in Python 2.6.2", "pn", "=", "getattr", "(", "record", ",", "'processName'", ",", "None", ")", "if", "pn", "is", "not", "None", ":", "gelf_dict", "[", "'_process_name'", "]", "=", "pn" ]
37.190476
13.428571
def filled_contour(self, min=None, max=None): """Get contour polygons between the given levels. Parameters ---------- min : numbers.Number or None The minimum data level of the contour polygon. If :obj:`None`, ``numpy.finfo(numpy.float64).min`` will be used. max : numbers.Number or None The maximum data level of the contour polygon. If :obj:`None`, ``numpy.finfo(numpy.float64).max`` will be used. Returns ------- : The result of the :attr:`formatter` called on the filled contour between `min` and `max`. """ # pylint: disable=redefined-builtin,redefined-outer-name # Get the contour vertices. if min is None: min = np.finfo(np.float64).min if max is None: max = np.finfo(np.float64).max vertices, codes = ( self._contour_generator.create_filled_contour(min, max)) return self.formatter((min, max), vertices, codes)
[ "def", "filled_contour", "(", "self", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "# pylint: disable=redefined-builtin,redefined-outer-name", "# Get the contour vertices.", "if", "min", "is", "None", ":", "min", "=", "np", ".", "finfo", "(", "np", ".", "float64", ")", ".", "min", "if", "max", "is", "None", ":", "max", "=", "np", ".", "finfo", "(", "np", ".", "float64", ")", ".", "max", "vertices", ",", "codes", "=", "(", "self", ".", "_contour_generator", ".", "create_filled_contour", "(", "min", ",", "max", ")", ")", "return", "self", ".", "formatter", "(", "(", "min", ",", "max", ")", ",", "vertices", ",", "codes", ")" ]
36.571429
19.071429
def unlink_from(self, provider): ''' 解绑特定第三方平台 ''' if type(provider) != str: raise TypeError('input should be a string') self.link_with(provider, None) # self._sync_auth_data(provider) return self
[ "def", "unlink_from", "(", "self", ",", "provider", ")", ":", "if", "type", "(", "provider", ")", "!=", "str", ":", "raise", "TypeError", "(", "'input should be a string'", ")", "self", ".", "link_with", "(", "provider", ",", "None", ")", "# self._sync_auth_data(provider)", "return", "self" ]
28.444444
14.888889
def _get_job_collection_name(self, process_name): """jobs are stored in 4 collections: hourly, daily, monthly and yearly; method looks for the proper job_collection base on process TIME_QUALIFIER""" qualifier = context.process_context[process_name].time_qualifier if qualifier == QUALIFIER_HOURLY: collection_name = COLLECTION_JOB_HOURLY elif qualifier == QUALIFIER_DAILY: collection_name = COLLECTION_JOB_DAILY elif qualifier == QUALIFIER_MONTHLY: collection_name = COLLECTION_JOB_MONTHLY elif qualifier == QUALIFIER_YEARLY: collection_name = COLLECTION_JOB_YEARLY else: raise ValueError('Unknown time qualifier: {0} for {1}'.format(qualifier, process_name)) return collection_name
[ "def", "_get_job_collection_name", "(", "self", ",", "process_name", ")", ":", "qualifier", "=", "context", ".", "process_context", "[", "process_name", "]", ".", "time_qualifier", "if", "qualifier", "==", "QUALIFIER_HOURLY", ":", "collection_name", "=", "COLLECTION_JOB_HOURLY", "elif", "qualifier", "==", "QUALIFIER_DAILY", ":", "collection_name", "=", "COLLECTION_JOB_DAILY", "elif", "qualifier", "==", "QUALIFIER_MONTHLY", ":", "collection_name", "=", "COLLECTION_JOB_MONTHLY", "elif", "qualifier", "==", "QUALIFIER_YEARLY", ":", "collection_name", "=", "COLLECTION_JOB_YEARLY", "else", ":", "raise", "ValueError", "(", "'Unknown time qualifier: {0} for {1}'", ".", "format", "(", "qualifier", ",", "process_name", ")", ")", "return", "collection_name" ]
50
14.4375
def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert): """Add metadata attributes to DataArray""" if dtype_out_vert == 'vert_int': if units != '': units = '(vertical integral of {0}): {0} kg m^-2)'.format(units) else: units = '(vertical integral of quantity with unspecified units)' data.attrs['units'] = units data.attrs['description'] = description return data
[ "def", "_add_metadata_as_attrs_da", "(", "data", ",", "units", ",", "description", ",", "dtype_out_vert", ")", ":", "if", "dtype_out_vert", "==", "'vert_int'", ":", "if", "units", "!=", "''", ":", "units", "=", "'(vertical integral of {0}): {0} kg m^-2)'", ".", "format", "(", "units", ")", "else", ":", "units", "=", "'(vertical integral of quantity with unspecified units)'", "data", ".", "attrs", "[", "'units'", "]", "=", "units", "data", ".", "attrs", "[", "'description'", "]", "=", "description", "return", "data" ]
43.1
18.9
def delete_orderrun(backend, orderrun_id): """ Delete the orderrun specified by the argument. """ click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green') check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip()))
[ "def", "delete_orderrun", "(", "backend", ",", "orderrun_id", ")", ":", "click", ".", "secho", "(", "'%s - Deleting orderrun %s'", "%", "(", "get_datetime", "(", ")", ",", "orderrun_id", ")", ",", "fg", "=", "'green'", ")", "check_and_print", "(", "DKCloudCommandRunner", ".", "delete_orderrun", "(", "backend", ".", "dki", ",", "orderrun_id", ".", "strip", "(", ")", ")", ")" ]
47.5
18.5
def token_from_fragment(self, authorization_response): """Parse token from the URI fragment, used by MobileApplicationClients. :param authorization_response: The full URL of the redirect back to you :return: A token dict """ self._client.parse_request_uri_response( authorization_response, state=self._state ) self.token = self._client.token return self.token
[ "def", "token_from_fragment", "(", "self", ",", "authorization_response", ")", ":", "self", ".", "_client", ".", "parse_request_uri_response", "(", "authorization_response", ",", "state", "=", "self", ".", "_state", ")", "self", ".", "token", "=", "self", ".", "_client", ".", "token", "return", "self", ".", "token" ]
38.727273
15.636364
def p_decl(self, p): 'decl : sigtypes declnamelist SEMICOLON' decllist = [] for rname, rlength in p[2]: decllist.extend(self.create_decl(p[1], rname, length=rlength, lineno=p.lineno(2))) p[0] = Decl(tuple(decllist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_decl", "(", "self", ",", "p", ")", ":", "decllist", "=", "[", "]", "for", "rname", ",", "rlength", "in", "p", "[", "2", "]", ":", "decllist", ".", "extend", "(", "self", ".", "create_decl", "(", "p", "[", "1", "]", ",", "rname", ",", "length", "=", "rlength", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")", ")", "p", "[", "0", "]", "=", "Decl", "(", "tuple", "(", "decllist", ")", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
44.25
16.25
def _set_splits(self, split_dict): """Split setter (private method).""" # Update the dictionary representation. # Use from/to proto for a clean copy self._splits = split_dict.copy() # Update the proto del self.as_proto.splits[:] # Clear previous for split_info in split_dict.to_proto(): self.as_proto.splits.add().CopyFrom(split_info)
[ "def", "_set_splits", "(", "self", ",", "split_dict", ")", ":", "# Update the dictionary representation.", "# Use from/to proto for a clean copy", "self", ".", "_splits", "=", "split_dict", ".", "copy", "(", ")", "# Update the proto", "del", "self", ".", "as_proto", ".", "splits", "[", ":", "]", "# Clear previous", "for", "split_info", "in", "split_dict", ".", "to_proto", "(", ")", ":", "self", ".", "as_proto", ".", "splits", ".", "add", "(", ")", ".", "CopyFrom", "(", "split_info", ")" ]
36.1
9.7
def load_ssh_key(key_id, skip_priv_key=False): """ Load a local ssh private key (in PEM format). PEM format is the OpenSSH default format for private keys. See similar code in imgapi.js#loadSSHKey. @param key_id {str} An ssh public key fingerprint or ssh private key path. @param skip_priv_key {boolean} Optional. Default false. If true, then this will skip loading the private key file and `priv_key` will be `None` in the retval. @returns {dict} with these keys: - pub_key_path - fingerprint - priv_key_path - priv_key - algorithm """ priv_key = None # If `key_id` is already a private key path, then easy. if not FINGERPRINT_RE.match(key_id): if not skip_priv_key: f = io.open(key_id, 'rb') try: priv_key = f.read() finally: f.close() pub_key_path = key_id + '.pub' f = io.open(pub_key_path, 'r') try: pub_key = f.read() finally: f.close() fingerprint = fingerprint_from_ssh_pub_key(pub_key) # XXX: pubkey should NOT be in PEM format. try: algo = ALGO_FROM_SSH_KEY_TYPE[pub_key.split()[0]] except KeyError: raise MantaError("Unsupported key type for: {}".format(key_id)) return dict( pub_key_path=pub_key_path, fingerprint=fingerprint, priv_key_path=key_id, priv_key=priv_key, algorithm=algo) # Else, look at all pub/priv keys in "~/.ssh" for a matching fingerprint. fingerprint = key_id pub_key_glob = expanduser('~/.ssh/*.pub') pub_key = None for pub_key_path in glob(pub_key_glob): try: f = io.open(pub_key_path, 'r') except IOError: # This can happen if the .pub file is a broken symlink. log.debug("could not open '%s', skip it", pub_key_path) continue try: pub_key = f.read() finally: f.close() # The MD5 fingerprint functions return the hexdigest without the hash # algorithm prefix ("MD5:"), and the SHA256 functions return the # fingerprint with the prefix ("SHA256:"). Ideally we'd want to # normalize these, but more importantly we don't want to break backwards # compatibility for either the SHA or MD5 users. 
md5_fp = fingerprint_from_ssh_pub_key(pub_key) sha256_fp = sha256_fingerprint_from_ssh_pub_key(pub_key) if (sha256_fp == fingerprint or md5_fp == fingerprint or "MD5:" + md5_fp == fingerprint): # if the user has given us sha256 fingerprint, canonicalize # it to the md5 fingerprint fingerprint = md5_fp break else: raise MantaError( "no '~/.ssh/*.pub' key found with fingerprint '%s'" % fingerprint) # XXX: pubkey should NOT be in PEM format. try: algo = ALGO_FROM_SSH_KEY_TYPE[pub_key.split()[0]] except KeyError: raise MantaError("Unsupported key type for: {}".format(key_id)) priv_key_path = os.path.splitext(pub_key_path)[0] if not skip_priv_key: f = io.open(priv_key_path, 'rb') try: priv_key = f.read() finally: f.close() return dict( pub_key_path=pub_key_path, fingerprint=fingerprint, priv_key_path=priv_key_path, priv_key=priv_key, algorithm=algo)
[ "def", "load_ssh_key", "(", "key_id", ",", "skip_priv_key", "=", "False", ")", ":", "priv_key", "=", "None", "# If `key_id` is already a private key path, then easy.", "if", "not", "FINGERPRINT_RE", ".", "match", "(", "key_id", ")", ":", "if", "not", "skip_priv_key", ":", "f", "=", "io", ".", "open", "(", "key_id", ",", "'rb'", ")", "try", ":", "priv_key", "=", "f", ".", "read", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "pub_key_path", "=", "key_id", "+", "'.pub'", "f", "=", "io", ".", "open", "(", "pub_key_path", ",", "'r'", ")", "try", ":", "pub_key", "=", "f", ".", "read", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "fingerprint", "=", "fingerprint_from_ssh_pub_key", "(", "pub_key", ")", "# XXX: pubkey should NOT be in PEM format.", "try", ":", "algo", "=", "ALGO_FROM_SSH_KEY_TYPE", "[", "pub_key", ".", "split", "(", ")", "[", "0", "]", "]", "except", "KeyError", ":", "raise", "MantaError", "(", "\"Unsupported key type for: {}\"", ".", "format", "(", "key_id", ")", ")", "return", "dict", "(", "pub_key_path", "=", "pub_key_path", ",", "fingerprint", "=", "fingerprint", ",", "priv_key_path", "=", "key_id", ",", "priv_key", "=", "priv_key", ",", "algorithm", "=", "algo", ")", "# Else, look at all pub/priv keys in \"~/.ssh\" for a matching fingerprint.", "fingerprint", "=", "key_id", "pub_key_glob", "=", "expanduser", "(", "'~/.ssh/*.pub'", ")", "pub_key", "=", "None", "for", "pub_key_path", "in", "glob", "(", "pub_key_glob", ")", ":", "try", ":", "f", "=", "io", ".", "open", "(", "pub_key_path", ",", "'r'", ")", "except", "IOError", ":", "# This can happen if the .pub file is a broken symlink.", "log", ".", "debug", "(", "\"could not open '%s', skip it\"", ",", "pub_key_path", ")", "continue", "try", ":", "pub_key", "=", "f", ".", "read", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "# The MD5 fingerprint functions return the hexdigest without the hash", "# algorithm prefix (\"MD5:\"), and the SHA256 functions return the", "# 
fingerprint with the prefix (\"SHA256:\"). Ideally we'd want to", "# normalize these, but more importantly we don't want to break backwards", "# compatibility for either the SHA or MD5 users.", "md5_fp", "=", "fingerprint_from_ssh_pub_key", "(", "pub_key", ")", "sha256_fp", "=", "sha256_fingerprint_from_ssh_pub_key", "(", "pub_key", ")", "if", "(", "sha256_fp", "==", "fingerprint", "or", "md5_fp", "==", "fingerprint", "or", "\"MD5:\"", "+", "md5_fp", "==", "fingerprint", ")", ":", "# if the user has given us sha256 fingerprint, canonicalize", "# it to the md5 fingerprint", "fingerprint", "=", "md5_fp", "break", "else", ":", "raise", "MantaError", "(", "\"no '~/.ssh/*.pub' key found with fingerprint '%s'\"", "%", "fingerprint", ")", "# XXX: pubkey should NOT be in PEM format.", "try", ":", "algo", "=", "ALGO_FROM_SSH_KEY_TYPE", "[", "pub_key", ".", "split", "(", ")", "[", "0", "]", "]", "except", "KeyError", ":", "raise", "MantaError", "(", "\"Unsupported key type for: {}\"", ".", "format", "(", "key_id", ")", ")", "priv_key_path", "=", "os", ".", "path", ".", "splitext", "(", "pub_key_path", ")", "[", "0", "]", "if", "not", "skip_priv_key", ":", "f", "=", "io", ".", "open", "(", "priv_key_path", ",", "'rb'", ")", "try", ":", "priv_key", "=", "f", ".", "read", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "return", "dict", "(", "pub_key_path", "=", "pub_key_path", ",", "fingerprint", "=", "fingerprint", ",", "priv_key_path", "=", "priv_key_path", ",", "priv_key", "=", "priv_key", ",", "algorithm", "=", "algo", ")" ]
33.038095
19.342857
def _execute(self, request, **kwargs): """The top-level execute function for the endpoint. This method is intended to remain as-is, and not be overridden. It gets called by your HTTP framework's route handler, and performs the following actions to process the request: ``authenticate_request`` Validate the Bearer token, populate the ``current_user``, and make sure that the token covers the scope needed to call the requested method. * * ``parse arguments`` The argument parser is responsible for: - First, coercing and patching any parameters that might require it due to versioning (i.e. the caller is using an old API version that supports `index` as a parameter for pagination, but the current version uses the name `offset`) - Second, iterating through the endpoint's supported arguments and validating that the params passed in comply with the endpoint's requirements - Third, populating the `context.args` array with the validated arguments If any of the arguments are invalid, then the Argument parser will raise an ArgumentError that bubbles up to the `try/catch` block of the execute method. * * ``before handler`` The before_handlers are specified by the Endpoint definition, and are intended to supporty DRY-ing up your codebase. Have a set of Endpoints that all need to grab an object from the ORM based on the same parameter? Make them inherit from an Endpoint subclass that performs that task in a before_handler! * * ``handle`` The core logic of your API endpoint, as implemented by you in your Endpoint subclass. The API Framework expects ``handle`` to return a dictionary specifying the response object and the JSON key that it should hang off of, or a tuple of a dictionary and an HTTP status code. * * ``after_handler`` Like the before_handlers, the ``after_handlers`` happen after the handle method, and allow the endpoint developer to re-use code for post-processing data from an endpoint. 
* * ``render response`` Like the argument parser, the response renderer is responsible for a few things: - First, it converts the ORM objects into JSON-serializable Python dictionaries using the Resource objects defined by the API implementation, - Second, it does any version parameter coersion, renaming and reformatting the edge version of the response to match the version requested by the API caller, - and Third, it serializes the Python dictionary into the response format requested by the API caller (right now, we only support JSON responses, but it'd be reasonble to support something like HTML or XML or whatever in the future). The rendered JSON text is then returned as the response that should be sent by your HTTP framework's routing handler. * * ``_after_response_handler`` The `_after_response_handlers` are specified by the Endpoint definition, and enable manipulation of the response object before it is returned to the client, but after the response is rendered. Because these are instancemethods, they may share instance data from `self` specified in the endpoint's `_handle` method. ``_finalize_content`` The `_finalize_content` method is overridden by the Endpoint and is called after the response is rendered into a serializable result. This method is called with two arguments, the context and the rendered content, and expected to return updated rendered content. For in-place modification of dicts, this method will still be expected to return the given argument. ``_allow_cors`` This value is set to enable CORs for a given endpoint. When set to a string it supplies an explicit value to 'Access-Control-Allow-Origin'. 
Set to True, this will allow access from *all* domains; Access-Control-Allow-Origin = "*" """ try: self._create_context(request) self._authenticate() context = get_current_context() self._parse_args() if hasattr(self, '_before_handlers') and \ isinstance(self._before_handlers, (list, tuple)): for handler in self._before_handlers: handler(context) context.handler_result = self._handle(context) if hasattr(self, '_after_handlers') and \ isinstance(self._after_handlers, (list, tuple)): for handler in self._after_handlers: handler(context) self._render() response = context.response # After calling ._render(), the response is ready to go, so we # shouldn't need to handle any other exceptions beyond this point. except AuthenticationError as e: if hasattr(e, 'message') and e.message is not None: message = e.message else: message = "You don't have permission to do that." err = APIError.Forbidden(message) response = self._response_class(*err.response) response.headers["Content-Type"] = 'application/json' except ArgumentError as e: err = APIError.UnprocessableEntity(e.message) response = self._response_class(*err.response) response.headers["Content-Type"] = 'application/json' except APIError as e: response = self._response_class(*e.response) response.headers["Content-Type"] = 'application/json' except PaleRaisedResponse as r: response = self._response_class(*r.response) response.headers["Content-Type"] = 'application/json' except Exception as e: logging.exception("Failed to handle Pale Endpoint %s: %r", self.__class__.__name__, e) err = APIError.Exception(repr(e)) response = self._response_class(*err.response) response.headers["Content-Type"] = 'application/json' allow_cors = getattr(self, "_allow_cors", None) if allow_cors is True: response.headers['Access-Control-Allow-Origin'] = '*' elif isinstance(allow_cors, basestring): response.headers['Access-Control-Allow-Origin'] = allow_cors context.response = response try: if hasattr(self, '_after_response_handlers') and \ 
isinstance(self._after_response_handlers, (list, tuple)): for handler in self._after_response_handlers: handler(context, response) except Exception as e: logging.exception( "Failed to process _after_response_handlers for Endpoint %s", self.__class__.__name__) raise return response
[ "def", "_execute", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "_create_context", "(", "request", ")", "self", ".", "_authenticate", "(", ")", "context", "=", "get_current_context", "(", ")", "self", ".", "_parse_args", "(", ")", "if", "hasattr", "(", "self", ",", "'_before_handlers'", ")", "and", "isinstance", "(", "self", ".", "_before_handlers", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "handler", "in", "self", ".", "_before_handlers", ":", "handler", "(", "context", ")", "context", ".", "handler_result", "=", "self", ".", "_handle", "(", "context", ")", "if", "hasattr", "(", "self", ",", "'_after_handlers'", ")", "and", "isinstance", "(", "self", ".", "_after_handlers", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "handler", "in", "self", ".", "_after_handlers", ":", "handler", "(", "context", ")", "self", ".", "_render", "(", ")", "response", "=", "context", ".", "response", "# After calling ._render(), the response is ready to go, so we", "# shouldn't need to handle any other exceptions beyond this point.", "except", "AuthenticationError", "as", "e", ":", "if", "hasattr", "(", "e", ",", "'message'", ")", "and", "e", ".", "message", "is", "not", "None", ":", "message", "=", "e", ".", "message", "else", ":", "message", "=", "\"You don't have permission to do that.\"", "err", "=", "APIError", ".", "Forbidden", "(", "message", ")", "response", "=", "self", ".", "_response_class", "(", "*", "err", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "ArgumentError", "as", "e", ":", "err", "=", "APIError", ".", "UnprocessableEntity", "(", "e", ".", "message", ")", "response", "=", "self", ".", "_response_class", "(", "*", "err", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "APIError", "as", "e", ":", "response", "=", "self", ".", "_response_class", "(", "*", "e", 
".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "PaleRaisedResponse", "as", "r", ":", "response", "=", "self", ".", "_response_class", "(", "*", "r", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "\"Failed to handle Pale Endpoint %s: %r\"", ",", "self", ".", "__class__", ".", "__name__", ",", "e", ")", "err", "=", "APIError", ".", "Exception", "(", "repr", "(", "e", ")", ")", "response", "=", "self", ".", "_response_class", "(", "*", "err", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "allow_cors", "=", "getattr", "(", "self", ",", "\"_allow_cors\"", ",", "None", ")", "if", "allow_cors", "is", "True", ":", "response", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "elif", "isinstance", "(", "allow_cors", ",", "basestring", ")", ":", "response", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "allow_cors", "context", ".", "response", "=", "response", "try", ":", "if", "hasattr", "(", "self", ",", "'_after_response_handlers'", ")", "and", "isinstance", "(", "self", ".", "_after_response_handlers", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "handler", "in", "self", ".", "_after_response_handlers", ":", "handler", "(", "context", ",", "response", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "\"Failed to process _after_response_handlers for Endpoint %s\"", ",", "self", ".", "__class__", ".", "__name__", ")", "raise", "return", "response" ]
46.157576
23.515152
def rel_href(src, dst): """For src='foo/bar.html', dst='garply.html#frotz' return relative link '../garply.html#frotz'. """ src_dir = os.path.dirname(src) return os.path.relpath(dst, src_dir)
[ "def", "rel_href", "(", "src", ",", "dst", ")", ":", "src_dir", "=", "os", ".", "path", ".", "dirname", "(", "src", ")", "return", "os", ".", "path", ".", "relpath", "(", "dst", ",", "src_dir", ")" ]
39
5.4
async def reply_voice(self, voice: typing.Union[base.InputFile, base.String], caption: typing.Union[base.String, None] = None, duration: typing.Union[base.Integer, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_markup=None, reply=True) -> Message: """ Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .ogg file encoded with OPUS (other formats may be sent as Audio or Document). Source: https://core.telegram.org/bots/api#sendvoice :param voice: Audio file to send. :type voice: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Voice message caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the voice message in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message` """ return await self.bot.send_voice(chat_id=self.chat.id, voice=voice, caption=caption, duration=duration, disable_notification=disable_notification, reply_to_message_id=self.message_id if reply else None, reply_markup=reply_markup)
[ "async", "def", "reply_voice", "(", "self", ",", "voice", ":", "typing", ".", "Union", "[", "base", ".", "InputFile", ",", "base", ".", "String", "]", ",", "caption", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "duration", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "disable_notification", ":", "typing", ".", "Union", "[", "base", ".", "Boolean", ",", "None", "]", "=", "None", ",", "reply_markup", "=", "None", ",", "reply", "=", "True", ")", "->", "Message", ":", "return", "await", "self", ".", "bot", ".", "send_voice", "(", "chat_id", "=", "self", ".", "chat", ".", "id", ",", "voice", "=", "voice", ",", "caption", "=", "caption", ",", "duration", "=", "duration", ",", "disable_notification", "=", "disable_notification", ",", "reply_to_message_id", "=", "self", ".", "message_id", "if", "reply", "else", "None", ",", "reply_markup", "=", "reply_markup", ")" ]
57.972973
26.459459
def device_sdr_entries(self): """A generator that returns the SDR list. Starting with ID=0x0000 and end when ID=0xffff is returned. """ reservation_id = self.reserve_device_sdr_repository() record_id = 0 while True: record = self.get_device_sdr(record_id, reservation_id) yield record if record.next_id == 0xffff: break record_id = record.next_id
[ "def", "device_sdr_entries", "(", "self", ")", ":", "reservation_id", "=", "self", ".", "reserve_device_sdr_repository", "(", ")", "record_id", "=", "0", "while", "True", ":", "record", "=", "self", ".", "get_device_sdr", "(", "record_id", ",", "reservation_id", ")", "yield", "record", "if", "record", ".", "next_id", "==", "0xffff", ":", "break", "record_id", "=", "record", ".", "next_id" ]
34.384615
13.615385
def _nested_op(inputs, op): # pylint: disable=invalid-name """Helper: sum a list of arrays or nested arrays.""" # First the simple non-nested case. if not isinstance(inputs[0], (list, tuple)): return op(inputs) # In the nested case, sum on each axis separately. result_list = [] for i in range(len(inputs[0])): result_list.append(_nested_op([x[i] for x in inputs], op=op)) if isinstance(inputs[0], list): return result_list return tuple(result_list)
[ "def", "_nested_op", "(", "inputs", ",", "op", ")", ":", "# pylint: disable=invalid-name", "# First the simple non-nested case.", "if", "not", "isinstance", "(", "inputs", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "op", "(", "inputs", ")", "# In the nested case, sum on each axis separately.", "result_list", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "inputs", "[", "0", "]", ")", ")", ":", "result_list", ".", "append", "(", "_nested_op", "(", "[", "x", "[", "i", "]", "for", "x", "in", "inputs", "]", ",", "op", "=", "op", ")", ")", "if", "isinstance", "(", "inputs", "[", "0", "]", ",", "list", ")", ":", "return", "result_list", "return", "tuple", "(", "result_list", ")" ]
38.916667
12.583333
def check_metadata(self): '''Ensure that the metadata in our file is self-consistent.''' assert self.header.point_count == self.point_used, ( 'inconsistent point count! {} header != {} POINT:USED'.format( self.header.point_count, self.point_used, )) assert self.header.scale_factor == self.point_scale, ( 'inconsistent scale factor! {} header != {} POINT:SCALE'.format( self.header.scale_factor, self.point_scale, )) assert self.header.frame_rate == self.point_rate, ( 'inconsistent frame rate! {} header != {} POINT:RATE'.format( self.header.frame_rate, self.point_rate, )) ratio = self.analog_rate / self.point_rate assert True or self.header.analog_per_frame == ratio, ( 'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format( self.header.analog_per_frame, self.analog_rate, self.point_rate, )) count = self.analog_used * self.header.analog_per_frame assert True or self.header.analog_count == count, ( 'inconsistent analog count! {} header != {} analog used * {} per-frame'.format( self.header.analog_count, self.analog_used, self.header.analog_per_frame, )) start = self.get_uint16('POINT:DATA_START') assert self.header.data_block == start, ( 'inconsistent data block! {} header != {} POINT:DATA_START'.format( self.header.data_block, start)) for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS', 'ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'): if self.get(name) is None: warnings.warn('missing parameter {}'.format(name))
[ "def", "check_metadata", "(", "self", ")", ":", "assert", "self", ".", "header", ".", "point_count", "==", "self", ".", "point_used", ",", "(", "'inconsistent point count! {} header != {} POINT:USED'", ".", "format", "(", "self", ".", "header", ".", "point_count", ",", "self", ".", "point_used", ",", ")", ")", "assert", "self", ".", "header", ".", "scale_factor", "==", "self", ".", "point_scale", ",", "(", "'inconsistent scale factor! {} header != {} POINT:SCALE'", ".", "format", "(", "self", ".", "header", ".", "scale_factor", ",", "self", ".", "point_scale", ",", ")", ")", "assert", "self", ".", "header", ".", "frame_rate", "==", "self", ".", "point_rate", ",", "(", "'inconsistent frame rate! {} header != {} POINT:RATE'", ".", "format", "(", "self", ".", "header", ".", "frame_rate", ",", "self", ".", "point_rate", ",", ")", ")", "ratio", "=", "self", ".", "analog_rate", "/", "self", ".", "point_rate", "assert", "True", "or", "self", ".", "header", ".", "analog_per_frame", "==", "ratio", ",", "(", "'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'", ".", "format", "(", "self", ".", "header", ".", "analog_per_frame", ",", "self", ".", "analog_rate", ",", "self", ".", "point_rate", ",", ")", ")", "count", "=", "self", ".", "analog_used", "*", "self", ".", "header", ".", "analog_per_frame", "assert", "True", "or", "self", ".", "header", ".", "analog_count", "==", "count", ",", "(", "'inconsistent analog count! {} header != {} analog used * {} per-frame'", ".", "format", "(", "self", ".", "header", ".", "analog_count", ",", "self", ".", "analog_used", ",", "self", ".", "header", ".", "analog_per_frame", ",", ")", ")", "start", "=", "self", ".", "get_uint16", "(", "'POINT:DATA_START'", ")", "assert", "self", ".", "header", ".", "data_block", "==", "start", ",", "(", "'inconsistent data block! 
{} header != {} POINT:DATA_START'", ".", "format", "(", "self", ".", "header", ".", "data_block", ",", "start", ")", ")", "for", "name", "in", "(", "'POINT:LABELS'", ",", "'POINT:DESCRIPTIONS'", ",", "'ANALOG:LABELS'", ",", "'ANALOG:DESCRIPTIONS'", ")", ":", "if", "self", ".", "get", "(", "name", ")", "is", "None", ":", "warnings", ".", "warn", "(", "'missing parameter {}'", ".", "format", "(", "name", ")", ")" ]
41.755556
21
def set_version(self, version='0.0.0', save=True): """Set configuration (not application!) version""" self.set(self.DEFAULT_SECTION_NAME, 'version', version, save=save)
[ "def", "set_version", "(", "self", ",", "version", "=", "'0.0.0'", ",", "save", "=", "True", ")", ":", "self", ".", "set", "(", "self", ".", "DEFAULT_SECTION_NAME", ",", "'version'", ",", "version", ",", "save", "=", "save", ")" ]
61.333333
15
def _log_debug(msg): ''' Log at debug level :param msg: message to log ''' if _log_level <= DEBUG: if _log_level == TRACE: traceback.print_stack() _log(msg)
[ "def", "_log_debug", "(", "msg", ")", ":", "if", "_log_level", "<=", "DEBUG", ":", "if", "_log_level", "==", "TRACE", ":", "traceback", ".", "print_stack", "(", ")", "_log", "(", "msg", ")" ]
21.777778
18.222222
def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if _isArrayLike(ids): return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]]
[ "def", "loadCats", "(", "self", ",", "ids", "=", "[", "]", ")", ":", "if", "_isArrayLike", "(", "ids", ")", ":", "return", "[", "self", ".", "cats", "[", "id", "]", "for", "id", "in", "ids", "]", "elif", "type", "(", "ids", ")", "==", "int", ":", "return", "[", "self", ".", "cats", "[", "ids", "]", "]" ]
35.5
9.1
def exclude_time(self, start, end, days): """Added an excluded time by start, end times and the days. ``start`` and ``end`` are in military integer times (e.g. - 1200 1430). ``days`` is a collection of integers or strings of fully-spelt, lowercased days of the week. """ self._excluded_times.append(TimeRange(start, end, days)) return self
[ "def", "exclude_time", "(", "self", ",", "start", ",", "end", ",", "days", ")", ":", "self", ".", "_excluded_times", ".", "append", "(", "TimeRange", "(", "start", ",", "end", ",", "days", ")", ")", "return", "self" ]
44.111111
20.333333
def getNodeByName(node, name): """ Get the first child node matching a given local name """ if node is None: raise Exception( "Cannot search for a child '%s' in a None object" % (name,) ) if not name: raise Exception("Unspecified name to find node for.") try: childNode = node.xpath("*[local-name() = '%s']" % name)[0] except: return None return childNode
[ "def", "getNodeByName", "(", "node", ",", "name", ")", ":", "if", "node", "is", "None", ":", "raise", "Exception", "(", "\"Cannot search for a child '%s' in a None object\"", "%", "(", "name", ",", ")", ")", "if", "not", "name", ":", "raise", "Exception", "(", "\"Unspecified name to find node for.\"", ")", "try", ":", "childNode", "=", "node", ".", "xpath", "(", "\"*[local-name() = '%s']\"", "%", "name", ")", "[", "0", "]", "except", ":", "return", "None", "return", "childNode" ]
26.5625
21.0625
def shell(self, gui=0, command='', dryrun=None, shell_interactive_cmd_str=None): """ Opens an SSH connection. """ from burlap.common import get_hosts_for_site if dryrun is not None: self.dryrun = dryrun r = self.local_renderer if r.genv.SITE != r.genv.default_site: shell_hosts = get_hosts_for_site() if shell_hosts: r.genv.host_string = shell_hosts[0] r.env.SITE = r.genv.SITE or r.genv.default_site if int(gui): r.env.shell_default_options.append('-X') if 'host_string' not in self.genv or not self.genv.host_string: if 'available_sites' in self.genv and r.env.SITE not in r.genv.available_sites: raise Exception('No host_string set. Unknown site %s.' % r.env.SITE) else: raise Exception('No host_string set.') if '@' in r.genv.host_string: r.env.shell_host_string = r.genv.host_string else: r.env.shell_host_string = '{user}@{host_string}' if command: r.env.shell_interactive_cmd_str = command else: r.env.shell_interactive_cmd_str = r.format(shell_interactive_cmd_str or r.env.shell_interactive_cmd) r.env.shell_default_options_str = ' '.join(r.env.shell_default_options) if self.is_local: self.vprint('Using direct local.') cmd = '{shell_interactive_cmd_str}' elif r.genv.key_filename: self.vprint('Using key filename.') # If host_string contains the port, then strip it off and pass separately. port = r.env.shell_host_string.split(':')[-1] if port.isdigit(): r.env.shell_host_string = r.env.shell_host_string.split(':')[0] + (' -p %s' % port) cmd = 'ssh -t {shell_default_options_str} -i {key_filename} {shell_host_string} "{shell_interactive_cmd_str}"' elif r.genv.password: self.vprint('Using password.') cmd = 'ssh -t {shell_default_options_str} {shell_host_string} "{shell_interactive_cmd_str}"' else: # No explicit password or key file needed? self.vprint('Using nothing.') cmd = 'ssh -t {shell_default_options_str} {shell_host_string} "{shell_interactive_cmd_str}"' r.local(cmd)
[ "def", "shell", "(", "self", ",", "gui", "=", "0", ",", "command", "=", "''", ",", "dryrun", "=", "None", ",", "shell_interactive_cmd_str", "=", "None", ")", ":", "from", "burlap", ".", "common", "import", "get_hosts_for_site", "if", "dryrun", "is", "not", "None", ":", "self", ".", "dryrun", "=", "dryrun", "r", "=", "self", ".", "local_renderer", "if", "r", ".", "genv", ".", "SITE", "!=", "r", ".", "genv", ".", "default_site", ":", "shell_hosts", "=", "get_hosts_for_site", "(", ")", "if", "shell_hosts", ":", "r", ".", "genv", ".", "host_string", "=", "shell_hosts", "[", "0", "]", "r", ".", "env", ".", "SITE", "=", "r", ".", "genv", ".", "SITE", "or", "r", ".", "genv", ".", "default_site", "if", "int", "(", "gui", ")", ":", "r", ".", "env", ".", "shell_default_options", ".", "append", "(", "'-X'", ")", "if", "'host_string'", "not", "in", "self", ".", "genv", "or", "not", "self", ".", "genv", ".", "host_string", ":", "if", "'available_sites'", "in", "self", ".", "genv", "and", "r", ".", "env", ".", "SITE", "not", "in", "r", ".", "genv", ".", "available_sites", ":", "raise", "Exception", "(", "'No host_string set. 
Unknown site %s.'", "%", "r", ".", "env", ".", "SITE", ")", "else", ":", "raise", "Exception", "(", "'No host_string set.'", ")", "if", "'@'", "in", "r", ".", "genv", ".", "host_string", ":", "r", ".", "env", ".", "shell_host_string", "=", "r", ".", "genv", ".", "host_string", "else", ":", "r", ".", "env", ".", "shell_host_string", "=", "'{user}@{host_string}'", "if", "command", ":", "r", ".", "env", ".", "shell_interactive_cmd_str", "=", "command", "else", ":", "r", ".", "env", ".", "shell_interactive_cmd_str", "=", "r", ".", "format", "(", "shell_interactive_cmd_str", "or", "r", ".", "env", ".", "shell_interactive_cmd", ")", "r", ".", "env", ".", "shell_default_options_str", "=", "' '", ".", "join", "(", "r", ".", "env", ".", "shell_default_options", ")", "if", "self", ".", "is_local", ":", "self", ".", "vprint", "(", "'Using direct local.'", ")", "cmd", "=", "'{shell_interactive_cmd_str}'", "elif", "r", ".", "genv", ".", "key_filename", ":", "self", ".", "vprint", "(", "'Using key filename.'", ")", "# If host_string contains the port, then strip it off and pass separately.", "port", "=", "r", ".", "env", ".", "shell_host_string", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "if", "port", ".", "isdigit", "(", ")", ":", "r", ".", "env", ".", "shell_host_string", "=", "r", ".", "env", ".", "shell_host_string", ".", "split", "(", "':'", ")", "[", "0", "]", "+", "(", "' -p %s'", "%", "port", ")", "cmd", "=", "'ssh -t {shell_default_options_str} -i {key_filename} {shell_host_string} \"{shell_interactive_cmd_str}\"'", "elif", "r", ".", "genv", ".", "password", ":", "self", ".", "vprint", "(", "'Using password.'", ")", "cmd", "=", "'ssh -t {shell_default_options_str} {shell_host_string} \"{shell_interactive_cmd_str}\"'", "else", ":", "# No explicit password or key file needed?", "self", ".", "vprint", "(", "'Using nothing.'", ")", "cmd", "=", "'ssh -t {shell_default_options_str} {shell_host_string} \"{shell_interactive_cmd_str}\"'", "r", ".", "local", "(", "cmd", 
")" ]
41.660714
24.803571
def validate(cls, mapper_spec): """Validate mapper specification. Args: mapper_spec: an instance of model.MapperSpec. Raises: BadWriterParamsError: if the specification is invalid for any reason such as missing the bucket name or providing an invalid bucket name. """ writer_spec = cls.get_params(mapper_spec, allow_old=False) # Bucket Name is required if cls.BUCKET_NAME_PARAM not in writer_spec: raise errors.BadWriterParamsError( "%s is required for Google Cloud Storage" % cls.BUCKET_NAME_PARAM) try: cloudstorage.validate_bucket_name( writer_spec[cls.BUCKET_NAME_PARAM]) except ValueError, error: raise errors.BadWriterParamsError("Bad bucket name, %s" % (error)) # Validate the naming format does not throw any errors using dummy values cls._generate_filename(writer_spec, "name", "id", 0) cls._generate_filename(writer_spec, "name", "id", 0, 1, 0)
[ "def", "validate", "(", "cls", ",", "mapper_spec", ")", ":", "writer_spec", "=", "cls", ".", "get_params", "(", "mapper_spec", ",", "allow_old", "=", "False", ")", "# Bucket Name is required", "if", "cls", ".", "BUCKET_NAME_PARAM", "not", "in", "writer_spec", ":", "raise", "errors", ".", "BadWriterParamsError", "(", "\"%s is required for Google Cloud Storage\"", "%", "cls", ".", "BUCKET_NAME_PARAM", ")", "try", ":", "cloudstorage", ".", "validate_bucket_name", "(", "writer_spec", "[", "cls", ".", "BUCKET_NAME_PARAM", "]", ")", "except", "ValueError", ",", "error", ":", "raise", "errors", ".", "BadWriterParamsError", "(", "\"Bad bucket name, %s\"", "%", "(", "error", ")", ")", "# Validate the naming format does not throw any errors using dummy values", "cls", ".", "_generate_filename", "(", "writer_spec", ",", "\"name\"", ",", "\"id\"", ",", "0", ")", "cls", ".", "_generate_filename", "(", "writer_spec", ",", "\"name\"", ",", "\"id\"", ",", "0", ",", "1", ",", "0", ")" ]
36.5
20.269231
def get_parameter(self, name): """ Gets a single parameter by its name. :param str name: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :rtype: .Parameter """ name = adapt_name_for_rest(name) url = '/mdb/{}/parameters{}'.format(self._instance, name) response = self._client.get_proto(url) message = mdb_pb2.ParameterInfo() message.ParseFromString(response.content) return Parameter(message)
[ "def", "get_parameter", "(", "self", ",", "name", ")", ":", "name", "=", "adapt_name_for_rest", "(", "name", ")", "url", "=", "'/mdb/{}/parameters{}'", ".", "format", "(", "self", ".", "_instance", ",", "name", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "url", ")", "message", "=", "mdb_pb2", ".", "ParameterInfo", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "return", "Parameter", "(", "message", ")" ]
37.5
11.785714
def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None, timestamp=None, other_features=None): '''`html` is expected to be a raw string received over the wire from a remote webserver, and `encoding`, if provided, is used to decode it. Typically, encoding comes from the Content-Type header field. The :func:`~streamcorpus_pipeline._clean_html.make_clean_html` function handles character encodings. ''' def add_feature(name, xs): if name not in fc: fc[name] = StringCounter() fc[name] += StringCounter(xs) timestamp = timestamp or int(time.time() * 1000) other_features = other_features or {} if clean_html is None: if html is not None: try: clean_html_utf8 = make_clean_html(html, encoding=encoding) except: logger.warn('dropping doc because:', exc_info=True) return clean_html = clean_html_utf8.decode('utf-8') else: clean_html_utf8 = u'' clean_html = u'' else: clean_html_utf8 = u'' if clean_visible is None or len(clean_visible) == 0: clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8') elif isinstance(clean_visible, str): clean_visible = clean_visible.decode('utf-8') fc = FeatureCollection() fc[u'meta_raw'] = html and uni(html, encoding) or u'' fc[u'meta_clean_html'] = clean_html fc[u'meta_clean_visible'] = clean_visible fc[u'meta_timestamp'] = unicode(timestamp) url = url or u'' fc[u'meta_url'] = uni(url) add_feature(u'icq', features.ICQs(clean_visible)) add_feature(u'skype', features.skypes(clean_visible)) add_feature(u'phone', features.phones(clean_visible)) add_feature(u'email', features.emails(clean_visible)) bowNP, normalizations = features.noun_phrases( cleanse(clean_visible), included_unnormalized=True) add_feature(u'bowNP', bowNP) bowNP_unnorm = chain(*normalizations.values()) add_feature(u'bowNP_unnorm', bowNP_unnorm) add_feature(u'image_url', features.image_urls(clean_html)) add_feature(u'a_url', features.a_urls(clean_html)) ## get parsed versions, extract usernames fc[u'img_url_path_dirs'] = 
features.path_dirs(fc[u'image_url']) fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url']) fc[u'usernames'] = features.usernames(fc[u'image_url']) fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url']) fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url']) fc[u'usernames'] += features.usernames(fc[u'a_url']) #fc[u'usernames'] += features.usernames2( # fc[u'meta_clean_visible']) # beginning of treating this as a pipeline... xform = features.entity_names() fc = xform.process(fc) for feat_name, feat_val in other_features.iteritems(): fc[feat_name] += StringCounter(feat_val) return fc
[ "def", "html_to_fc", "(", "html", "=", "None", ",", "clean_html", "=", "None", ",", "clean_visible", "=", "None", ",", "encoding", "=", "None", ",", "url", "=", "None", ",", "timestamp", "=", "None", ",", "other_features", "=", "None", ")", ":", "def", "add_feature", "(", "name", ",", "xs", ")", ":", "if", "name", "not", "in", "fc", ":", "fc", "[", "name", "]", "=", "StringCounter", "(", ")", "fc", "[", "name", "]", "+=", "StringCounter", "(", "xs", ")", "timestamp", "=", "timestamp", "or", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "other_features", "=", "other_features", "or", "{", "}", "if", "clean_html", "is", "None", ":", "if", "html", "is", "not", "None", ":", "try", ":", "clean_html_utf8", "=", "make_clean_html", "(", "html", ",", "encoding", "=", "encoding", ")", "except", ":", "logger", ".", "warn", "(", "'dropping doc because:'", ",", "exc_info", "=", "True", ")", "return", "clean_html", "=", "clean_html_utf8", ".", "decode", "(", "'utf-8'", ")", "else", ":", "clean_html_utf8", "=", "u''", "clean_html", "=", "u''", "else", ":", "clean_html_utf8", "=", "u''", "if", "clean_visible", "is", "None", "or", "len", "(", "clean_visible", ")", "==", "0", ":", "clean_visible", "=", "make_clean_visible", "(", "clean_html_utf8", ")", ".", "decode", "(", "'utf-8'", ")", "elif", "isinstance", "(", "clean_visible", ",", "str", ")", ":", "clean_visible", "=", "clean_visible", ".", "decode", "(", "'utf-8'", ")", "fc", "=", "FeatureCollection", "(", ")", "fc", "[", "u'meta_raw'", "]", "=", "html", "and", "uni", "(", "html", ",", "encoding", ")", "or", "u''", "fc", "[", "u'meta_clean_html'", "]", "=", "clean_html", "fc", "[", "u'meta_clean_visible'", "]", "=", "clean_visible", "fc", "[", "u'meta_timestamp'", "]", "=", "unicode", "(", "timestamp", ")", "url", "=", "url", "or", "u''", "fc", "[", "u'meta_url'", "]", "=", "uni", "(", "url", ")", "add_feature", "(", "u'icq'", ",", "features", ".", "ICQs", "(", "clean_visible", ")", ")", "add_feature", "(", 
"u'skype'", ",", "features", ".", "skypes", "(", "clean_visible", ")", ")", "add_feature", "(", "u'phone'", ",", "features", ".", "phones", "(", "clean_visible", ")", ")", "add_feature", "(", "u'email'", ",", "features", ".", "emails", "(", "clean_visible", ")", ")", "bowNP", ",", "normalizations", "=", "features", ".", "noun_phrases", "(", "cleanse", "(", "clean_visible", ")", ",", "included_unnormalized", "=", "True", ")", "add_feature", "(", "u'bowNP'", ",", "bowNP", ")", "bowNP_unnorm", "=", "chain", "(", "*", "normalizations", ".", "values", "(", ")", ")", "add_feature", "(", "u'bowNP_unnorm'", ",", "bowNP_unnorm", ")", "add_feature", "(", "u'image_url'", ",", "features", ".", "image_urls", "(", "clean_html", ")", ")", "add_feature", "(", "u'a_url'", ",", "features", ".", "a_urls", "(", "clean_html", ")", ")", "## get parsed versions, extract usernames", "fc", "[", "u'img_url_path_dirs'", "]", "=", "features", ".", "path_dirs", "(", "fc", "[", "u'image_url'", "]", ")", "fc", "[", "u'img_url_hostnames'", "]", "=", "features", ".", "host_names", "(", "fc", "[", "u'image_url'", "]", ")", "fc", "[", "u'usernames'", "]", "=", "features", ".", "usernames", "(", "fc", "[", "u'image_url'", "]", ")", "fc", "[", "u'a_url_path_dirs'", "]", "=", "features", ".", "path_dirs", "(", "fc", "[", "u'a_url'", "]", ")", "fc", "[", "u'a_url_hostnames'", "]", "=", "features", ".", "host_names", "(", "fc", "[", "u'a_url'", "]", ")", "fc", "[", "u'usernames'", "]", "+=", "features", ".", "usernames", "(", "fc", "[", "u'a_url'", "]", ")", "#fc[u'usernames'] += features.usernames2(", "# fc[u'meta_clean_visible'])", "# beginning of treating this as a pipeline...", "xform", "=", "features", ".", "entity_names", "(", ")", "fc", "=", "xform", ".", "process", "(", "fc", ")", "for", "feat_name", ",", "feat_val", "in", "other_features", ".", "iteritems", "(", ")", ":", "fc", "[", "feat_name", "]", "+=", "StringCounter", "(", "feat_val", ")", "return", "fc" ]
36.2875
20.7875
def from_dict(cls, parameters): """Set attributes with provided values. Args: parameters(dict): Dictionary containing instance parameters. Returns: Truncnorm: Instance populated with given parameters. """ instance = cls() instance.fitted = parameters['fitted'] instance.constant_value = parameters['constant_value'] if instance.fitted and instance.constant_value is None: instance.model = scipy.stats.truncnorm(parameters['a'], parameters['b']) return instance
[ "def", "from_dict", "(", "cls", ",", "parameters", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "fitted", "=", "parameters", "[", "'fitted'", "]", "instance", ".", "constant_value", "=", "parameters", "[", "'constant_value'", "]", "if", "instance", ".", "fitted", "and", "instance", ".", "constant_value", "is", "None", ":", "instance", ".", "model", "=", "scipy", ".", "stats", ".", "truncnorm", "(", "parameters", "[", "'a'", "]", ",", "parameters", "[", "'b'", "]", ")", "return", "instance" ]
32.705882
23.764706
def dframe(self, dimensions=None, multi_index=False): """Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension """ if dimensions: dimensions = [self.get_dimension(d, strict=True) for d in dimensions] else: dimensions = self.kdims vdims = [d for d in dimensions if d in self.vdims] if vdims: raise ValueError('%s element does not hold data for value ' 'dimensions. Could not return data for %s ' 'dimension(s).' % (type(self).__name__, ', '.join([d.name for d in vdims]))) return super(StatisticsElement, self).dframe(dimensions, False)
[ "def", "dframe", "(", "self", ",", "dimensions", "=", "None", ",", "multi_index", "=", "False", ")", ":", "if", "dimensions", ":", "dimensions", "=", "[", "self", ".", "get_dimension", "(", "d", ",", "strict", "=", "True", ")", "for", "d", "in", "dimensions", "]", "else", ":", "dimensions", "=", "self", ".", "kdims", "vdims", "=", "[", "d", "for", "d", "in", "dimensions", "if", "d", "in", "self", ".", "vdims", "]", "if", "vdims", ":", "raise", "ValueError", "(", "'%s element does not hold data for value '", "'dimensions. Could not return data for %s '", "'dimension(s).'", "%", "(", "type", "(", "self", ")", ".", "__name__", ",", "', '", ".", "join", "(", "[", "d", ".", "name", "for", "d", "in", "vdims", "]", ")", ")", ")", "return", "super", "(", "StatisticsElement", ",", "self", ")", ".", "dframe", "(", "dimensions", ",", "False", ")" ]
42.708333
23.875
def _writeConnectivity(self, links, fileObject): """ Write Connectivity Lines to File Method """ for link in links: linkNum = link.linkNumber downLink = link.downstreamLinkID numUpLinks = link.numUpstreamLinks upLinks = '' for upLink in link.upstreamLinks: upLinks = '{}{:>5}'.format(upLinks, str(upLink.upstreamLinkID)) line = 'CONNECT{:>5}{:>5}{:>5}{}\n'.format(linkNum, downLink, numUpLinks, upLinks) fileObject.write(line) fileObject.write('\n')
[ "def", "_writeConnectivity", "(", "self", ",", "links", ",", "fileObject", ")", ":", "for", "link", "in", "links", ":", "linkNum", "=", "link", ".", "linkNumber", "downLink", "=", "link", ".", "downstreamLinkID", "numUpLinks", "=", "link", ".", "numUpstreamLinks", "upLinks", "=", "''", "for", "upLink", "in", "link", ".", "upstreamLinks", ":", "upLinks", "=", "'{}{:>5}'", ".", "format", "(", "upLinks", ",", "str", "(", "upLink", ".", "upstreamLinkID", ")", ")", "line", "=", "'CONNECT{:>5}{:>5}{:>5}{}\\n'", ".", "format", "(", "linkNum", ",", "downLink", ",", "numUpLinks", ",", "upLinks", ")", "fileObject", ".", "write", "(", "line", ")", "fileObject", ".", "write", "(", "'\\n'", ")" ]
38.4
14.133333
def execute(mp): """ Example process for testing. Inputs: ------- file1 raster file Parameters: ----------- Output: ------- np.ndarray """ # Reading and writing data works like this: with mp.open("file1", resampling="bilinear") as raster_file: if raster_file.is_empty(): return "empty" # This assures a transparent tile instead of a pink error tile # is returned when using mapchete serve. dem = raster_file.read() return dem
[ "def", "execute", "(", "mp", ")", ":", "# Reading and writing data works like this:", "with", "mp", ".", "open", "(", "\"file1\"", ",", "resampling", "=", "\"bilinear\"", ")", "as", "raster_file", ":", "if", "raster_file", ".", "is_empty", "(", ")", ":", "return", "\"empty\"", "# This assures a transparent tile instead of a pink error tile", "# is returned when using mapchete serve.", "dem", "=", "raster_file", ".", "read", "(", ")", "return", "dem" ]
21.708333
21.958333
def encrypt(self, sa, esp, key): """ Encrypt an ESP packet @param sa: the SecurityAssociation associated with the ESP packet. @param esp: an unencrypted _ESPPlain packet with valid padding @param key: the secret key used for encryption @return: a valid ESP packet encrypted with this algorithm """ data = esp.data_for_encryption() if self.cipher: mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv) cipher = self.new_cipher(key, mode_iv) encryptor = cipher.encryptor() if self.is_aead: aad = struct.pack('!LL', esp.spi, esp.seq) encryptor.authenticate_additional_data(aad) data = encryptor.update(data) + encryptor.finalize() data += encryptor.tag[:self.icv_size] else: data = encryptor.update(data) + encryptor.finalize() return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data)
[ "def", "encrypt", "(", "self", ",", "sa", ",", "esp", ",", "key", ")", ":", "data", "=", "esp", ".", "data_for_encryption", "(", ")", "if", "self", ".", "cipher", ":", "mode_iv", "=", "self", ".", "_format_mode_iv", "(", "algo", "=", "self", ",", "sa", "=", "sa", ",", "iv", "=", "esp", ".", "iv", ")", "cipher", "=", "self", ".", "new_cipher", "(", "key", ",", "mode_iv", ")", "encryptor", "=", "cipher", ".", "encryptor", "(", ")", "if", "self", ".", "is_aead", ":", "aad", "=", "struct", ".", "pack", "(", "'!LL'", ",", "esp", ".", "spi", ",", "esp", ".", "seq", ")", "encryptor", ".", "authenticate_additional_data", "(", "aad", ")", "data", "=", "encryptor", ".", "update", "(", "data", ")", "+", "encryptor", ".", "finalize", "(", ")", "data", "+=", "encryptor", ".", "tag", "[", ":", "self", ".", "icv_size", "]", "else", ":", "data", "=", "encryptor", ".", "update", "(", "data", ")", "+", "encryptor", ".", "finalize", "(", ")", "return", "ESP", "(", "spi", "=", "esp", ".", "spi", ",", "seq", "=", "esp", ".", "seq", ",", "data", "=", "esp", ".", "iv", "+", "data", ")" ]
38.230769
21.307692
async def get_constraints(self): """Return the machine constraints dict for this application. """ app_facade = client.ApplicationFacade.from_connection(self.connection) log.debug( 'Getting constraints for %s', self.name) result = (await app_facade.Get(self.name)).constraints return vars(result) if result else result
[ "async", "def", "get_constraints", "(", "self", ")", ":", "app_facade", "=", "client", ".", "ApplicationFacade", ".", "from_connection", "(", "self", ".", "connection", ")", "log", ".", "debug", "(", "'Getting constraints for %s'", ",", "self", ".", "name", ")", "result", "=", "(", "await", "app_facade", ".", "Get", "(", "self", ".", "name", ")", ")", ".", "constraints", "return", "vars", "(", "result", ")", "if", "result", "else", "result" ]
33.636364
21
def install_sql(self, site=None, database='default', apps=None, stop_on_error=0, fn=None): """ Installs all custom SQL. """ #from burlap.db import load_db_set stop_on_error = int(stop_on_error) site = site or ALL name = database r = self.local_renderer paths = glob.glob(r.format(r.env.install_sql_path_template)) apps = [_ for _ in (apps or '').split(',') if _.strip()] if self.verbose: print('install_sql.apps:', apps) def cmp_paths(d0, d1): if d0[1] and d0[1] in d1[2]: return -1 if d1[1] and d1[1] in d0[2]: return +1 return cmp(d0[0], d1[0]) def get_paths(t): """ Returns SQL file paths in an execution order that respect dependencies. """ data = [] # [(path, view_name, content)] for path in paths: if fn and fn not in path: continue parts = path.split('.') if len(parts) == 3 and parts[1] != t: continue if not path.lower().endswith('.sql'): continue content = open(path, 'r').read() matches = re.findall(r'[\s\t]+VIEW[\s\t]+([a-zA-Z0-9_]{3,})', content, flags=re.IGNORECASE) view_name = '' if matches: view_name = matches[0] print('Found view %s.' % view_name) data.append((path, view_name, content)) for d in sorted(data, cmp=cmp_paths): yield d[0] def run_paths(paths, cmd_template, max_retries=3): r = self.local_renderer paths = list(paths) error_counts = defaultdict(int) # {path:count} terminal = set() if self.verbose: print('Checking %i paths.' 
% len(paths)) while paths: path = paths.pop(0) if self.verbose: print('path:', path) app_name = re.findall(r'/([^/]+)/sql/', path)[0] if apps and app_name not in apps: self.vprint('skipping because app_name %s not in apps' % app_name) continue with self.settings(warn_only=True): if self.is_local: r.env.sql_path = path else: r.env.sql_path = '/tmp/%s' % os.path.split(path)[-1] r.put(local_path=path, remote_path=r.env.sql_path) ret = r.run_or_local(cmd_template) if ret and ret.return_code: if stop_on_error: raise Exception('Unable to execute file %s' % path) error_counts[path] += 1 if error_counts[path] < max_retries: paths.append(path) else: terminal.add(path) if terminal: print('%i files could not be loaded.' % len(terminal), file=sys.stderr) for path in sorted(list(terminal)): print(path, file=sys.stderr) print(file=sys.stderr) if self.verbose: print('install_sql.db_engine:', r.env.db_engine) for _site, site_data in self.iter_sites(site=site, no_secure=True): self.set_db(name=name, site=_site) if 'postgres' in r.env.db_engine or 'postgis' in r.env.db_engine: paths = list(get_paths('postgresql')) run_paths( paths=paths, cmd_template="psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}") elif 'mysql' in r.env.db_engine: paths = list(get_paths('mysql')) run_paths( paths=paths, cmd_template="mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}") else: raise NotImplementedError
[ "def", "install_sql", "(", "self", ",", "site", "=", "None", ",", "database", "=", "'default'", ",", "apps", "=", "None", ",", "stop_on_error", "=", "0", ",", "fn", "=", "None", ")", ":", "#from burlap.db import load_db_set", "stop_on_error", "=", "int", "(", "stop_on_error", ")", "site", "=", "site", "or", "ALL", "name", "=", "database", "r", "=", "self", ".", "local_renderer", "paths", "=", "glob", ".", "glob", "(", "r", ".", "format", "(", "r", ".", "env", ".", "install_sql_path_template", ")", ")", "apps", "=", "[", "_", "for", "_", "in", "(", "apps", "or", "''", ")", ".", "split", "(", "','", ")", "if", "_", ".", "strip", "(", ")", "]", "if", "self", ".", "verbose", ":", "print", "(", "'install_sql.apps:'", ",", "apps", ")", "def", "cmp_paths", "(", "d0", ",", "d1", ")", ":", "if", "d0", "[", "1", "]", "and", "d0", "[", "1", "]", "in", "d1", "[", "2", "]", ":", "return", "-", "1", "if", "d1", "[", "1", "]", "and", "d1", "[", "1", "]", "in", "d0", "[", "2", "]", ":", "return", "+", "1", "return", "cmp", "(", "d0", "[", "0", "]", ",", "d1", "[", "0", "]", ")", "def", "get_paths", "(", "t", ")", ":", "\"\"\"\n Returns SQL file paths in an execution order that respect dependencies.\n \"\"\"", "data", "=", "[", "]", "# [(path, view_name, content)]", "for", "path", "in", "paths", ":", "if", "fn", "and", "fn", "not", "in", "path", ":", "continue", "parts", "=", "path", ".", "split", "(", "'.'", ")", "if", "len", "(", "parts", ")", "==", "3", "and", "parts", "[", "1", "]", "!=", "t", ":", "continue", "if", "not", "path", ".", "lower", "(", ")", ".", "endswith", "(", "'.sql'", ")", ":", "continue", "content", "=", "open", "(", "path", ",", "'r'", ")", ".", "read", "(", ")", "matches", "=", "re", ".", "findall", "(", "r'[\\s\\t]+VIEW[\\s\\t]+([a-zA-Z0-9_]{3,})'", ",", "content", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "view_name", "=", "''", "if", "matches", ":", "view_name", "=", "matches", "[", "0", "]", "print", "(", "'Found view %s.'", "%", "view_name", 
")", "data", ".", "append", "(", "(", "path", ",", "view_name", ",", "content", ")", ")", "for", "d", "in", "sorted", "(", "data", ",", "cmp", "=", "cmp_paths", ")", ":", "yield", "d", "[", "0", "]", "def", "run_paths", "(", "paths", ",", "cmd_template", ",", "max_retries", "=", "3", ")", ":", "r", "=", "self", ".", "local_renderer", "paths", "=", "list", "(", "paths", ")", "error_counts", "=", "defaultdict", "(", "int", ")", "# {path:count}", "terminal", "=", "set", "(", ")", "if", "self", ".", "verbose", ":", "print", "(", "'Checking %i paths.'", "%", "len", "(", "paths", ")", ")", "while", "paths", ":", "path", "=", "paths", ".", "pop", "(", "0", ")", "if", "self", ".", "verbose", ":", "print", "(", "'path:'", ",", "path", ")", "app_name", "=", "re", ".", "findall", "(", "r'/([^/]+)/sql/'", ",", "path", ")", "[", "0", "]", "if", "apps", "and", "app_name", "not", "in", "apps", ":", "self", ".", "vprint", "(", "'skipping because app_name %s not in apps'", "%", "app_name", ")", "continue", "with", "self", ".", "settings", "(", "warn_only", "=", "True", ")", ":", "if", "self", ".", "is_local", ":", "r", ".", "env", ".", "sql_path", "=", "path", "else", ":", "r", ".", "env", ".", "sql_path", "=", "'/tmp/%s'", "%", "os", ".", "path", ".", "split", "(", "path", ")", "[", "-", "1", "]", "r", ".", "put", "(", "local_path", "=", "path", ",", "remote_path", "=", "r", ".", "env", ".", "sql_path", ")", "ret", "=", "r", ".", "run_or_local", "(", "cmd_template", ")", "if", "ret", "and", "ret", ".", "return_code", ":", "if", "stop_on_error", ":", "raise", "Exception", "(", "'Unable to execute file %s'", "%", "path", ")", "error_counts", "[", "path", "]", "+=", "1", "if", "error_counts", "[", "path", "]", "<", "max_retries", ":", "paths", ".", "append", "(", "path", ")", "else", ":", "terminal", ".", "add", "(", "path", ")", "if", "terminal", ":", "print", "(", "'%i files could not be loaded.'", "%", "len", "(", "terminal", ")", ",", "file", "=", "sys", ".", "stderr", ")", 
"for", "path", "in", "sorted", "(", "list", "(", "terminal", ")", ")", ":", "print", "(", "path", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "file", "=", "sys", ".", "stderr", ")", "if", "self", ".", "verbose", ":", "print", "(", "'install_sql.db_engine:'", ",", "r", ".", "env", ".", "db_engine", ")", "for", "_site", ",", "site_data", "in", "self", ".", "iter_sites", "(", "site", "=", "site", ",", "no_secure", "=", "True", ")", ":", "self", ".", "set_db", "(", "name", "=", "name", ",", "site", "=", "_site", ")", "if", "'postgres'", "in", "r", ".", "env", ".", "db_engine", "or", "'postgis'", "in", "r", ".", "env", ".", "db_engine", ":", "paths", "=", "list", "(", "get_paths", "(", "'postgresql'", ")", ")", "run_paths", "(", "paths", "=", "paths", ",", "cmd_template", "=", "\"psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}\"", ")", "elif", "'mysql'", "in", "r", ".", "env", ".", "db_engine", ":", "paths", "=", "list", "(", "get_paths", "(", "'mysql'", ")", ")", "run_paths", "(", "paths", "=", "paths", ",", "cmd_template", "=", "\"mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}\"", ")", "else", ":", "raise", "NotImplementedError" ]
38.12037
18.675926
def updateImage(self, imgdata, xaxis=None, yaxis=None): """Updates the Widget image directly. :type imgdata: numpy.ndarray, see :meth:`pyqtgraph:pyqtgraph.ImageItem.setImage` :param xaxis: x-axis values, length should match dimension 1 of imgdata :param yaxis: y-axis values, length should match dimension 0 of imgdata """ imgdata = imgdata.T self.img.setImage(imgdata) if xaxis is not None and yaxis is not None: xscale = 1.0/(imgdata.shape[0]/xaxis[-1]) yscale = 1.0/(imgdata.shape[1]/yaxis[-1]) self.resetScale() self.img.scale(xscale, yscale) self.imgScale = (xscale, yscale) self.imageArray = np.fliplr(imgdata) self.updateColormap()
[ "def", "updateImage", "(", "self", ",", "imgdata", ",", "xaxis", "=", "None", ",", "yaxis", "=", "None", ")", ":", "imgdata", "=", "imgdata", ".", "T", "self", ".", "img", ".", "setImage", "(", "imgdata", ")", "if", "xaxis", "is", "not", "None", "and", "yaxis", "is", "not", "None", ":", "xscale", "=", "1.0", "/", "(", "imgdata", ".", "shape", "[", "0", "]", "/", "xaxis", "[", "-", "1", "]", ")", "yscale", "=", "1.0", "/", "(", "imgdata", ".", "shape", "[", "1", "]", "/", "yaxis", "[", "-", "1", "]", ")", "self", ".", "resetScale", "(", ")", "self", ".", "img", ".", "scale", "(", "xscale", ",", "yscale", ")", "self", ".", "imgScale", "=", "(", "xscale", ",", "yscale", ")", "self", ".", "imageArray", "=", "np", ".", "fliplr", "(", "imgdata", ")", "self", ".", "updateColormap", "(", ")" ]
45.352941
15.352941
def getExperiments(uuid: str): """ list active (running or completed) experiments""" return jsonify([x.deserialize() for x in Experiment.query.all()])
[ "def", "getExperiments", "(", "uuid", ":", "str", ")", ":", "return", "jsonify", "(", "[", "x", ".", "deserialize", "(", ")", "for", "x", "in", "Experiment", ".", "query", ".", "all", "(", ")", "]", ")" ]
52
13
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = winchester_config['database']['url'] context.configure(url=url) with context.begin_transaction(): context.run_migrations()
[ "def", "run_migrations_offline", "(", ")", ":", "url", "=", "winchester_config", "[", "'database'", "]", "[", "'url'", "]", "context", ".", "configure", "(", "url", "=", "url", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")" ]
29.411765
16.529412
def ready_argument_list(self, arguments): """ready argument list to be passed to the kernel, allocates gpu mem :param arguments: List of arguments to be passed to the kernel. The order should match the argument list on the OpenCL kernel. Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on. :type arguments: list(numpy objects) :returns: A list of arguments that can be passed to an OpenCL kernel. :rtype: list( pyopencl.Buffer, numpy.int32, ... ) """ gpu_args = [] for arg in arguments: # if arg i is a numpy array copy to device if isinstance(arg, numpy.ndarray): gpu_args.append(cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf=arg)) else: # if not an array, just pass argument along gpu_args.append(arg) return gpu_args
[ "def", "ready_argument_list", "(", "self", ",", "arguments", ")", ":", "gpu_args", "=", "[", "]", "for", "arg", "in", "arguments", ":", "# if arg i is a numpy array copy to device", "if", "isinstance", "(", "arg", ",", "numpy", ".", "ndarray", ")", ":", "gpu_args", ".", "append", "(", "cl", ".", "Buffer", "(", "self", ".", "ctx", ",", "self", ".", "mf", ".", "READ_WRITE", "|", "self", ".", "mf", ".", "COPY_HOST_PTR", ",", "hostbuf", "=", "arg", ")", ")", "else", ":", "# if not an array, just pass argument along", "gpu_args", ".", "append", "(", "arg", ")", "return", "gpu_args" ]
48.473684
21.894737
def next_frame_glow_shapes(): """Hparams for qualitative and quantitative results on shapes dataset.""" hparams = next_frame_glow_bair_quant() hparams.video_num_input_frames = 1 hparams.video_num_target_frames = 2 hparams.num_train_frames = 2 hparams.num_cond_latents = 1 hparams.coupling = "additive" hparams.coupling_width = 512 hparams.latent_encoder_depth = 10 hparams.latent_skip = False hparams.learning_rate_constant = 1e-4 hparams.batch_size = 10 return hparams
[ "def", "next_frame_glow_shapes", "(", ")", ":", "hparams", "=", "next_frame_glow_bair_quant", "(", ")", "hparams", ".", "video_num_input_frames", "=", "1", "hparams", ".", "video_num_target_frames", "=", "2", "hparams", ".", "num_train_frames", "=", "2", "hparams", ".", "num_cond_latents", "=", "1", "hparams", ".", "coupling", "=", "\"additive\"", "hparams", ".", "coupling_width", "=", "512", "hparams", ".", "latent_encoder_depth", "=", "10", "hparams", ".", "latent_skip", "=", "False", "hparams", ".", "learning_rate_constant", "=", "1e-4", "hparams", ".", "batch_size", "=", "10", "return", "hparams" ]
34.428571
8.071429