code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def _is_path(s):
    """Return whether an object is a path."""
    # Only string-like objects can possibly name a filesystem path.
    if not isinstance(s, string_types):
        return False
    try:
        return op.exists(s)
    except (OSError, ValueError):
        # e.g. embedded NUL bytes or an over-long name; treat as "not a path"
        return False
def function[_is_path, parameter[s]]: constant[Return whether an object is a path.] if call[name[isinstance], parameter[name[s], name[string_types]]] begin[:] <ast.Try object at 0x7da207f01d50>
keyword[def] identifier[_is_path] ( identifier[s] ): literal[string] keyword[if] identifier[isinstance] ( identifier[s] , identifier[string_types] ): keyword[try] : keyword[return] identifier[op] . identifier[exists] ( identifier[s] ) keyword[except] ( identifier[OSError] , identifier[ValueError] ): keyword[return] keyword[False] keyword[else] : keyword[return] keyword[False]
def _is_path(s): """Return whether an object is a path.""" if isinstance(s, string_types): try: return op.exists(s) # depends on [control=['try'], data=[]] except (OSError, ValueError): return False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: return False
def retrieve_page(self, method, path, post_params=None, headers=None,
                  status=200, username=None, password=None, *args, **kwargs):
    """
    Makes the actual request. This will also go through and generate the
    needed steps to make the request, i.e. basic auth.

    ``method``:
        Any supported HTTP methods defined in :rfc:`2616`.
    ``path``:
        Absolute or relative path. See :meth:`_prepare_uri` for more detail.
    ``post_params``:
        Dictionary of key/value pairs to be added as `POST` parameters.
    ``headers``:
        Dictionary of key/value pairs to be added to the HTTP headers.
    ``status``:
        Will error out if the HTTP status code does not match this value.
        Set this to `None` to disable checking.
    ``username``, ``password``:
        Username and password for basic auth; see
        :meth:`_prepare_basicauth` for more detail.

    An important note is that when ``post_params`` is specified, its
    behavior depends on the ``method``. That is, for `PUT` and `POST`
    requests, the dictionary is multipart encoded and put into the body of
    the request. For everything else, it is added as a query string to the
    URL.
    """
    # Fix: the original defaults were mutable ({}), which Python evaluates
    # once at definition time and shares across all calls. Use None
    # sentinels instead; behavior for callers is unchanged.
    if post_params is None:
        post_params = {}
    # Copy headers so that making changes here won't affect the original
    headers = {} if headers is None else headers.copy()

    # Update basic auth information
    basicauth = self._prepare_basicauth(username, password)
    if basicauth:
        headers.update([basicauth])

    # If this is a POST or PUT, we can put the data into the body as
    # form-data encoded; otherwise, it should be part of the query string.
    if method in ["PUT", "POST"]:
        datagen, form_hdrs = poster.encode.multipart_encode(post_params)
        body = "".join(datagen)
        headers.update(form_hdrs)
        uri = self._prepare_uri(path)
    else:
        body = ""
        uri = self._prepare_uri(path, post_params)

    # Make the actual request
    response = self._make_request(uri, method, body, headers)

    # Assert that the status we received was expected.
    if status:
        real_status = int(response.status_int)
        assert real_status == int(status), \
            "expected %s, received %s." % (status, real_status)

    return response
def function[retrieve_page, parameter[self, method, path, post_params, headers, status, username, password]]: constant[ Makes the actual request. This will also go through and generate the needed steps to make the request, i.e. basic auth. ``method``: Any supported HTTP methods defined in :rfc:`2616`. ``path``: Absolute or relative path. See :meth:`_prepare_uri` for more detail. ``post_params``: Dictionary of key/value pairs to be added as `POST` parameters. ``headers``: Dictionary of key/value pairs to be added to the HTTP headers. ``status``: Will error out if the HTTP status code does not match this value. Set this to `None` to disable checking. ``username``, ``password``: Username and password for basic auth; see :meth:`_prepare_basicauth` for more detail. An important note is that when ``post_params`` is specified, its behavior depends on the ``method``. That is, for `PUT` and `POST` requests, the dictionary is multipart encoded and put into the body of the request. For everything else, it is added as a query string to the URL. 
] variable[headers] assign[=] call[name[headers].copy, parameter[]] variable[basicauth] assign[=] call[name[self]._prepare_basicauth, parameter[name[username], name[password]]] if name[basicauth] begin[:] call[name[headers].update, parameter[list[[<ast.Name object at 0x7da1b15b1ab0>]]]] if compare[name[method] in list[[<ast.Constant object at 0x7da1b15b34f0>, <ast.Constant object at 0x7da1b15b26b0>]]] begin[:] <ast.Tuple object at 0x7da1b15b32b0> assign[=] call[name[poster].encode.multipart_encode, parameter[name[post_params]]] variable[body] assign[=] call[constant[].join, parameter[name[datagen]]] call[name[headers].update, parameter[name[form_hdrs]]] variable[uri] assign[=] call[name[self]._prepare_uri, parameter[name[path]]] variable[response] assign[=] call[name[self]._make_request, parameter[name[uri], name[method], name[body], name[headers]]] if name[status] begin[:] variable[real_status] assign[=] call[name[int], parameter[name[response].status_int]] assert[compare[name[real_status] equal[==] call[name[int], parameter[name[status]]]]] return[name[response]]
keyword[def] identifier[retrieve_page] ( identifier[self] , identifier[method] , identifier[path] , identifier[post_params] ={}, identifier[headers] ={}, identifier[status] = literal[int] , identifier[username] = keyword[None] , identifier[password] = keyword[None] , * identifier[args] ,** identifier[kwargs] ): literal[string] identifier[headers] = identifier[headers] . identifier[copy] () identifier[basicauth] = identifier[self] . identifier[_prepare_basicauth] ( identifier[username] , identifier[password] ) keyword[if] identifier[basicauth] : identifier[headers] . identifier[update] ([ identifier[basicauth] ]) keyword[if] identifier[method] keyword[in] [ literal[string] , literal[string] ]: identifier[datagen] , identifier[form_hdrs] = identifier[poster] . identifier[encode] . identifier[multipart_encode] ( identifier[post_params] ) identifier[body] = literal[string] . identifier[join] ( identifier[datagen] ) identifier[headers] . identifier[update] ( identifier[form_hdrs] ) identifier[uri] = identifier[self] . identifier[_prepare_uri] ( identifier[path] ) keyword[else] : identifier[body] = literal[string] identifier[uri] = identifier[self] . identifier[_prepare_uri] ( identifier[path] , identifier[post_params] ) identifier[response] = identifier[self] . identifier[_make_request] ( identifier[uri] , identifier[method] , identifier[body] , identifier[headers] ) keyword[if] identifier[status] : identifier[real_status] = identifier[int] ( identifier[response] . identifier[status_int] ) keyword[assert] identifier[real_status] == identifier[int] ( identifier[status] ), literal[string] %( identifier[status] , identifier[real_status] ) keyword[return] identifier[response]
def retrieve_page(self, method, path, post_params={}, headers={}, status=200, username=None, password=None, *args, **kwargs): """ Makes the actual request. This will also go through and generate the needed steps to make the request, i.e. basic auth. ``method``: Any supported HTTP methods defined in :rfc:`2616`. ``path``: Absolute or relative path. See :meth:`_prepare_uri` for more detail. ``post_params``: Dictionary of key/value pairs to be added as `POST` parameters. ``headers``: Dictionary of key/value pairs to be added to the HTTP headers. ``status``: Will error out if the HTTP status code does not match this value. Set this to `None` to disable checking. ``username``, ``password``: Username and password for basic auth; see :meth:`_prepare_basicauth` for more detail. An important note is that when ``post_params`` is specified, its behavior depends on the ``method``. That is, for `PUT` and `POST` requests, the dictionary is multipart encoded and put into the body of the request. For everything else, it is added as a query string to the URL. """ # Copy headers so that making changes here won't affect the original headers = headers.copy() # Update basic auth information basicauth = self._prepare_basicauth(username, password) if basicauth: headers.update([basicauth]) # depends on [control=['if'], data=[]] # If this is a POST or PUT, we can put the data into the body as # form-data encoded; otherwise, it should be part of the query string. if method in ['PUT', 'POST']: (datagen, form_hdrs) = poster.encode.multipart_encode(post_params) body = ''.join(datagen) headers.update(form_hdrs) uri = self._prepare_uri(path) # depends on [control=['if'], data=[]] else: body = '' uri = self._prepare_uri(path, post_params) # Make the actual request response = self._make_request(uri, method, body, headers) # Assert that the status we received was expected. if status: real_status = int(response.status_int) assert real_status == int(status), 'expected %s, received %s.' 
% (status, real_status) # depends on [control=['if'], data=[]] return response
def _pred(aclass): """ :param aclass :return: boolean """ isaclass = inspect.isclass(aclass) return isaclass and aclass.__module__ == _pred.__module__
def function[_pred, parameter[aclass]]: constant[ :param aclass :return: boolean ] variable[isaclass] assign[=] call[name[inspect].isclass, parameter[name[aclass]]] return[<ast.BoolOp object at 0x7da2047e8fa0>]
keyword[def] identifier[_pred] ( identifier[aclass] ): literal[string] identifier[isaclass] = identifier[inspect] . identifier[isclass] ( identifier[aclass] ) keyword[return] identifier[isaclass] keyword[and] identifier[aclass] . identifier[__module__] == identifier[_pred] . identifier[__module__]
def _pred(aclass): """ :param aclass :return: boolean """ isaclass = inspect.isclass(aclass) return isaclass and aclass.__module__ == _pred.__module__
def _double_prefix(self): """Grow the given deque by doubling, but don't split the second chunk just because the first one is small. """ new_len = max(len(self._buf[0]) * 2, (len(self._buf[0]) + len(self._buf[1]))) self._merge_prefix(new_len)
def function[_double_prefix, parameter[self]]: constant[Grow the given deque by doubling, but don't split the second chunk just because the first one is small. ] variable[new_len] assign[=] call[name[max], parameter[binary_operation[call[name[len], parameter[call[name[self]._buf][constant[0]]]] * constant[2]], binary_operation[call[name[len], parameter[call[name[self]._buf][constant[0]]]] + call[name[len], parameter[call[name[self]._buf][constant[1]]]]]]] call[name[self]._merge_prefix, parameter[name[new_len]]]
keyword[def] identifier[_double_prefix] ( identifier[self] ): literal[string] identifier[new_len] = identifier[max] ( identifier[len] ( identifier[self] . identifier[_buf] [ literal[int] ])* literal[int] ,( identifier[len] ( identifier[self] . identifier[_buf] [ literal[int] ])+ identifier[len] ( identifier[self] . identifier[_buf] [ literal[int] ]))) identifier[self] . identifier[_merge_prefix] ( identifier[new_len] )
def _double_prefix(self): """Grow the given deque by doubling, but don't split the second chunk just because the first one is small. """ new_len = max(len(self._buf[0]) * 2, len(self._buf[0]) + len(self._buf[1])) self._merge_prefix(new_len)
def path(self, value=None):
    """
    Return or set the path

    :param string value: the new path to use
    :returns: string or new :class:`URL` instance
    """
    # Getter: no argument given, just report the current path.
    if value is None:
        return self._tuple.path
    # Setter: normalize to a leading slash, percent-encode, and return a
    # new URL instance (this object is immutable).
    normalized = value if value.startswith('/') else '/' + value
    return URL._mutate(self, path=unicode_quote(normalized))
def function[path, parameter[self, value]]: constant[ Return or set the path :param string value: the new path to use :returns: string or new :class:`URL` instance ] if compare[name[value] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da18eb54a60> begin[:] variable[value] assign[=] binary_operation[constant[/] + name[value]] variable[encoded_value] assign[=] call[name[unicode_quote], parameter[name[value]]] return[call[name[URL]._mutate, parameter[name[self]]]] return[name[self]._tuple.path]
keyword[def] identifier[path] ( identifier[self] , identifier[value] = keyword[None] ): literal[string] keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[value] . identifier[startswith] ( literal[string] ): identifier[value] = literal[string] + identifier[value] identifier[encoded_value] = identifier[unicode_quote] ( identifier[value] ) keyword[return] identifier[URL] . identifier[_mutate] ( identifier[self] , identifier[path] = identifier[encoded_value] ) keyword[return] identifier[self] . identifier[_tuple] . identifier[path]
def path(self, value=None): """ Return or set the path :param string value: the new path to use :returns: string or new :class:`URL` instance """ if value is not None: if not value.startswith('/'): value = '/' + value # depends on [control=['if'], data=[]] encoded_value = unicode_quote(value) return URL._mutate(self, path=encoded_value) # depends on [control=['if'], data=['value']] return self._tuple.path
def rpc_get_name_history_page(self, name, page, **con_info):
    """
    Get the list of history entries for a name or subdomain's history, paginated.
    Small pages correspond to later history (page = 0 is the page of last updates)
    Page size is 20 rows.
    Return {'status': True, 'history': [...]} on success
    Return {'error': ...} on error
    """
    # Reject anything that is neither an on-chain name nor a subdomain.
    if not (check_name(name) or check_subdomain(name)):
        return {'error': 'invalid name', 'http_status': 400}

    if not check_count(page):
        return {'error': 'invalid page', 'http_status': 400}

    # Fixed page size of 20 rows.
    offset = page * 20
    count = (page + 1) * 20

    if check_name(name):
        # on-chain name: read from the chain-state database
        db = get_db_state(self.working_dir)
        history_data = db.get_name_history(name, offset, count, reverse=True)
        db.close()
    else:
        # off-chain name (subdomain)
        history_data = get_subdomain_history(name, offset=offset, count=count, json=True, reverse=True)

    if len(history_data) == 0:
        # name didn't exist
        return {'error': 'Not found', 'http_status': 404}

    return self.success_response({'history': history_data})
def function[rpc_get_name_history_page, parameter[self, name, page]]: constant[ Get the list of history entries for a name or subdomain's history, paginated. Small pages correspond to later history (page = 0 is the page of last updates) Page size is 20 rows. Return {'status': True, 'history': [...]} on success Return {'error': ...} on error ] if <ast.BoolOp object at 0x7da18f00e140> begin[:] return[dictionary[[<ast.Constant object at 0x7da18f00d720>, <ast.Constant object at 0x7da18f00f880>], [<ast.Constant object at 0x7da18f00e650>, <ast.Constant object at 0x7da18f00fc70>]]] if <ast.UnaryOp object at 0x7da18f00f040> begin[:] return[dictionary[[<ast.Constant object at 0x7da18f00cc40>, <ast.Constant object at 0x7da18f00ee00>], [<ast.Constant object at 0x7da18f00cbe0>, <ast.Constant object at 0x7da18f00fd00>]]] variable[offset] assign[=] binary_operation[name[page] * constant[20]] variable[count] assign[=] binary_operation[binary_operation[name[page] + constant[1]] * constant[20]] variable[history_data] assign[=] constant[None] if call[name[check_name], parameter[name[name]]] begin[:] variable[db] assign[=] call[name[get_db_state], parameter[name[self].working_dir]] variable[history_data] assign[=] call[name[db].get_name_history, parameter[name[name], name[offset], name[count]]] call[name[db].close, parameter[]] if compare[call[name[len], parameter[name[history_data]]] equal[==] constant[0]] begin[:] return[dictionary[[<ast.Constant object at 0x7da18f00e320>, <ast.Constant object at 0x7da18f00c3a0>], [<ast.Constant object at 0x7da18f00c5e0>, <ast.Constant object at 0x7da18f00d060>]]] return[call[name[self].success_response, parameter[dictionary[[<ast.Constant object at 0x7da18f00c850>], [<ast.Name object at 0x7da18f00f0a0>]]]]]
keyword[def] identifier[rpc_get_name_history_page] ( identifier[self] , identifier[name] , identifier[page] ,** identifier[con_info] ): literal[string] keyword[if] keyword[not] identifier[check_name] ( identifier[name] ) keyword[and] keyword[not] identifier[check_subdomain] ( identifier[name] ): keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[if] keyword[not] identifier[check_count] ( identifier[page] ): keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } identifier[offset] = identifier[page] * literal[int] identifier[count] =( identifier[page] + literal[int] )* literal[int] identifier[history_data] = keyword[None] keyword[if] identifier[check_name] ( identifier[name] ): identifier[db] = identifier[get_db_state] ( identifier[self] . identifier[working_dir] ) identifier[history_data] = identifier[db] . identifier[get_name_history] ( identifier[name] , identifier[offset] , identifier[count] , identifier[reverse] = keyword[True] ) identifier[db] . identifier[close] () keyword[else] : identifier[history_data] = identifier[get_subdomain_history] ( identifier[name] , identifier[offset] = identifier[offset] , identifier[count] = identifier[count] , identifier[json] = keyword[True] , identifier[reverse] = keyword[True] ) keyword[if] identifier[len] ( identifier[history_data] )== literal[int] : keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[return] identifier[self] . identifier[success_response] ({ literal[string] : identifier[history_data] })
def rpc_get_name_history_page(self, name, page, **con_info): """ Get the list of history entries for a name or subdomain's history, paginated. Small pages correspond to later history (page = 0 is the page of last updates) Page size is 20 rows. Return {'status': True, 'history': [...]} on success Return {'error': ...} on error """ if not check_name(name) and (not check_subdomain(name)): return {'error': 'invalid name', 'http_status': 400} # depends on [control=['if'], data=[]] if not check_count(page): return {'error': 'invalid page', 'http_status': 400} # depends on [control=['if'], data=[]] offset = page * 20 count = (page + 1) * 20 history_data = None if check_name(name): # on-chain name db = get_db_state(self.working_dir) history_data = db.get_name_history(name, offset, count, reverse=True) db.close() # depends on [control=['if'], data=[]] else: # off-chain name history_data = get_subdomain_history(name, offset=offset, count=count, json=True, reverse=True) if len(history_data) == 0: # name didn't exist return {'error': 'Not found', 'http_status': 404} # depends on [control=['if'], data=[]] return self.success_response({'history': history_data})
def setup_dir(self):
    """Change directory for script if necessary."""
    # CLI option wins; fall back to the config file's directory setting.
    cd = self.opts.cd or self.config['crony'].get('directory')
    if not cd:
        return
    self.logger.debug(f'Adding cd to {cd}')
    self.cmd = f'cd {cd} && {self.cmd}'
def function[setup_dir, parameter[self]]: constant[Change directory for script if necessary.] variable[cd] assign[=] <ast.BoolOp object at 0x7da20c6a8160> if name[cd] begin[:] call[name[self].logger.debug, parameter[<ast.JoinedStr object at 0x7da20e9b06d0>]] name[self].cmd assign[=] <ast.JoinedStr object at 0x7da20e9b2c50>
keyword[def] identifier[setup_dir] ( identifier[self] ): literal[string] identifier[cd] = identifier[self] . identifier[opts] . identifier[cd] keyword[or] identifier[self] . identifier[config] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[if] identifier[cd] : identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[cmd] = literal[string]
def setup_dir(self): """Change directory for script if necessary.""" cd = self.opts.cd or self.config['crony'].get('directory') if cd: self.logger.debug(f'Adding cd to {cd}') self.cmd = f'cd {cd} && {self.cmd}' # depends on [control=['if'], data=[]]
def messages(self):
    """
    Access the messages

    :returns: twilio.rest.chat.v1.service.channel.message.MessageList
    :rtype: twilio.rest.chat.v1.service.channel.message.MessageList
    """
    # Build the sub-resource lazily on first access and memoize it.
    cached = self._messages
    if cached is None:
        cached = MessageList(
            self._version,
            service_sid=self._solution['service_sid'],
            channel_sid=self._solution['sid'],
        )
        self._messages = cached
    return cached
def function[messages, parameter[self]]: constant[ Access the messages :returns: twilio.rest.chat.v1.service.channel.message.MessageList :rtype: twilio.rest.chat.v1.service.channel.message.MessageList ] if compare[name[self]._messages is constant[None]] begin[:] name[self]._messages assign[=] call[name[MessageList], parameter[name[self]._version]] return[name[self]._messages]
keyword[def] identifier[messages] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_messages] keyword[is] keyword[None] : identifier[self] . identifier[_messages] = identifier[MessageList] ( identifier[self] . identifier[_version] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[channel_sid] = identifier[self] . identifier[_solution] [ literal[string] ], ) keyword[return] identifier[self] . identifier[_messages]
def messages(self): """ Access the messages :returns: twilio.rest.chat.v1.service.channel.message.MessageList :rtype: twilio.rest.chat.v1.service.channel.message.MessageList """ if self._messages is None: self._messages = MessageList(self._version, service_sid=self._solution['service_sid'], channel_sid=self._solution['sid']) # depends on [control=['if'], data=[]] return self._messages
def datadir_exists(name):
    '''
    .. versionadded:: 2016.3.0

    Checks if postgres data directory has been initialized

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.datadir_exists '/var/lib/pgsql/data'

    name
        Name of the directory to check
    '''
    # An initialized data dir contains both of these marker files.
    required = ('PG_VERSION', 'postgresql.conf')
    return all(os.path.isfile(os.path.join(name, fname)) for fname in required)
def function[datadir_exists, parameter[name]]: constant[ .. versionadded:: 2016.3.0 Checks if postgres data directory has been initialized CLI Example: .. code-block:: bash salt '*' postgres.datadir_exists '/var/lib/pgsql/data' name Name of the directory to check ] variable[_version_file] assign[=] call[name[os].path.join, parameter[name[name], constant[PG_VERSION]]] variable[_config_file] assign[=] call[name[os].path.join, parameter[name[name], constant[postgresql.conf]]] return[<ast.BoolOp object at 0x7da20e9b1b10>]
keyword[def] identifier[datadir_exists] ( identifier[name] ): literal[string] identifier[_version_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[name] , literal[string] ) identifier[_config_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[name] , literal[string] ) keyword[return] identifier[os] . identifier[path] . identifier[isfile] ( identifier[_version_file] ) keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[_config_file] )
def datadir_exists(name): """ .. versionadded:: 2016.3.0 Checks if postgres data directory has been initialized CLI Example: .. code-block:: bash salt '*' postgres.datadir_exists '/var/lib/pgsql/data' name Name of the directory to check """ _version_file = os.path.join(name, 'PG_VERSION') _config_file = os.path.join(name, 'postgresql.conf') return os.path.isfile(_version_file) and os.path.isfile(_config_file)
def dump_memdb(self, with_source_contents=True, with_names=True):
    """Dumps a sourcemap in MemDB format into bytes."""
    out_len = _ffi.new('unsigned int *')
    raw = rustcall(
        _lib.lsm_view_dump_memdb,
        self._get_ptr(),
        out_len,
        with_source_contents,
        with_names,
    )
    try:
        return _ffi.unpack(raw, out_len[0])
    finally:
        # Always release the native buffer, even if unpack raises.
        _lib.lsm_buffer_free(raw)
def function[dump_memdb, parameter[self, with_source_contents, with_names]]: constant[Dumps a sourcemap in MemDB format into bytes.] variable[len_out] assign[=] call[name[_ffi].new, parameter[constant[unsigned int *]]] variable[buf] assign[=] call[name[rustcall], parameter[name[_lib].lsm_view_dump_memdb, call[name[self]._get_ptr, parameter[]], name[len_out], name[with_source_contents], name[with_names]]] <ast.Try object at 0x7da18ede7be0> return[name[rv]]
keyword[def] identifier[dump_memdb] ( identifier[self] , identifier[with_source_contents] = keyword[True] , identifier[with_names] = keyword[True] ): literal[string] identifier[len_out] = identifier[_ffi] . identifier[new] ( literal[string] ) identifier[buf] = identifier[rustcall] ( identifier[_lib] . identifier[lsm_view_dump_memdb] , identifier[self] . identifier[_get_ptr] (), identifier[len_out] , identifier[with_source_contents] , identifier[with_names] ) keyword[try] : identifier[rv] = identifier[_ffi] . identifier[unpack] ( identifier[buf] , identifier[len_out] [ literal[int] ]) keyword[finally] : identifier[_lib] . identifier[lsm_buffer_free] ( identifier[buf] ) keyword[return] identifier[rv]
def dump_memdb(self, with_source_contents=True, with_names=True): """Dumps a sourcemap in MemDB format into bytes.""" len_out = _ffi.new('unsigned int *') buf = rustcall(_lib.lsm_view_dump_memdb, self._get_ptr(), len_out, with_source_contents, with_names) try: rv = _ffi.unpack(buf, len_out[0]) # depends on [control=['try'], data=[]] finally: _lib.lsm_buffer_free(buf) return rv
def part(f, ii):
    '''
    part(u, ii) for constant or constant potential u yields a constant-potential form of u[ii].
    part(f, ii) for potential function f yields a potential function g(x) that is equivalent to
      f(x[ii]).
    '''
    pot = to_potential(f)
    # Constant potentials can be sliced directly; anything else is handled
    # by composing with a part-selection potential.
    if is_const_potential(pot):
        return PotentialConstant(pot.c[ii])
    return compose(PotentialPart(ii), to_potential(pot))
def function[part, parameter[f, ii]]: constant[ part(u, ii) for constant or constant potential u yields a constant-potential form of u[ii]. part(f, ii) for potential function f yields a potential function g(x) that is equivalent to f(x[ii]). ] variable[f] assign[=] call[name[to_potential], parameter[name[f]]] if call[name[is_const_potential], parameter[name[f]]] begin[:] return[call[name[PotentialConstant], parameter[call[name[f].c][name[ii]]]]]
keyword[def] identifier[part] ( identifier[f] , identifier[ii] ): literal[string] identifier[f] = identifier[to_potential] ( identifier[f] ) keyword[if] identifier[is_const_potential] ( identifier[f] ): keyword[return] identifier[PotentialConstant] ( identifier[f] . identifier[c] [ identifier[ii] ]) keyword[else] : keyword[return] identifier[compose] ( identifier[PotentialPart] ( identifier[ii] ), identifier[to_potential] ( identifier[f] ))
def part(f, ii): """ part(u, ii) for constant or constant potential u yields a constant-potential form of u[ii]. part(f, ii) for potential function f yields a potential function g(x) that is equivalent to f(x[ii]). """ f = to_potential(f) if is_const_potential(f): return PotentialConstant(f.c[ii]) # depends on [control=['if'], data=[]] else: return compose(PotentialPart(ii), to_potential(f))
def get_env(self):
    """Return the environment dict to use for the Spawner.

    See also: jupyterhub.Spawner.get_env
    """
    env = super(KubeSpawner, self).get_env()
    # JUPYTER_IMAGE_SPEC is the deprecated name; JUPYTER_IMAGE supersedes it.
    for key in ('JUPYTER_IMAGE_SPEC', 'JUPYTER_IMAGE'):
        env[key] = self.image
    return env
def function[get_env, parameter[self]]: constant[Return the environment dict to use for the Spawner. See also: jupyterhub.Spawner.get_env ] variable[env] assign[=] call[call[name[super], parameter[name[KubeSpawner], name[self]]].get_env, parameter[]] call[name[env]][constant[JUPYTER_IMAGE_SPEC]] assign[=] name[self].image call[name[env]][constant[JUPYTER_IMAGE]] assign[=] name[self].image return[name[env]]
keyword[def] identifier[get_env] ( identifier[self] ): literal[string] identifier[env] = identifier[super] ( identifier[KubeSpawner] , identifier[self] ). identifier[get_env] () identifier[env] [ literal[string] ]= identifier[self] . identifier[image] identifier[env] [ literal[string] ]= identifier[self] . identifier[image] keyword[return] identifier[env]
def get_env(self): """Return the environment dict to use for the Spawner. See also: jupyterhub.Spawner.get_env """ env = super(KubeSpawner, self).get_env() # deprecate image env['JUPYTER_IMAGE_SPEC'] = self.image env['JUPYTER_IMAGE'] = self.image return env
def is_function_or_method(obj): """Check if an object is a function or method. Args: obj: The Python object in question. Returns: True if the object is an function or method. """ return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj)
def function[is_function_or_method, parameter[obj]]: constant[Check if an object is a function or method. Args: obj: The Python object in question. Returns: True if the object is an function or method. ] return[<ast.BoolOp object at 0x7da20e963070>]
keyword[def] identifier[is_function_or_method] ( identifier[obj] ): literal[string] keyword[return] identifier[inspect] . identifier[isfunction] ( identifier[obj] ) keyword[or] identifier[inspect] . identifier[ismethod] ( identifier[obj] ) keyword[or] identifier[is_cython] ( identifier[obj] )
def is_function_or_method(obj): """Check if an object is a function or method. Args: obj: The Python object in question. Returns: True if the object is an function or method. """ return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj)
def _get_csv_from_model(models, crumbs, csvs):
    """
    Get csv from model data

    :param dict models: Metadata
    :param str crumbs: Crumbs
    :param dict csvs: Csv
    :return dict models: Metadata
    :return dict csvs: Csv
    """
    logger_csvs.info("enter get_csv_from_model: {}".format(crumbs))
    _idx = 0
    # Each model may carry up to three table kinds; handle them uniformly.
    _table_kinds = (
        ("distributionTable", "distribution"),
        ("summaryTable", "summary"),
        ("ensembleTable", "ensemble"),
    )
    try:
        for _name, _model in models.items():
            for _key, _label in _table_kinds:
                if _key in _model:
                    models[_name][_key], csvs = _get_csv_from_table(
                        _model[_key],
                        "{}{}{}".format(crumbs, _idx, _label),
                        csvs)
            _idx += 1
    except Exception as e:
        # Best-effort: report and fall through with whatever was collected.
        print("Error: get_csv_from_model: {}, {}".format(crumbs, e))
        logger_csvs.error("Error: get_csv_from_model: {}, {}".format(crumbs, e))

    return models, csvs
def function[_get_csv_from_model, parameter[models, crumbs, csvs]]: constant[ Get csv from model data :param dict models: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict models: Metadata :return dict csvs: Csv ] call[name[logger_csvs].info, parameter[call[constant[enter get_csv_from_model: {}].format, parameter[name[crumbs]]]]] variable[_idx] assign[=] constant[0] <ast.Try object at 0x7da1b195f8e0> return[tuple[[<ast.Name object at 0x7da207f01a80>, <ast.Name object at 0x7da207f03460>]]]
keyword[def] identifier[_get_csv_from_model] ( identifier[models] , identifier[crumbs] , identifier[csvs] ): literal[string] identifier[logger_csvs] . identifier[info] ( literal[string] . identifier[format] ( identifier[crumbs] )) identifier[_idx] = literal[int] keyword[try] : keyword[for] identifier[_name] , identifier[_model] keyword[in] identifier[models] . identifier[items] (): keyword[if] literal[string] keyword[in] identifier[_model] : identifier[models] [ identifier[_name] ][ literal[string] ], identifier[csvs] = identifier[_get_csv_from_table] ( identifier[_model] [ literal[string] ], literal[string] . identifier[format] ( identifier[crumbs] , identifier[_idx] , literal[string] ), identifier[csvs] ) keyword[if] literal[string] keyword[in] identifier[_model] : identifier[models] [ identifier[_name] ][ literal[string] ], identifier[csvs] = identifier[_get_csv_from_table] ( identifier[_model] [ literal[string] ], literal[string] . identifier[format] ( identifier[crumbs] , identifier[_idx] , literal[string] ), identifier[csvs] ) keyword[if] literal[string] keyword[in] identifier[_model] : identifier[models] [ identifier[_name] ][ literal[string] ], identifier[csvs] = identifier[_get_csv_from_table] ( identifier[_model] [ literal[string] ], literal[string] . identifier[format] ( identifier[crumbs] , identifier[_idx] , literal[string] ), identifier[csvs] ) identifier[_idx] += literal[int] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( literal[string] . identifier[format] ( identifier[crumbs] , identifier[e] )) identifier[logger_csvs] . identifier[error] ( literal[string] . identifier[format] ( identifier[crumbs] , identifier[e] )) keyword[return] identifier[models] , identifier[csvs]
def _get_csv_from_model(models, crumbs, csvs): """ Get csv from model data :param dict models: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict models: Metadata :return dict csvs: Csv """ logger_csvs.info('enter get_csv_from_model: {}'.format(crumbs)) _idx = 0 try: for (_name, _model) in models.items(): if 'distributionTable' in _model: (models[_name]['distributionTable'], csvs) = _get_csv_from_table(_model['distributionTable'], '{}{}{}'.format(crumbs, _idx, 'distribution'), csvs) # depends on [control=['if'], data=['_model']] if 'summaryTable' in _model: (models[_name]['summaryTable'], csvs) = _get_csv_from_table(_model['summaryTable'], '{}{}{}'.format(crumbs, _idx, 'summary'), csvs) # depends on [control=['if'], data=['_model']] if 'ensembleTable' in _model: (models[_name]['ensembleTable'], csvs) = _get_csv_from_table(_model['ensembleTable'], '{}{}{}'.format(crumbs, _idx, 'ensemble'), csvs) # depends on [control=['if'], data=['_model']] _idx += 1 # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: print('Error: get_csv_from_model: {}, {}'.format(crumbs, e)) logger_csvs.error('Error: get_csv_from_model: {}, {}'.format(crumbs, e)) # depends on [control=['except'], data=['e']] return (models, csvs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the
    default frequency

    .. deprecated:: 0.21.0

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string, default None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : string, default None
        Name of the resulting DatetimeIndex
    weekmask : string, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``;
        only consumed when ``freq='C'`` (otherwise forwarded to
        ``date_range`` via ``**kwargs``)
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``; only consumed when ``freq='C'``
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly
    two must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Returns
    -------
    rng : DatetimeIndex
    """
    # Deprecated entry point: steer callers to bdate_range with an explicit
    # custom frequency instead.
    warnings.warn("cdate_range is deprecated and will be removed in a future "
                  "version, instead use pd.bdate_range(..., freq='{freq}')"
                  .format(freq=freq), FutureWarning, stacklevel=2)

    if freq == 'C':
        # Only the custom-business-day frequency honours the weekmask and
        # holidays keyword arguments; pop them so they are not forwarded
        # to date_range below.
        holidays = kwargs.pop('holidays', [])
        weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
        freq = CDay(holidays=holidays, weekmask=weekmask)
    return date_range(start=start, end=end, periods=periods, freq=freq,
                      tz=tz, normalize=normalize, name=name, closed=closed,
                      **kwargs)
Returns ------- rng : DatetimeIndex ] call[name[warnings].warn, parameter[call[constant[cdate_range is deprecated and will be removed in a future version, instead use pd.bdate_range(..., freq='{freq}')].format, parameter[]], name[FutureWarning]]] if compare[name[freq] equal[==] constant[C]] begin[:] variable[holidays] assign[=] call[name[kwargs].pop, parameter[constant[holidays], list[[]]]] variable[weekmask] assign[=] call[name[kwargs].pop, parameter[constant[weekmask], constant[Mon Tue Wed Thu Fri]]] variable[freq] assign[=] call[name[CDay], parameter[]] return[call[name[date_range], parameter[]]]
keyword[def] identifier[cdate_range] ( identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[periods] = keyword[None] , identifier[freq] = literal[string] , identifier[tz] = keyword[None] , identifier[normalize] = keyword[True] , identifier[name] = keyword[None] , identifier[closed] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] . identifier[format] ( identifier[freq] = identifier[freq] ), identifier[FutureWarning] , identifier[stacklevel] = literal[int] ) keyword[if] identifier[freq] == literal[string] : identifier[holidays] = identifier[kwargs] . identifier[pop] ( literal[string] ,[]) identifier[weekmask] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] ) identifier[freq] = identifier[CDay] ( identifier[holidays] = identifier[holidays] , identifier[weekmask] = identifier[weekmask] ) keyword[return] identifier[date_range] ( identifier[start] = identifier[start] , identifier[end] = identifier[end] , identifier[periods] = identifier[periods] , identifier[freq] = identifier[freq] , identifier[tz] = identifier[tz] , identifier[normalize] = identifier[normalize] , identifier[name] = identifier[name] , identifier[closed] = identifier[closed] ,** identifier[kwargs] )
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, normalize=True, name=None, closed=None, **kwargs): """ Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the default frequency .. deprecated:: 0.21.0 Parameters ---------- start : string or datetime-like, default None Left bound for generating dates end : string or datetime-like, default None Right bound for generating dates periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'C' (CustomBusinessDay) Frequency strings can have multiples, e.g. '5H' tz : string, default None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing normalize : bool, default False Normalize start/end dates to midnight before generating date range name : string, default None Name of the resulting DatetimeIndex weekmask : string, Default 'Mon Tue Wed Thu Fri' weekmask of valid business days, passed to ``numpy.busdaycalendar`` holidays : list list/array of dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar`` closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
Returns ------- rng : DatetimeIndex """ warnings.warn("cdate_range is deprecated and will be removed in a future version, instead use pd.bdate_range(..., freq='{freq}')".format(freq=freq), FutureWarning, stacklevel=2) if freq == 'C': holidays = kwargs.pop('holidays', []) weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri') freq = CDay(holidays=holidays, weekmask=weekmask) # depends on [control=['if'], data=['freq']] return date_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, **kwargs)
def ramp_array(rampdata, ti, gain=1.0, ron=1.0, badpixels=None,
               dtype='float64', saturation=65631, blank=0, nsig=None,
               normalize=False):
    """Loop over the first axis applying ramp processing.

    *rampdata* is assumed to be a 3D numpy.ndarray containing the result
    of a nIR observation in follow-up-the-ramp mode. The shape of the
    array must be of the form N_s x M x N, with N_s being the number of
    samples.

    :param rampdata: Convertible to a 3D numpy.ndarray
    :param ti: Integration time.
    :param gain: Detector gain.
    :param ron: Detector readout noise in counts.
    :param badpixels: An optional MxN mask of dtype 'uint8'.
    :param dtype: The dtype of the float outputs.
    :param saturation: The saturation level of the detector.
    :param blank: Invalid values in output are substituted by *blank*.
    :param nsig: Unused; kept for backward compatibility.
    :param normalize: Unused; kept for backward compatibility.
    :returns: A tuple of signal, variance of the signal, number of
              pixels used and badpixel mask.
    :raises: ValueError
    """
    import numina.array._nirproc as _nirproc

    # Validate scalar parameters before touching the data.
    if ti <= 0:
        raise ValueError("invalid parameter, ti <= 0.0")
    if gain <= 0:
        raise ValueError("invalid parameter, gain <= 0.0")
    if ron <= 0:
        # Message fixed: the check is "<= 0", but the original message
        # claimed "< 0.0", inconsistent with the sibling checks above.
        raise ValueError("invalid parameter, ron <= 0.0")
    if saturation <= 0:
        raise ValueError("invalid parameter, saturation <= 0")

    rampdata = numpy.asarray(rampdata)
    if rampdata.ndim != 3:
        raise ValueError('rampdata must be 3D')

    # Force native byte order so the C extension sees native-endian data.
    ndtype = rampdata.dtype.newbyteorder('=')
    rampdata = numpy.asarray(rampdata, dtype=ndtype)

    # Output dtype: at least as wide as both the input and the requested
    # dtype.
    fdtype = numpy.result_type(rampdata.dtype, dtype)
    # Masks and pixel counts use uint8.
    mdtype = numpy.dtype('uint8')

    # Per-frame shape (M, N); axis 0 is the sample axis.
    fshape = (rampdata.shape[1], rampdata.shape[2])

    if badpixels is None:
        badpixels = numpy.zeros(fshape, dtype=mdtype)
    else:
        if badpixels.shape != fshape:
            msg = 'shape of badpixels is not compatible with shape of rampdata'
            raise ValueError(msg)
        if badpixels.dtype != mdtype:
            raise ValueError('dtype of badpixels must be uint8')

    # Output buffers filled in-place by the C extension.
    result = numpy.empty(fshape, dtype=fdtype)
    var = numpy.empty_like(result)
    npix = numpy.empty(fshape, dtype=mdtype)
    mask = badpixels.copy()

    _nirproc._process_ramp_intl(
        rampdata, ti, gain, ron, badpixels, saturation, blank,
        result, var, npix, mask
    )
    return result, var, npix, mask
def function[ramp_array, parameter[rampdata, ti, gain, ron, badpixels, dtype, saturation, blank, nsig, normalize]]: constant[Loop over the first axis applying ramp processing. *rampdata* is assumed to be a 3D numpy.ndarray containing the result of a nIR observation in folow-up-the-ramp mode. The shape of the array must be of the form N_s x M x N, with N_s being the number of samples. :param fowlerdata: Convertible to a 3D numpy.ndarray :param ti: Integration time. :param gain: Detector gain. :param ron: Detector readout noise in counts. :param badpixels: An optional MxN mask of dtype 'uint8'. :param dtype: The dtype of the float outputs. :param saturation: The saturation level of the detector. :param blank: Invalid values in output are substituted by *blank*. :returns: A tuple of signal, variance of the signal, numper of pixels used and badpixel mask. :raises: ValueError ] import module[numina.array._nirproc] as alias[_nirproc] if compare[name[ti] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da1b24aff10> if compare[name[gain] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da1b24ad9c0> if compare[name[ron] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da1b24ad030> if compare[name[saturation] less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da1b24ae9e0> variable[rampdata] assign[=] call[name[numpy].asarray, parameter[name[rampdata]]] if compare[name[rampdata].ndim not_equal[!=] constant[3]] begin[:] <ast.Raise object at 0x7da1b24afac0> variable[ndtype] assign[=] call[name[rampdata].dtype.newbyteorder, parameter[constant[=]]] variable[rampdata] assign[=] call[name[numpy].asarray, parameter[name[rampdata]]] variable[fdtype] assign[=] call[name[numpy].result_type, parameter[name[rampdata].dtype, name[dtype]]] variable[mdtype] assign[=] call[name[numpy].dtype, parameter[constant[uint8]]] variable[fshape] assign[=] tuple[[<ast.Subscript object at 0x7da1b24ac2e0>, <ast.Subscript object at 
0x7da1b24adc00>]] if compare[name[badpixels] is constant[None]] begin[:] variable[badpixels] assign[=] call[name[numpy].zeros, parameter[name[fshape]]] variable[result] assign[=] call[name[numpy].empty, parameter[name[fshape]]] variable[var] assign[=] call[name[numpy].empty_like, parameter[name[result]]] variable[npix] assign[=] call[name[numpy].empty, parameter[name[fshape]]] variable[mask] assign[=] call[name[badpixels].copy, parameter[]] call[name[_nirproc]._process_ramp_intl, parameter[name[rampdata], name[ti], name[gain], name[ron], name[badpixels], name[saturation], name[blank], name[result], name[var], name[npix], name[mask]]] return[tuple[[<ast.Name object at 0x7da1b25eead0>, <ast.Name object at 0x7da1b25ed120>, <ast.Name object at 0x7da1b25ec490>, <ast.Name object at 0x7da1b25ee7d0>]]]
keyword[def] identifier[ramp_array] ( identifier[rampdata] , identifier[ti] , identifier[gain] = literal[int] , identifier[ron] = literal[int] , identifier[badpixels] = keyword[None] , identifier[dtype] = literal[string] , identifier[saturation] = literal[int] , identifier[blank] = literal[int] , identifier[nsig] = keyword[None] , identifier[normalize] = keyword[False] ): literal[string] keyword[import] identifier[numina] . identifier[array] . identifier[_nirproc] keyword[as] identifier[_nirproc] keyword[if] identifier[ti] <= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[gain] <= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[ron] <= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[saturation] <= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[rampdata] = identifier[numpy] . identifier[asarray] ( identifier[rampdata] ) keyword[if] identifier[rampdata] . identifier[ndim] != literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[ndtype] = identifier[rampdata] . identifier[dtype] . identifier[newbyteorder] ( literal[string] ) identifier[rampdata] = identifier[numpy] . identifier[asarray] ( identifier[rampdata] , identifier[dtype] = identifier[ndtype] ) identifier[fdtype] = identifier[numpy] . identifier[result_type] ( identifier[rampdata] . identifier[dtype] , identifier[dtype] ) identifier[mdtype] = identifier[numpy] . identifier[dtype] ( literal[string] ) identifier[fshape] =( identifier[rampdata] . identifier[shape] [ literal[int] ], identifier[rampdata] . identifier[shape] [ literal[int] ]) keyword[if] identifier[badpixels] keyword[is] keyword[None] : identifier[badpixels] = identifier[numpy] . identifier[zeros] ( identifier[fshape] , identifier[dtype] = identifier[mdtype] ) keyword[else] : keyword[if] identifier[badpixels] . 
identifier[shape] != identifier[fshape] : identifier[msg] = literal[string] keyword[raise] identifier[ValueError] ( identifier[msg] ) keyword[if] identifier[badpixels] . identifier[dtype] != identifier[mdtype] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[result] = identifier[numpy] . identifier[empty] ( identifier[fshape] , identifier[dtype] = identifier[fdtype] ) identifier[var] = identifier[numpy] . identifier[empty_like] ( identifier[result] ) identifier[npix] = identifier[numpy] . identifier[empty] ( identifier[fshape] , identifier[dtype] = identifier[mdtype] ) identifier[mask] = identifier[badpixels] . identifier[copy] () identifier[_nirproc] . identifier[_process_ramp_intl] ( identifier[rampdata] , identifier[ti] , identifier[gain] , identifier[ron] , identifier[badpixels] , identifier[saturation] , identifier[blank] , identifier[result] , identifier[var] , identifier[npix] , identifier[mask] ) keyword[return] identifier[result] , identifier[var] , identifier[npix] , identifier[mask]
def ramp_array(rampdata, ti, gain=1.0, ron=1.0, badpixels=None, dtype='float64', saturation=65631, blank=0, nsig=None, normalize=False): """Loop over the first axis applying ramp processing. *rampdata* is assumed to be a 3D numpy.ndarray containing the result of a nIR observation in folow-up-the-ramp mode. The shape of the array must be of the form N_s x M x N, with N_s being the number of samples. :param fowlerdata: Convertible to a 3D numpy.ndarray :param ti: Integration time. :param gain: Detector gain. :param ron: Detector readout noise in counts. :param badpixels: An optional MxN mask of dtype 'uint8'. :param dtype: The dtype of the float outputs. :param saturation: The saturation level of the detector. :param blank: Invalid values in output are substituted by *blank*. :returns: A tuple of signal, variance of the signal, numper of pixels used and badpixel mask. :raises: ValueError """ import numina.array._nirproc as _nirproc if ti <= 0: raise ValueError('invalid parameter, ti <= 0.0') # depends on [control=['if'], data=[]] if gain <= 0: raise ValueError('invalid parameter, gain <= 0.0') # depends on [control=['if'], data=[]] if ron <= 0: raise ValueError('invalid parameter, ron < 0.0') # depends on [control=['if'], data=[]] if saturation <= 0: raise ValueError('invalid parameter, saturation <= 0') # depends on [control=['if'], data=[]] rampdata = numpy.asarray(rampdata) if rampdata.ndim != 3: raise ValueError('rampdata must be 3D') # depends on [control=['if'], data=[]] # change byteorder ndtype = rampdata.dtype.newbyteorder('=') rampdata = numpy.asarray(rampdata, dtype=ndtype) # type of the output fdtype = numpy.result_type(rampdata.dtype, dtype) # Type of the mask mdtype = numpy.dtype('uint8') fshape = (rampdata.shape[1], rampdata.shape[2]) if badpixels is None: badpixels = numpy.zeros(fshape, dtype=mdtype) # depends on [control=['if'], data=['badpixels']] else: if badpixels.shape != fshape: msg = 'shape of badpixels is not compatible with shape of rampdata' 
raise ValueError(msg) # depends on [control=['if'], data=[]] if badpixels.dtype != mdtype: raise ValueError('dtype of badpixels must be uint8') # depends on [control=['if'], data=[]] result = numpy.empty(fshape, dtype=fdtype) var = numpy.empty_like(result) npix = numpy.empty(fshape, dtype=mdtype) mask = badpixels.copy() _nirproc._process_ramp_intl(rampdata, ti, gain, ron, badpixels, saturation, blank, result, var, npix, mask) return (result, var, npix, mask)
def rollback(self): """ Rollback this context: - Position the consumer at the initial offsets. """ self.logger.info("Rolling back context: %s", self.initial_offsets) self.update_consumer_offsets(self.initial_offsets)
def function[rollback, parameter[self]]: constant[ Rollback this context: - Position the consumer at the initial offsets. ] call[name[self].logger.info, parameter[constant[Rolling back context: %s], name[self].initial_offsets]] call[name[self].update_consumer_offsets, parameter[name[self].initial_offsets]]
keyword[def] identifier[rollback] ( identifier[self] ): literal[string] identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[initial_offsets] ) identifier[self] . identifier[update_consumer_offsets] ( identifier[self] . identifier[initial_offsets] )
def rollback(self): """ Rollback this context: - Position the consumer at the initial offsets. """ self.logger.info('Rolling back context: %s', self.initial_offsets) self.update_consumer_offsets(self.initial_offsets)
def set(self, key, value): """ Sets a single value in a preconfigured data file. Arguments: key -- The full dot-notated key to set the value for. value -- The value to set. """ d = self.data.data keys = key.split('.') latest = keys.pop() for k in keys: d = d.setdefault(k, {}) schema = Schema().load(self.schema_file) self.data.internal = schema.internal self.parse_value(d, '', key, value, schema.get(key)) self.data.save(self.data_file)
def function[set, parameter[self, key, value]]: constant[ Sets a single value in a preconfigured data file. Arguments: key -- The full dot-notated key to set the value for. value -- The value to set. ] variable[d] assign[=] name[self].data.data variable[keys] assign[=] call[name[key].split, parameter[constant[.]]] variable[latest] assign[=] call[name[keys].pop, parameter[]] for taget[name[k]] in starred[name[keys]] begin[:] variable[d] assign[=] call[name[d].setdefault, parameter[name[k], dictionary[[], []]]] variable[schema] assign[=] call[call[name[Schema], parameter[]].load, parameter[name[self].schema_file]] name[self].data.internal assign[=] name[schema].internal call[name[self].parse_value, parameter[name[d], constant[], name[key], name[value], call[name[schema].get, parameter[name[key]]]]] call[name[self].data.save, parameter[name[self].data_file]]
keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] identifier[d] = identifier[self] . identifier[data] . identifier[data] identifier[keys] = identifier[key] . identifier[split] ( literal[string] ) identifier[latest] = identifier[keys] . identifier[pop] () keyword[for] identifier[k] keyword[in] identifier[keys] : identifier[d] = identifier[d] . identifier[setdefault] ( identifier[k] ,{}) identifier[schema] = identifier[Schema] (). identifier[load] ( identifier[self] . identifier[schema_file] ) identifier[self] . identifier[data] . identifier[internal] = identifier[schema] . identifier[internal] identifier[self] . identifier[parse_value] ( identifier[d] , literal[string] , identifier[key] , identifier[value] , identifier[schema] . identifier[get] ( identifier[key] )) identifier[self] . identifier[data] . identifier[save] ( identifier[self] . identifier[data_file] )
def set(self, key, value): """ Sets a single value in a preconfigured data file. Arguments: key -- The full dot-notated key to set the value for. value -- The value to set. """ d = self.data.data keys = key.split('.') latest = keys.pop() for k in keys: d = d.setdefault(k, {}) # depends on [control=['for'], data=['k']] schema = Schema().load(self.schema_file) self.data.internal = schema.internal self.parse_value(d, '', key, value, schema.get(key)) self.data.save(self.data_file)
def register_as_guest(self): """ Register a guest account on this HS. Note: HS must have guest registration enabled. Returns: str: Access Token Raises: MatrixRequestError """ response = self.api.register(auth_body=None, kind='guest') return self._post_registration(response)
def function[register_as_guest, parameter[self]]: constant[ Register a guest account on this HS. Note: HS must have guest registration enabled. Returns: str: Access Token Raises: MatrixRequestError ] variable[response] assign[=] call[name[self].api.register, parameter[]] return[call[name[self]._post_registration, parameter[name[response]]]]
keyword[def] identifier[register_as_guest] ( identifier[self] ): literal[string] identifier[response] = identifier[self] . identifier[api] . identifier[register] ( identifier[auth_body] = keyword[None] , identifier[kind] = literal[string] ) keyword[return] identifier[self] . identifier[_post_registration] ( identifier[response] )
def register_as_guest(self): """ Register a guest account on this HS. Note: HS must have guest registration enabled. Returns: str: Access Token Raises: MatrixRequestError """ response = self.api.register(auth_body=None, kind='guest') return self._post_registration(response)
def at(self, index): """Get the object at an :paramref:`index`. :param int index: the index of the object :return: the object at :paramref:`index` """ keys = list(self._items.keys()) key = keys[index] return self[key]
def function[at, parameter[self, index]]: constant[Get the object at an :paramref:`index`. :param int index: the index of the object :return: the object at :paramref:`index` ] variable[keys] assign[=] call[name[list], parameter[call[name[self]._items.keys, parameter[]]]] variable[key] assign[=] call[name[keys]][name[index]] return[call[name[self]][name[key]]]
keyword[def] identifier[at] ( identifier[self] , identifier[index] ): literal[string] identifier[keys] = identifier[list] ( identifier[self] . identifier[_items] . identifier[keys] ()) identifier[key] = identifier[keys] [ identifier[index] ] keyword[return] identifier[self] [ identifier[key] ]
def at(self, index): """Get the object at an :paramref:`index`. :param int index: the index of the object :return: the object at :paramref:`index` """ keys = list(self._items.keys()) key = keys[index] return self[key]
def create_digest_session(self, alias, url, auth, headers={}, cookies={}, timeout=None, proxies=None, verify=False, debug=0, max_retries=3,backoff_factor=0.10, disable_warnings=0): """ Create Session: create a HTTP session to a server ``url`` Base url of the server ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of default headers ``cookies`` Dictionary of cookies ``auth`` ['DOMAIN', 'username', 'password'] for NTLM Authentication ``timeout`` Connection timeout ``proxies`` Dictionary that contains proxy urls for HTTP and HTTPS communication ``verify`` Whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to False. ``debug`` Enable http verbosity option more information https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel ``max_retries`` The maximum number of retries each connection should attempt. ``backoff_factor`` The pause between for each retry ``disable_warnings`` Disable requests warning useful when you have large number of testcases """ digest_auth = requests.auth.HTTPDigestAuth(*auth) if auth else None return self._create_session( alias, url, headers, cookies, digest_auth, timeout, max_retries, backoff_factor, proxies, verify, debug, disable_warnings)
def function[create_digest_session, parameter[self, alias, url, auth, headers, cookies, timeout, proxies, verify, debug, max_retries, backoff_factor, disable_warnings]]: constant[ Create Session: create a HTTP session to a server ``url`` Base url of the server ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of default headers ``cookies`` Dictionary of cookies ``auth`` ['DOMAIN', 'username', 'password'] for NTLM Authentication ``timeout`` Connection timeout ``proxies`` Dictionary that contains proxy urls for HTTP and HTTPS communication ``verify`` Whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to False. ``debug`` Enable http verbosity option more information https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel ``max_retries`` The maximum number of retries each connection should attempt. ``backoff_factor`` The pause between for each retry ``disable_warnings`` Disable requests warning useful when you have large number of testcases ] variable[digest_auth] assign[=] <ast.IfExp object at 0x7da2045661d0> return[call[name[self]._create_session, parameter[name[alias], name[url], name[headers], name[cookies], name[digest_auth], name[timeout], name[max_retries], name[backoff_factor], name[proxies], name[verify], name[debug], name[disable_warnings]]]]
keyword[def] identifier[create_digest_session] ( identifier[self] , identifier[alias] , identifier[url] , identifier[auth] , identifier[headers] ={}, identifier[cookies] ={}, identifier[timeout] = keyword[None] , identifier[proxies] = keyword[None] , identifier[verify] = keyword[False] , identifier[debug] = literal[int] , identifier[max_retries] = literal[int] , identifier[backoff_factor] = literal[int] , identifier[disable_warnings] = literal[int] ): literal[string] identifier[digest_auth] = identifier[requests] . identifier[auth] . identifier[HTTPDigestAuth] (* identifier[auth] ) keyword[if] identifier[auth] keyword[else] keyword[None] keyword[return] identifier[self] . identifier[_create_session] ( identifier[alias] , identifier[url] , identifier[headers] , identifier[cookies] , identifier[digest_auth] , identifier[timeout] , identifier[max_retries] , identifier[backoff_factor] , identifier[proxies] , identifier[verify] , identifier[debug] , identifier[disable_warnings] )
def create_digest_session(self, alias, url, auth, headers={}, cookies={}, timeout=None, proxies=None, verify=False, debug=0, max_retries=3, backoff_factor=0.1, disable_warnings=0): """ Create Session: create a HTTP session to a server ``url`` Base url of the server ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of default headers ``cookies`` Dictionary of cookies ``auth`` ['DOMAIN', 'username', 'password'] for NTLM Authentication ``timeout`` Connection timeout ``proxies`` Dictionary that contains proxy urls for HTTP and HTTPS communication ``verify`` Whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to False. ``debug`` Enable http verbosity option more information https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel ``max_retries`` The maximum number of retries each connection should attempt. ``backoff_factor`` The pause between for each retry ``disable_warnings`` Disable requests warning useful when you have large number of testcases """ digest_auth = requests.auth.HTTPDigestAuth(*auth) if auth else None return self._create_session(alias, url, headers, cookies, digest_auth, timeout, max_retries, backoff_factor, proxies, verify, debug, disable_warnings)
def ProduceEventTag(self, event_tag): """Produces an event tag. Args: event_tag (EventTag): event tag. """ self._storage_writer.AddEventTag(event_tag) self.number_of_produced_event_tags += 1 self.last_activity_timestamp = time.time()
def function[ProduceEventTag, parameter[self, event_tag]]: constant[Produces an event tag. Args: event_tag (EventTag): event tag. ] call[name[self]._storage_writer.AddEventTag, parameter[name[event_tag]]] <ast.AugAssign object at 0x7da20c7c9ba0> name[self].last_activity_timestamp assign[=] call[name[time].time, parameter[]]
keyword[def] identifier[ProduceEventTag] ( identifier[self] , identifier[event_tag] ): literal[string] identifier[self] . identifier[_storage_writer] . identifier[AddEventTag] ( identifier[event_tag] ) identifier[self] . identifier[number_of_produced_event_tags] += literal[int] identifier[self] . identifier[last_activity_timestamp] = identifier[time] . identifier[time] ()
def ProduceEventTag(self, event_tag): """Produces an event tag. Args: event_tag (EventTag): event tag. """ self._storage_writer.AddEventTag(event_tag) self.number_of_produced_event_tags += 1 self.last_activity_timestamp = time.time()
def to_bytecode(self): """Convert to Bytecode.""" used_blocks = set() for block in self: target_block = block.get_jump() if target_block is not None: used_blocks.add(id(target_block)) labels = {} jumps = [] instructions = [] for block in self: if id(block) in used_blocks: new_label = Label() labels[id(block)] = new_label instructions.append(new_label) for instr in block: # don't copy SetLineno objects if isinstance(instr, (Instr, ConcreteInstr)): instr = instr.copy() if isinstance(instr.arg, BasicBlock): jumps.append(instr) instructions.append(instr) # Map to new labels for instr in jumps: instr.arg = labels[id(instr.arg)] bytecode = _bytecode.Bytecode() bytecode._copy_attr_from(self) bytecode.argnames = list(self.argnames) bytecode[:] = instructions return bytecode
def function[to_bytecode, parameter[self]]: constant[Convert to Bytecode.] variable[used_blocks] assign[=] call[name[set], parameter[]] for taget[name[block]] in starred[name[self]] begin[:] variable[target_block] assign[=] call[name[block].get_jump, parameter[]] if compare[name[target_block] is_not constant[None]] begin[:] call[name[used_blocks].add, parameter[call[name[id], parameter[name[target_block]]]]] variable[labels] assign[=] dictionary[[], []] variable[jumps] assign[=] list[[]] variable[instructions] assign[=] list[[]] for taget[name[block]] in starred[name[self]] begin[:] if compare[call[name[id], parameter[name[block]]] in name[used_blocks]] begin[:] variable[new_label] assign[=] call[name[Label], parameter[]] call[name[labels]][call[name[id], parameter[name[block]]]] assign[=] name[new_label] call[name[instructions].append, parameter[name[new_label]]] for taget[name[instr]] in starred[name[block]] begin[:] if call[name[isinstance], parameter[name[instr], tuple[[<ast.Name object at 0x7da1b0506230>, <ast.Name object at 0x7da1b0507460>]]]] begin[:] variable[instr] assign[=] call[name[instr].copy, parameter[]] if call[name[isinstance], parameter[name[instr].arg, name[BasicBlock]]] begin[:] call[name[jumps].append, parameter[name[instr]]] call[name[instructions].append, parameter[name[instr]]] for taget[name[instr]] in starred[name[jumps]] begin[:] name[instr].arg assign[=] call[name[labels]][call[name[id], parameter[name[instr].arg]]] variable[bytecode] assign[=] call[name[_bytecode].Bytecode, parameter[]] call[name[bytecode]._copy_attr_from, parameter[name[self]]] name[bytecode].argnames assign[=] call[name[list], parameter[name[self].argnames]] call[name[bytecode]][<ast.Slice object at 0x7da1b05beda0>] assign[=] name[instructions] return[name[bytecode]]
keyword[def] identifier[to_bytecode] ( identifier[self] ): literal[string] identifier[used_blocks] = identifier[set] () keyword[for] identifier[block] keyword[in] identifier[self] : identifier[target_block] = identifier[block] . identifier[get_jump] () keyword[if] identifier[target_block] keyword[is] keyword[not] keyword[None] : identifier[used_blocks] . identifier[add] ( identifier[id] ( identifier[target_block] )) identifier[labels] ={} identifier[jumps] =[] identifier[instructions] =[] keyword[for] identifier[block] keyword[in] identifier[self] : keyword[if] identifier[id] ( identifier[block] ) keyword[in] identifier[used_blocks] : identifier[new_label] = identifier[Label] () identifier[labels] [ identifier[id] ( identifier[block] )]= identifier[new_label] identifier[instructions] . identifier[append] ( identifier[new_label] ) keyword[for] identifier[instr] keyword[in] identifier[block] : keyword[if] identifier[isinstance] ( identifier[instr] ,( identifier[Instr] , identifier[ConcreteInstr] )): identifier[instr] = identifier[instr] . identifier[copy] () keyword[if] identifier[isinstance] ( identifier[instr] . identifier[arg] , identifier[BasicBlock] ): identifier[jumps] . identifier[append] ( identifier[instr] ) identifier[instructions] . identifier[append] ( identifier[instr] ) keyword[for] identifier[instr] keyword[in] identifier[jumps] : identifier[instr] . identifier[arg] = identifier[labels] [ identifier[id] ( identifier[instr] . identifier[arg] )] identifier[bytecode] = identifier[_bytecode] . identifier[Bytecode] () identifier[bytecode] . identifier[_copy_attr_from] ( identifier[self] ) identifier[bytecode] . identifier[argnames] = identifier[list] ( identifier[self] . identifier[argnames] ) identifier[bytecode] [:]= identifier[instructions] keyword[return] identifier[bytecode]
def to_bytecode(self): """Convert to Bytecode.""" used_blocks = set() for block in self: target_block = block.get_jump() if target_block is not None: used_blocks.add(id(target_block)) # depends on [control=['if'], data=['target_block']] # depends on [control=['for'], data=['block']] labels = {} jumps = [] instructions = [] for block in self: if id(block) in used_blocks: new_label = Label() labels[id(block)] = new_label instructions.append(new_label) # depends on [control=['if'], data=[]] for instr in block: # don't copy SetLineno objects if isinstance(instr, (Instr, ConcreteInstr)): instr = instr.copy() if isinstance(instr.arg, BasicBlock): jumps.append(instr) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] instructions.append(instr) # depends on [control=['for'], data=['instr']] # depends on [control=['for'], data=['block']] # Map to new labels for instr in jumps: instr.arg = labels[id(instr.arg)] # depends on [control=['for'], data=['instr']] bytecode = _bytecode.Bytecode() bytecode._copy_attr_from(self) bytecode.argnames = list(self.argnames) bytecode[:] = instructions return bytecode
def get(package_str, classname): '''Retrieve from the internal cache a class instance. All arguments are case-insensitive''' if (package_str in _dynamo_cache) and (classname in _dynamo_cache[package_str]): return _dynamo_cache[package_str][classname] return None
def function[get, parameter[package_str, classname]]: constant[Retrieve from the internal cache a class instance. All arguments are case-insensitive] if <ast.BoolOp object at 0x7da1b26751b0> begin[:] return[call[call[name[_dynamo_cache]][name[package_str]]][name[classname]]] return[constant[None]]
keyword[def] identifier[get] ( identifier[package_str] , identifier[classname] ): literal[string] keyword[if] ( identifier[package_str] keyword[in] identifier[_dynamo_cache] ) keyword[and] ( identifier[classname] keyword[in] identifier[_dynamo_cache] [ identifier[package_str] ]): keyword[return] identifier[_dynamo_cache] [ identifier[package_str] ][ identifier[classname] ] keyword[return] keyword[None]
def get(package_str, classname): """Retrieve from the internal cache a class instance. All arguments are case-insensitive""" if package_str in _dynamo_cache and classname in _dynamo_cache[package_str]: return _dynamo_cache[package_str][classname] # depends on [control=['if'], data=[]] return None
async def should_update_bikes(delta: timedelta): """ Checks the most recently cached bike and returns true if it either doesn't exist or :return: Whether the cache should be updated. todo what if there are no bikes added for a week? ... every request will be triggered. """ bike = Bike.get_most_recent_bike() if bike is not None: return bike.cached_date < datetime.now() - delta else: return True
<ast.AsyncFunctionDef object at 0x7da1b0293490>
keyword[async] keyword[def] identifier[should_update_bikes] ( identifier[delta] : identifier[timedelta] ): literal[string] identifier[bike] = identifier[Bike] . identifier[get_most_recent_bike] () keyword[if] identifier[bike] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[bike] . identifier[cached_date] < identifier[datetime] . identifier[now] ()- identifier[delta] keyword[else] : keyword[return] keyword[True]
async def should_update_bikes(delta: timedelta): """ Checks the most recently cached bike and returns true if it either doesn't exist or :return: Whether the cache should be updated. todo what if there are no bikes added for a week? ... every request will be triggered. """ bike = Bike.get_most_recent_bike() if bike is not None: return bike.cached_date < datetime.now() - delta # depends on [control=['if'], data=['bike']] else: return True
def _split_mod_var_names(resource_name): """ Return (module_name, class_name) pair from given string. """ try: dot_index = resource_name.rindex('.') except ValueError: # no dot found return '', resource_name return resource_name[:dot_index], resource_name[dot_index + 1:]
def function[_split_mod_var_names, parameter[resource_name]]: constant[ Return (module_name, class_name) pair from given string. ] <ast.Try object at 0x7da2054a7f10> return[tuple[[<ast.Subscript object at 0x7da1b28ff400>, <ast.Subscript object at 0x7da1b28fccd0>]]]
keyword[def] identifier[_split_mod_var_names] ( identifier[resource_name] ): literal[string] keyword[try] : identifier[dot_index] = identifier[resource_name] . identifier[rindex] ( literal[string] ) keyword[except] identifier[ValueError] : keyword[return] literal[string] , identifier[resource_name] keyword[return] identifier[resource_name] [: identifier[dot_index] ], identifier[resource_name] [ identifier[dot_index] + literal[int] :]
def _split_mod_var_names(resource_name): """ Return (module_name, class_name) pair from given string. """ try: dot_index = resource_name.rindex('.') # depends on [control=['try'], data=[]] except ValueError: # no dot found return ('', resource_name) # depends on [control=['except'], data=[]] return (resource_name[:dot_index], resource_name[dot_index + 1:])
def setup_ui(self, ): """Setup the ui :returns: None :rtype: None :raises: None """ labels = self.reftrack.get_option_labels() self.browser = ComboBoxBrowser(len(labels), headers=labels) self.browser_vbox.addWidget(self.browser)
def function[setup_ui, parameter[self]]: constant[Setup the ui :returns: None :rtype: None :raises: None ] variable[labels] assign[=] call[name[self].reftrack.get_option_labels, parameter[]] name[self].browser assign[=] call[name[ComboBoxBrowser], parameter[call[name[len], parameter[name[labels]]]]] call[name[self].browser_vbox.addWidget, parameter[name[self].browser]]
keyword[def] identifier[setup_ui] ( identifier[self] ,): literal[string] identifier[labels] = identifier[self] . identifier[reftrack] . identifier[get_option_labels] () identifier[self] . identifier[browser] = identifier[ComboBoxBrowser] ( identifier[len] ( identifier[labels] ), identifier[headers] = identifier[labels] ) identifier[self] . identifier[browser_vbox] . identifier[addWidget] ( identifier[self] . identifier[browser] )
def setup_ui(self): """Setup the ui :returns: None :rtype: None :raises: None """ labels = self.reftrack.get_option_labels() self.browser = ComboBoxBrowser(len(labels), headers=labels) self.browser_vbox.addWidget(self.browser)
def wait_until_done(self, timeout=None): """Wait for the background load to complete.""" start = datetime.now() if not self.__th: raise IndraDBRestResponseError("There is no thread waiting to " "complete.") self.__th.join(timeout) now = datetime.now() dt = now - start if self.__th.is_alive(): logger.warning("Timed out after %0.3f seconds waiting for " "statement load to complete." % dt.total_seconds()) ret = False else: logger.info("Waited %0.3f seconds for statements to finish loading." % dt.total_seconds()) ret = True return ret
def function[wait_until_done, parameter[self, timeout]]: constant[Wait for the background load to complete.] variable[start] assign[=] call[name[datetime].now, parameter[]] if <ast.UnaryOp object at 0x7da20c6e6d40> begin[:] <ast.Raise object at 0x7da20c6e5810> call[name[self].__th.join, parameter[name[timeout]]] variable[now] assign[=] call[name[datetime].now, parameter[]] variable[dt] assign[=] binary_operation[name[now] - name[start]] if call[name[self].__th.is_alive, parameter[]] begin[:] call[name[logger].warning, parameter[binary_operation[constant[Timed out after %0.3f seconds waiting for statement load to complete.] <ast.Mod object at 0x7da2590d6920> call[name[dt].total_seconds, parameter[]]]]] variable[ret] assign[=] constant[False] return[name[ret]]
keyword[def] identifier[wait_until_done] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] identifier[start] = identifier[datetime] . identifier[now] () keyword[if] keyword[not] identifier[self] . identifier[__th] : keyword[raise] identifier[IndraDBRestResponseError] ( literal[string] literal[string] ) identifier[self] . identifier[__th] . identifier[join] ( identifier[timeout] ) identifier[now] = identifier[datetime] . identifier[now] () identifier[dt] = identifier[now] - identifier[start] keyword[if] identifier[self] . identifier[__th] . identifier[is_alive] (): identifier[logger] . identifier[warning] ( literal[string] literal[string] % identifier[dt] . identifier[total_seconds] ()) identifier[ret] = keyword[False] keyword[else] : identifier[logger] . identifier[info] ( literal[string] % identifier[dt] . identifier[total_seconds] ()) identifier[ret] = keyword[True] keyword[return] identifier[ret]
def wait_until_done(self, timeout=None): """Wait for the background load to complete.""" start = datetime.now() if not self.__th: raise IndraDBRestResponseError('There is no thread waiting to complete.') # depends on [control=['if'], data=[]] self.__th.join(timeout) now = datetime.now() dt = now - start if self.__th.is_alive(): logger.warning('Timed out after %0.3f seconds waiting for statement load to complete.' % dt.total_seconds()) ret = False # depends on [control=['if'], data=[]] else: logger.info('Waited %0.3f seconds for statements to finish loading.' % dt.total_seconds()) ret = True return ret
def process_param(self): """ Reads and processes RETURNVALUE stream. This stream is used to send OUTPUT parameters from RPC to client. Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx """ self.log_response_message('got RETURNVALUE message') r = self._reader if tds_base.IS_TDS72_PLUS(self): ordinal = r.get_usmallint() else: r.get_usmallint() # ignore size ordinal = self._out_params_indexes[self.return_value_index] name = r.read_ucs2(r.get_byte()) r.get_byte() # 1 - OUTPUT of sp, 2 - result of udf param = tds_base.Column() param.column_name = name self.get_type_info(param) param.value = param.serializer.read(r) self.output_params[ordinal] = param self.return_value_index += 1
def function[process_param, parameter[self]]: constant[ Reads and processes RETURNVALUE stream. This stream is used to send OUTPUT parameters from RPC to client. Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx ] call[name[self].log_response_message, parameter[constant[got RETURNVALUE message]]] variable[r] assign[=] name[self]._reader if call[name[tds_base].IS_TDS72_PLUS, parameter[name[self]]] begin[:] variable[ordinal] assign[=] call[name[r].get_usmallint, parameter[]] variable[name] assign[=] call[name[r].read_ucs2, parameter[call[name[r].get_byte, parameter[]]]] call[name[r].get_byte, parameter[]] variable[param] assign[=] call[name[tds_base].Column, parameter[]] name[param].column_name assign[=] name[name] call[name[self].get_type_info, parameter[name[param]]] name[param].value assign[=] call[name[param].serializer.read, parameter[name[r]]] call[name[self].output_params][name[ordinal]] assign[=] name[param] <ast.AugAssign object at 0x7da1b0537eb0>
keyword[def] identifier[process_param] ( identifier[self] ): literal[string] identifier[self] . identifier[log_response_message] ( literal[string] ) identifier[r] = identifier[self] . identifier[_reader] keyword[if] identifier[tds_base] . identifier[IS_TDS72_PLUS] ( identifier[self] ): identifier[ordinal] = identifier[r] . identifier[get_usmallint] () keyword[else] : identifier[r] . identifier[get_usmallint] () identifier[ordinal] = identifier[self] . identifier[_out_params_indexes] [ identifier[self] . identifier[return_value_index] ] identifier[name] = identifier[r] . identifier[read_ucs2] ( identifier[r] . identifier[get_byte] ()) identifier[r] . identifier[get_byte] () identifier[param] = identifier[tds_base] . identifier[Column] () identifier[param] . identifier[column_name] = identifier[name] identifier[self] . identifier[get_type_info] ( identifier[param] ) identifier[param] . identifier[value] = identifier[param] . identifier[serializer] . identifier[read] ( identifier[r] ) identifier[self] . identifier[output_params] [ identifier[ordinal] ]= identifier[param] identifier[self] . identifier[return_value_index] += literal[int]
def process_param(self): """ Reads and processes RETURNVALUE stream. This stream is used to send OUTPUT parameters from RPC to client. Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx """ self.log_response_message('got RETURNVALUE message') r = self._reader if tds_base.IS_TDS72_PLUS(self): ordinal = r.get_usmallint() # depends on [control=['if'], data=[]] else: r.get_usmallint() # ignore size ordinal = self._out_params_indexes[self.return_value_index] name = r.read_ucs2(r.get_byte()) r.get_byte() # 1 - OUTPUT of sp, 2 - result of udf param = tds_base.Column() param.column_name = name self.get_type_info(param) param.value = param.serializer.read(r) self.output_params[ordinal] = param self.return_value_index += 1
def get_field_groups(layer_purpose, layer_subcategory=None): """Obtain list of field groups from layer purpose and subcategory. :param layer_purpose: The layer purpose. :type layer_purpose: str :param layer_subcategory: Exposure or hazard value. :type layer_subcategory: str :returns: List of layer groups. :rtype: list """ layer_purpose_dict = definition(layer_purpose) if not layer_purpose_dict: return [] field_groups = deepcopy(layer_purpose_dict.get('field_groups', [])) if layer_purpose in [ layer_purpose_exposure['key'], layer_purpose_hazard['key']]: if layer_subcategory: subcategory = definition(layer_subcategory) if 'field_groups' in subcategory: field_groups += deepcopy(subcategory['field_groups']) return field_groups
def function[get_field_groups, parameter[layer_purpose, layer_subcategory]]: constant[Obtain list of field groups from layer purpose and subcategory. :param layer_purpose: The layer purpose. :type layer_purpose: str :param layer_subcategory: Exposure or hazard value. :type layer_subcategory: str :returns: List of layer groups. :rtype: list ] variable[layer_purpose_dict] assign[=] call[name[definition], parameter[name[layer_purpose]]] if <ast.UnaryOp object at 0x7da1b0c3fa00> begin[:] return[list[[]]] variable[field_groups] assign[=] call[name[deepcopy], parameter[call[name[layer_purpose_dict].get, parameter[constant[field_groups], list[[]]]]]] if compare[name[layer_purpose] in list[[<ast.Subscript object at 0x7da1b0c3d420>, <ast.Subscript object at 0x7da1b0c3cd00>]]] begin[:] if name[layer_subcategory] begin[:] variable[subcategory] assign[=] call[name[definition], parameter[name[layer_subcategory]]] if compare[constant[field_groups] in name[subcategory]] begin[:] <ast.AugAssign object at 0x7da1b0c3c9a0> return[name[field_groups]]
keyword[def] identifier[get_field_groups] ( identifier[layer_purpose] , identifier[layer_subcategory] = keyword[None] ): literal[string] identifier[layer_purpose_dict] = identifier[definition] ( identifier[layer_purpose] ) keyword[if] keyword[not] identifier[layer_purpose_dict] : keyword[return] [] identifier[field_groups] = identifier[deepcopy] ( identifier[layer_purpose_dict] . identifier[get] ( literal[string] ,[])) keyword[if] identifier[layer_purpose] keyword[in] [ identifier[layer_purpose_exposure] [ literal[string] ], identifier[layer_purpose_hazard] [ literal[string] ]]: keyword[if] identifier[layer_subcategory] : identifier[subcategory] = identifier[definition] ( identifier[layer_subcategory] ) keyword[if] literal[string] keyword[in] identifier[subcategory] : identifier[field_groups] += identifier[deepcopy] ( identifier[subcategory] [ literal[string] ]) keyword[return] identifier[field_groups]
def get_field_groups(layer_purpose, layer_subcategory=None): """Obtain list of field groups from layer purpose and subcategory. :param layer_purpose: The layer purpose. :type layer_purpose: str :param layer_subcategory: Exposure or hazard value. :type layer_subcategory: str :returns: List of layer groups. :rtype: list """ layer_purpose_dict = definition(layer_purpose) if not layer_purpose_dict: return [] # depends on [control=['if'], data=[]] field_groups = deepcopy(layer_purpose_dict.get('field_groups', [])) if layer_purpose in [layer_purpose_exposure['key'], layer_purpose_hazard['key']]: if layer_subcategory: subcategory = definition(layer_subcategory) if 'field_groups' in subcategory: field_groups += deepcopy(subcategory['field_groups']) # depends on [control=['if'], data=['subcategory']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return field_groups
def complete_vhwa_command(self, command): """Signals that the Video HW Acceleration command has completed. in command of type str Pointer to VBOXVHWACMD containing the completed command. """ if not isinstance(command, basestring): raise TypeError("command can only be an instance of type basestring") self._call("completeVHWACommand", in_p=[command])
def function[complete_vhwa_command, parameter[self, command]]: constant[Signals that the Video HW Acceleration command has completed. in command of type str Pointer to VBOXVHWACMD containing the completed command. ] if <ast.UnaryOp object at 0x7da204344730> begin[:] <ast.Raise object at 0x7da204347160> call[name[self]._call, parameter[constant[completeVHWACommand]]]
keyword[def] identifier[complete_vhwa_command] ( identifier[self] , identifier[command] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[command] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[command] ])
def complete_vhwa_command(self, command): """Signals that the Video HW Acceleration command has completed. in command of type str Pointer to VBOXVHWACMD containing the completed command. """ if not isinstance(command, basestring): raise TypeError('command can only be an instance of type basestring') # depends on [control=['if'], data=[]] self._call('completeVHWACommand', in_p=[command])
def radius(self): ''' Radius of the ellipse, Point class. ''' try: return self._radius except AttributeError: pass self._radius = Point(1, 1, 0) return self._radius
def function[radius, parameter[self]]: constant[ Radius of the ellipse, Point class. ] <ast.Try object at 0x7da1b11d8160> name[self]._radius assign[=] call[name[Point], parameter[constant[1], constant[1], constant[0]]] return[name[self]._radius]
keyword[def] identifier[radius] ( identifier[self] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[_radius] keyword[except] identifier[AttributeError] : keyword[pass] identifier[self] . identifier[_radius] = identifier[Point] ( literal[int] , literal[int] , literal[int] ) keyword[return] identifier[self] . identifier[_radius]
def radius(self): """ Radius of the ellipse, Point class. """ try: return self._radius # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] self._radius = Point(1, 1, 0) return self._radius
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg): ''' Function to fit the likelihood Fixme ''' Lb = np.random.uniform(0., maxfg, len(Rf)) pquit = 0 while pquit < 0.1: # quit when the posterior on Lf is very close to its prior nsamp = len(Lb) Rf_sel = np.random.choice(Rf, nsamp) vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel)) Lf = Rf_sel * vt log_Lf, log_Lb = log(Lf), log(Lb) plR = 0 for lfr in log_fg_ratios: plR += np.logaddexp(lfr + log_Lf, log_Lb) plR -= (Lf + Lb) plRn = plR - max(plR) idx = np.exp(plRn) > np.random.random(len(plRn)) pquit = ss.stats.ks_2samp(Lb, Lb[idx])[1] Lb = Lb[idx] return Rf_sel[idx], Lf[idx], Lb
def function[fgmc, parameter[log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg]]: constant[ Function to fit the likelihood Fixme ] variable[Lb] assign[=] call[name[np].random.uniform, parameter[constant[0.0], name[maxfg], call[name[len], parameter[name[Rf]]]]] variable[pquit] assign[=] constant[0] while compare[name[pquit] less[<] constant[0.1]] begin[:] variable[nsamp] assign[=] call[name[len], parameter[name[Lb]]] variable[Rf_sel] assign[=] call[name[np].random.choice, parameter[name[Rf], name[nsamp]]] variable[vt] assign[=] call[name[np].random.lognormal, parameter[name[mu_log_vt], name[sigma_log_vt], call[name[len], parameter[name[Rf_sel]]]]] variable[Lf] assign[=] binary_operation[name[Rf_sel] * name[vt]] <ast.Tuple object at 0x7da2044c2d70> assign[=] tuple[[<ast.Call object at 0x7da2044c11b0>, <ast.Call object at 0x7da2044c3f70>]] variable[plR] assign[=] constant[0] for taget[name[lfr]] in starred[name[log_fg_ratios]] begin[:] <ast.AugAssign object at 0x7da2044c0220> <ast.AugAssign object at 0x7da2044c1b40> variable[plRn] assign[=] binary_operation[name[plR] - call[name[max], parameter[name[plR]]]] variable[idx] assign[=] compare[call[name[np].exp, parameter[name[plRn]]] greater[>] call[name[np].random.random, parameter[call[name[len], parameter[name[plRn]]]]]] variable[pquit] assign[=] call[call[name[ss].stats.ks_2samp, parameter[name[Lb], call[name[Lb]][name[idx]]]]][constant[1]] variable[Lb] assign[=] call[name[Lb]][name[idx]] return[tuple[[<ast.Subscript object at 0x7da207f00160>, <ast.Subscript object at 0x7da207f01e40>, <ast.Name object at 0x7da207f027a0>]]]
keyword[def] identifier[fgmc] ( identifier[log_fg_ratios] , identifier[mu_log_vt] , identifier[sigma_log_vt] , identifier[Rf] , identifier[maxfg] ): literal[string] identifier[Lb] = identifier[np] . identifier[random] . identifier[uniform] ( literal[int] , identifier[maxfg] , identifier[len] ( identifier[Rf] )) identifier[pquit] = literal[int] keyword[while] identifier[pquit] < literal[int] : identifier[nsamp] = identifier[len] ( identifier[Lb] ) identifier[Rf_sel] = identifier[np] . identifier[random] . identifier[choice] ( identifier[Rf] , identifier[nsamp] ) identifier[vt] = identifier[np] . identifier[random] . identifier[lognormal] ( identifier[mu_log_vt] , identifier[sigma_log_vt] , identifier[len] ( identifier[Rf_sel] )) identifier[Lf] = identifier[Rf_sel] * identifier[vt] identifier[log_Lf] , identifier[log_Lb] = identifier[log] ( identifier[Lf] ), identifier[log] ( identifier[Lb] ) identifier[plR] = literal[int] keyword[for] identifier[lfr] keyword[in] identifier[log_fg_ratios] : identifier[plR] += identifier[np] . identifier[logaddexp] ( identifier[lfr] + identifier[log_Lf] , identifier[log_Lb] ) identifier[plR] -=( identifier[Lf] + identifier[Lb] ) identifier[plRn] = identifier[plR] - identifier[max] ( identifier[plR] ) identifier[idx] = identifier[np] . identifier[exp] ( identifier[plRn] )> identifier[np] . identifier[random] . identifier[random] ( identifier[len] ( identifier[plRn] )) identifier[pquit] = identifier[ss] . identifier[stats] . identifier[ks_2samp] ( identifier[Lb] , identifier[Lb] [ identifier[idx] ])[ literal[int] ] identifier[Lb] = identifier[Lb] [ identifier[idx] ] keyword[return] identifier[Rf_sel] [ identifier[idx] ], identifier[Lf] [ identifier[idx] ], identifier[Lb]
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg): """ Function to fit the likelihood Fixme """ Lb = np.random.uniform(0.0, maxfg, len(Rf)) pquit = 0 while pquit < 0.1: # quit when the posterior on Lf is very close to its prior nsamp = len(Lb) Rf_sel = np.random.choice(Rf, nsamp) vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel)) Lf = Rf_sel * vt (log_Lf, log_Lb) = (log(Lf), log(Lb)) plR = 0 for lfr in log_fg_ratios: plR += np.logaddexp(lfr + log_Lf, log_Lb) # depends on [control=['for'], data=['lfr']] plR -= Lf + Lb plRn = plR - max(plR) idx = np.exp(plRn) > np.random.random(len(plRn)) pquit = ss.stats.ks_2samp(Lb, Lb[idx])[1] Lb = Lb[idx] # depends on [control=['while'], data=['pquit']] return (Rf_sel[idx], Lf[idx], Lb)
def list_tags(DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None): ''' List tags of a trail Returns: tags: - {...} - {...} CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.list_tags my_trail ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if ARN is None: if DomainName is None: raise SaltInvocationError('One (but not both) of ARN or ' 'domain must be specified.') domaindata = status(DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile) if not domaindata or 'domain' not in domaindata: log.warning('Domain tags not updated') return {'tagged': False} ARN = domaindata.get('domain', {}).get('ARN') elif DomainName is not None: raise SaltInvocationError('One (but not both) of ARN or ' 'domain must be specified.') ret = conn.list_tags(ARN=ARN) log.warning(ret) tlist = ret.get('TagList', []) tagdict = {} for tag in tlist: tagdict[tag.get('Key')] = tag.get('Value') return {'tags': tagdict} except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
def function[list_tags, parameter[DomainName, ARN, region, key, keyid, profile]]: constant[ List tags of a trail Returns: tags: - {...} - {...} CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.list_tags my_trail ] <ast.Try object at 0x7da18f58d420>
keyword[def] identifier[list_tags] ( identifier[DomainName] = keyword[None] , identifier[ARN] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] keyword[try] : identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[if] identifier[ARN] keyword[is] keyword[None] : keyword[if] identifier[DomainName] keyword[is] keyword[None] : keyword[raise] identifier[SaltInvocationError] ( literal[string] literal[string] ) identifier[domaindata] = identifier[status] ( identifier[DomainName] = identifier[DomainName] , identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[if] keyword[not] identifier[domaindata] keyword[or] literal[string] keyword[not] keyword[in] identifier[domaindata] : identifier[log] . identifier[warning] ( literal[string] ) keyword[return] { literal[string] : keyword[False] } identifier[ARN] = identifier[domaindata] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ) keyword[elif] identifier[DomainName] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[SaltInvocationError] ( literal[string] literal[string] ) identifier[ret] = identifier[conn] . identifier[list_tags] ( identifier[ARN] = identifier[ARN] ) identifier[log] . identifier[warning] ( identifier[ret] ) identifier[tlist] = identifier[ret] . identifier[get] ( literal[string] ,[]) identifier[tagdict] ={} keyword[for] identifier[tag] keyword[in] identifier[tlist] : identifier[tagdict] [ identifier[tag] . identifier[get] ( literal[string] )]= identifier[tag] . 
identifier[get] ( literal[string] ) keyword[return] { literal[string] : identifier[tagdict] } keyword[except] identifier[ClientError] keyword[as] identifier[e] : keyword[return] { literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )}
def list_tags(DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None): """ List tags of a trail Returns: tags: - {...} - {...} CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.list_tags my_trail """ try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if ARN is None: if DomainName is None: raise SaltInvocationError('One (but not both) of ARN or domain must be specified.') # depends on [control=['if'], data=[]] domaindata = status(DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile) if not domaindata or 'domain' not in domaindata: log.warning('Domain tags not updated') return {'tagged': False} # depends on [control=['if'], data=[]] ARN = domaindata.get('domain', {}).get('ARN') # depends on [control=['if'], data=['ARN']] elif DomainName is not None: raise SaltInvocationError('One (but not both) of ARN or domain must be specified.') # depends on [control=['if'], data=[]] ret = conn.list_tags(ARN=ARN) log.warning(ret) tlist = ret.get('TagList', []) tagdict = {} for tag in tlist: tagdict[tag.get('Key')] = tag.get('Value') # depends on [control=['for'], data=['tag']] return {'tags': tagdict} # depends on [control=['try'], data=[]] except ClientError as e: return {'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']]
def param_get(param): """ ``param_get <instance_number> <param_symbol>`` get the value of the request control e.g.:: param_get 0 gain :param Lv2Param param: Parameter that will be get your current value """ instance = param.effect.instance return 'param_get {} {}'.format(instance, param.symbol)
def function[param_get, parameter[param]]: constant[ ``param_get <instance_number> <param_symbol>`` get the value of the request control e.g.:: param_get 0 gain :param Lv2Param param: Parameter that will be get your current value ] variable[instance] assign[=] name[param].effect.instance return[call[constant[param_get {} {}].format, parameter[name[instance], name[param].symbol]]]
keyword[def] identifier[param_get] ( identifier[param] ): literal[string] identifier[instance] = identifier[param] . identifier[effect] . identifier[instance] keyword[return] literal[string] . identifier[format] ( identifier[instance] , identifier[param] . identifier[symbol] )
def param_get(param): """ ``param_get <instance_number> <param_symbol>`` get the value of the request control e.g.:: param_get 0 gain :param Lv2Param param: Parameter that will be get your current value """ instance = param.effect.instance return 'param_get {} {}'.format(instance, param.symbol)
def read_param_file_to_dict(file_name): """Loads a text file to a python dictionary using '=' as the delimiter :param file_name: the name and path of the text file """ data = loadtxt(file_name, delimiter='=', dtype=scipy.string0) data_dict = dict(data) for key in data_dict.keys(): data_dict[key] = data_dict[key].strip() data_dict[key.strip()] = data_dict[key] del data_dict[key] return data_dict
def function[read_param_file_to_dict, parameter[file_name]]: constant[Loads a text file to a python dictionary using '=' as the delimiter :param file_name: the name and path of the text file ] variable[data] assign[=] call[name[loadtxt], parameter[name[file_name]]] variable[data_dict] assign[=] call[name[dict], parameter[name[data]]] for taget[name[key]] in starred[call[name[data_dict].keys, parameter[]]] begin[:] call[name[data_dict]][name[key]] assign[=] call[call[name[data_dict]][name[key]].strip, parameter[]] call[name[data_dict]][call[name[key].strip, parameter[]]] assign[=] call[name[data_dict]][name[key]] <ast.Delete object at 0x7da20c6a8460> return[name[data_dict]]
keyword[def] identifier[read_param_file_to_dict] ( identifier[file_name] ): literal[string] identifier[data] = identifier[loadtxt] ( identifier[file_name] , identifier[delimiter] = literal[string] , identifier[dtype] = identifier[scipy] . identifier[string0] ) identifier[data_dict] = identifier[dict] ( identifier[data] ) keyword[for] identifier[key] keyword[in] identifier[data_dict] . identifier[keys] (): identifier[data_dict] [ identifier[key] ]= identifier[data_dict] [ identifier[key] ]. identifier[strip] () identifier[data_dict] [ identifier[key] . identifier[strip] ()]= identifier[data_dict] [ identifier[key] ] keyword[del] identifier[data_dict] [ identifier[key] ] keyword[return] identifier[data_dict]
def read_param_file_to_dict(file_name): """Loads a text file to a python dictionary using '=' as the delimiter :param file_name: the name and path of the text file """ data = loadtxt(file_name, delimiter='=', dtype=scipy.string0) data_dict = dict(data) for key in data_dict.keys(): data_dict[key] = data_dict[key].strip() data_dict[key.strip()] = data_dict[key] del data_dict[key] # depends on [control=['for'], data=['key']] return data_dict
def _highlight_line_difflib(self, line, next): """ Highlight inline changes in both lines. """ if line['action'] == 'del': old, new = line, next else: old, new = next, line oldwords = re.split(r'(\W)', old['line']) newwords = re.split(r'(\W)', new['line']) sequence = difflib.SequenceMatcher(None, oldwords, newwords) oldfragments, newfragments = [], [] for tag, i1, i2, j1, j2 in sequence.get_opcodes(): oldfrag = ''.join(oldwords[i1:i2]) newfrag = ''.join(newwords[j1:j2]) if tag != 'equal': if oldfrag: oldfrag = '<del>%s</del>' % oldfrag if newfrag: newfrag = '<ins>%s</ins>' % newfrag oldfragments.append(oldfrag) newfragments.append(newfrag) old['line'] = "".join(oldfragments) new['line'] = "".join(newfragments)
def function[_highlight_line_difflib, parameter[self, line, next]]: constant[ Highlight inline changes in both lines. ] if compare[call[name[line]][constant[action]] equal[==] constant[del]] begin[:] <ast.Tuple object at 0x7da2054a4730> assign[=] tuple[[<ast.Name object at 0x7da2054a52d0>, <ast.Name object at 0x7da2054a6d40>]] variable[oldwords] assign[=] call[name[re].split, parameter[constant[(\W)], call[name[old]][constant[line]]]] variable[newwords] assign[=] call[name[re].split, parameter[constant[(\W)], call[name[new]][constant[line]]]] variable[sequence] assign[=] call[name[difflib].SequenceMatcher, parameter[constant[None], name[oldwords], name[newwords]]] <ast.Tuple object at 0x7da18f00ec50> assign[=] tuple[[<ast.List object at 0x7da1b264a1a0>, <ast.List object at 0x7da1b2649ed0>]] for taget[tuple[[<ast.Name object at 0x7da1b26484f0>, <ast.Name object at 0x7da1b264b7f0>, <ast.Name object at 0x7da1b26491e0>, <ast.Name object at 0x7da1b2649630>, <ast.Name object at 0x7da1b264ae30>]]] in starred[call[name[sequence].get_opcodes, parameter[]]] begin[:] variable[oldfrag] assign[=] call[constant[].join, parameter[call[name[oldwords]][<ast.Slice object at 0x7da1b264b430>]]] variable[newfrag] assign[=] call[constant[].join, parameter[call[name[newwords]][<ast.Slice object at 0x7da1b264b790>]]] if compare[name[tag] not_equal[!=] constant[equal]] begin[:] if name[oldfrag] begin[:] variable[oldfrag] assign[=] binary_operation[constant[<del>%s</del>] <ast.Mod object at 0x7da2590d6920> name[oldfrag]] if name[newfrag] begin[:] variable[newfrag] assign[=] binary_operation[constant[<ins>%s</ins>] <ast.Mod object at 0x7da2590d6920> name[newfrag]] call[name[oldfragments].append, parameter[name[oldfrag]]] call[name[newfragments].append, parameter[name[newfrag]]] call[name[old]][constant[line]] assign[=] call[constant[].join, parameter[name[oldfragments]]] call[name[new]][constant[line]] assign[=] call[constant[].join, parameter[name[newfragments]]]
keyword[def] identifier[_highlight_line_difflib] ( identifier[self] , identifier[line] , identifier[next] ): literal[string] keyword[if] identifier[line] [ literal[string] ]== literal[string] : identifier[old] , identifier[new] = identifier[line] , identifier[next] keyword[else] : identifier[old] , identifier[new] = identifier[next] , identifier[line] identifier[oldwords] = identifier[re] . identifier[split] ( literal[string] , identifier[old] [ literal[string] ]) identifier[newwords] = identifier[re] . identifier[split] ( literal[string] , identifier[new] [ literal[string] ]) identifier[sequence] = identifier[difflib] . identifier[SequenceMatcher] ( keyword[None] , identifier[oldwords] , identifier[newwords] ) identifier[oldfragments] , identifier[newfragments] =[],[] keyword[for] identifier[tag] , identifier[i1] , identifier[i2] , identifier[j1] , identifier[j2] keyword[in] identifier[sequence] . identifier[get_opcodes] (): identifier[oldfrag] = literal[string] . identifier[join] ( identifier[oldwords] [ identifier[i1] : identifier[i2] ]) identifier[newfrag] = literal[string] . identifier[join] ( identifier[newwords] [ identifier[j1] : identifier[j2] ]) keyword[if] identifier[tag] != literal[string] : keyword[if] identifier[oldfrag] : identifier[oldfrag] = literal[string] % identifier[oldfrag] keyword[if] identifier[newfrag] : identifier[newfrag] = literal[string] % identifier[newfrag] identifier[oldfragments] . identifier[append] ( identifier[oldfrag] ) identifier[newfragments] . identifier[append] ( identifier[newfrag] ) identifier[old] [ literal[string] ]= literal[string] . identifier[join] ( identifier[oldfragments] ) identifier[new] [ literal[string] ]= literal[string] . identifier[join] ( identifier[newfragments] )
def _highlight_line_difflib(self, line, next): """ Highlight inline changes in both lines. """ if line['action'] == 'del': (old, new) = (line, next) # depends on [control=['if'], data=[]] else: (old, new) = (next, line) oldwords = re.split('(\\W)', old['line']) newwords = re.split('(\\W)', new['line']) sequence = difflib.SequenceMatcher(None, oldwords, newwords) (oldfragments, newfragments) = ([], []) for (tag, i1, i2, j1, j2) in sequence.get_opcodes(): oldfrag = ''.join(oldwords[i1:i2]) newfrag = ''.join(newwords[j1:j2]) if tag != 'equal': if oldfrag: oldfrag = '<del>%s</del>' % oldfrag # depends on [control=['if'], data=[]] if newfrag: newfrag = '<ins>%s</ins>' % newfrag # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] oldfragments.append(oldfrag) newfragments.append(newfrag) # depends on [control=['for'], data=[]] old['line'] = ''.join(oldfragments) new['line'] = ''.join(newfragments)
def execute(self, query, *multiparams, **params): """Executes a SQL query with optional parameters. query - a SQL query string or any sqlalchemy expression. *multiparams/**params - represent bound parameter values to be used in the execution. Typically, the format is a dictionary passed to *multiparams: await conn.execute( table.insert(), {"id":1, "value":"v1"}, ) ...or individual key/values interpreted by **params:: await conn.execute( table.insert(), id=1, value="v1" ) In the case that a plain SQL string is passed, a tuple or individual values in \*multiparams may be passed:: await conn.execute( "INSERT INTO table (id, value) VALUES (%d, %s)", (1, "v1") ) await conn.execute( "INSERT INTO table (id, value) VALUES (%s, %s)", 1, "v1" ) Returns ResultProxy instance with results of SQL query execution. """ coro = self._execute(query, *multiparams, **params) return _SAConnectionContextManager(coro)
def function[execute, parameter[self, query]]: constant[Executes a SQL query with optional parameters. query - a SQL query string or any sqlalchemy expression. *multiparams/**params - represent bound parameter values to be used in the execution. Typically, the format is a dictionary passed to *multiparams: await conn.execute( table.insert(), {"id":1, "value":"v1"}, ) ...or individual key/values interpreted by **params:: await conn.execute( table.insert(), id=1, value="v1" ) In the case that a plain SQL string is passed, a tuple or individual values in \*multiparams may be passed:: await conn.execute( "INSERT INTO table (id, value) VALUES (%d, %s)", (1, "v1") ) await conn.execute( "INSERT INTO table (id, value) VALUES (%s, %s)", 1, "v1" ) Returns ResultProxy instance with results of SQL query execution. ] variable[coro] assign[=] call[name[self]._execute, parameter[name[query], <ast.Starred object at 0x7da1b1d8ee60>]] return[call[name[_SAConnectionContextManager], parameter[name[coro]]]]
keyword[def] identifier[execute] ( identifier[self] , identifier[query] ,* identifier[multiparams] ,** identifier[params] ): literal[string] identifier[coro] = identifier[self] . identifier[_execute] ( identifier[query] ,* identifier[multiparams] ,** identifier[params] ) keyword[return] identifier[_SAConnectionContextManager] ( identifier[coro] )
def execute(self, query, *multiparams, **params): """Executes a SQL query with optional parameters. query - a SQL query string or any sqlalchemy expression. *multiparams/**params - represent bound parameter values to be used in the execution. Typically, the format is a dictionary passed to *multiparams: await conn.execute( table.insert(), {"id":1, "value":"v1"}, ) ...or individual key/values interpreted by **params:: await conn.execute( table.insert(), id=1, value="v1" ) In the case that a plain SQL string is passed, a tuple or individual values in \\*multiparams may be passed:: await conn.execute( "INSERT INTO table (id, value) VALUES (%d, %s)", (1, "v1") ) await conn.execute( "INSERT INTO table (id, value) VALUES (%s, %s)", 1, "v1" ) Returns ResultProxy instance with results of SQL query execution. """ coro = self._execute(query, *multiparams, **params) return _SAConnectionContextManager(coro)
def unpack_variable(var): """Unpack an NCStream Variable into information we can use.""" # If we actually get a structure instance, handle turning that into a variable if var.dataType == stream.STRUCTURE: return None, struct_to_dtype(var), 'Structure' elif var.dataType == stream.SEQUENCE: log.warning('Sequence support not implemented!') dt = data_type_to_numpy(var.dataType, var.unsigned) if var.dataType == stream.OPAQUE: type_name = 'opaque' elif var.dataType == stream.STRING: type_name = 'string' else: type_name = dt.name if var.data: log.debug('Storing variable data: %s %s', dt, var.data) if var.dataType == stream.STRING: data = var.data else: # Always sent big endian data = np.frombuffer(var.data, dtype=dt.newbyteorder('>')) else: data = None return data, dt, type_name
def function[unpack_variable, parameter[var]]: constant[Unpack an NCStream Variable into information we can use.] if compare[name[var].dataType equal[==] name[stream].STRUCTURE] begin[:] return[tuple[[<ast.Constant object at 0x7da1b1176620>, <ast.Call object at 0x7da1b11747f0>, <ast.Constant object at 0x7da1b1176a10>]]] variable[dt] assign[=] call[name[data_type_to_numpy], parameter[name[var].dataType, name[var].unsigned]] if compare[name[var].dataType equal[==] name[stream].OPAQUE] begin[:] variable[type_name] assign[=] constant[opaque] if name[var].data begin[:] call[name[log].debug, parameter[constant[Storing variable data: %s %s], name[dt], name[var].data]] if compare[name[var].dataType equal[==] name[stream].STRING] begin[:] variable[data] assign[=] name[var].data return[tuple[[<ast.Name object at 0x7da1b10213f0>, <ast.Name object at 0x7da1b1020520>, <ast.Name object at 0x7da1b1021bd0>]]]
keyword[def] identifier[unpack_variable] ( identifier[var] ): literal[string] keyword[if] identifier[var] . identifier[dataType] == identifier[stream] . identifier[STRUCTURE] : keyword[return] keyword[None] , identifier[struct_to_dtype] ( identifier[var] ), literal[string] keyword[elif] identifier[var] . identifier[dataType] == identifier[stream] . identifier[SEQUENCE] : identifier[log] . identifier[warning] ( literal[string] ) identifier[dt] = identifier[data_type_to_numpy] ( identifier[var] . identifier[dataType] , identifier[var] . identifier[unsigned] ) keyword[if] identifier[var] . identifier[dataType] == identifier[stream] . identifier[OPAQUE] : identifier[type_name] = literal[string] keyword[elif] identifier[var] . identifier[dataType] == identifier[stream] . identifier[STRING] : identifier[type_name] = literal[string] keyword[else] : identifier[type_name] = identifier[dt] . identifier[name] keyword[if] identifier[var] . identifier[data] : identifier[log] . identifier[debug] ( literal[string] , identifier[dt] , identifier[var] . identifier[data] ) keyword[if] identifier[var] . identifier[dataType] == identifier[stream] . identifier[STRING] : identifier[data] = identifier[var] . identifier[data] keyword[else] : identifier[data] = identifier[np] . identifier[frombuffer] ( identifier[var] . identifier[data] , identifier[dtype] = identifier[dt] . identifier[newbyteorder] ( literal[string] )) keyword[else] : identifier[data] = keyword[None] keyword[return] identifier[data] , identifier[dt] , identifier[type_name]
def unpack_variable(var): """Unpack an NCStream Variable into information we can use.""" # If we actually get a structure instance, handle turning that into a variable if var.dataType == stream.STRUCTURE: return (None, struct_to_dtype(var), 'Structure') # depends on [control=['if'], data=[]] elif var.dataType == stream.SEQUENCE: log.warning('Sequence support not implemented!') # depends on [control=['if'], data=[]] dt = data_type_to_numpy(var.dataType, var.unsigned) if var.dataType == stream.OPAQUE: type_name = 'opaque' # depends on [control=['if'], data=[]] elif var.dataType == stream.STRING: type_name = 'string' # depends on [control=['if'], data=[]] else: type_name = dt.name if var.data: log.debug('Storing variable data: %s %s', dt, var.data) if var.dataType == stream.STRING: data = var.data # depends on [control=['if'], data=[]] else: # Always sent big endian data = np.frombuffer(var.data, dtype=dt.newbyteorder('>')) # depends on [control=['if'], data=[]] else: data = None return (data, dt, type_name)
def generateDHCPOptionsTemplate(self, address_family): """ Generate boilerplate dictionary to hold dhcp options :param str address_family: dhcpv4 or dhcpv6 :return: dict containing valid option set for address family """ from ns1.ipam import DHCPOptions options = {} for option in DHCPOptions.OPTIONS[address_family]: options[option] = "" return options
def function[generateDHCPOptionsTemplate, parameter[self, address_family]]: constant[ Generate boilerplate dictionary to hold dhcp options :param str address_family: dhcpv4 or dhcpv6 :return: dict containing valid option set for address family ] from relative_module[ns1.ipam] import module[DHCPOptions] variable[options] assign[=] dictionary[[], []] for taget[name[option]] in starred[call[name[DHCPOptions].OPTIONS][name[address_family]]] begin[:] call[name[options]][name[option]] assign[=] constant[] return[name[options]]
keyword[def] identifier[generateDHCPOptionsTemplate] ( identifier[self] , identifier[address_family] ): literal[string] keyword[from] identifier[ns1] . identifier[ipam] keyword[import] identifier[DHCPOptions] identifier[options] ={} keyword[for] identifier[option] keyword[in] identifier[DHCPOptions] . identifier[OPTIONS] [ identifier[address_family] ]: identifier[options] [ identifier[option] ]= literal[string] keyword[return] identifier[options]
def generateDHCPOptionsTemplate(self, address_family): """ Generate boilerplate dictionary to hold dhcp options :param str address_family: dhcpv4 or dhcpv6 :return: dict containing valid option set for address family """ from ns1.ipam import DHCPOptions options = {} for option in DHCPOptions.OPTIONS[address_family]: options[option] = '' # depends on [control=['for'], data=['option']] return options
def middleware_class(api=None): """Registers a middleware class""" def decorator(middleware_class): apply_to_api = hug.API(api) if api else hug.api.from_object(middleware_class) apply_to_api.http.add_middleware(middleware_class()) return middleware_class return decorator
def function[middleware_class, parameter[api]]: constant[Registers a middleware class] def function[decorator, parameter[middleware_class]]: variable[apply_to_api] assign[=] <ast.IfExp object at 0x7da18bcca950> call[name[apply_to_api].http.add_middleware, parameter[call[name[middleware_class], parameter[]]]] return[name[middleware_class]] return[name[decorator]]
keyword[def] identifier[middleware_class] ( identifier[api] = keyword[None] ): literal[string] keyword[def] identifier[decorator] ( identifier[middleware_class] ): identifier[apply_to_api] = identifier[hug] . identifier[API] ( identifier[api] ) keyword[if] identifier[api] keyword[else] identifier[hug] . identifier[api] . identifier[from_object] ( identifier[middleware_class] ) identifier[apply_to_api] . identifier[http] . identifier[add_middleware] ( identifier[middleware_class] ()) keyword[return] identifier[middleware_class] keyword[return] identifier[decorator]
def middleware_class(api=None): """Registers a middleware class""" def decorator(middleware_class): apply_to_api = hug.API(api) if api else hug.api.from_object(middleware_class) apply_to_api.http.add_middleware(middleware_class()) return middleware_class return decorator
def clear(self): """ clear all tree data """ self._delete_child_storage(self.root_node) self._delete_node_storage(self.root_node) self.root_node = BLANK_NODE
def function[clear, parameter[self]]: constant[ clear all tree data ] call[name[self]._delete_child_storage, parameter[name[self].root_node]] call[name[self]._delete_node_storage, parameter[name[self].root_node]] name[self].root_node assign[=] name[BLANK_NODE]
keyword[def] identifier[clear] ( identifier[self] ): literal[string] identifier[self] . identifier[_delete_child_storage] ( identifier[self] . identifier[root_node] ) identifier[self] . identifier[_delete_node_storage] ( identifier[self] . identifier[root_node] ) identifier[self] . identifier[root_node] = identifier[BLANK_NODE]
def clear(self): """ clear all tree data """ self._delete_child_storage(self.root_node) self._delete_node_storage(self.root_node) self.root_node = BLANK_NODE
def console_map_string_to_font(s: str, fontCharX: int, fontCharY: int) -> None: """Remap a string of codes to a contiguous set of tiles. Args: s (AnyStr): A string of character codes to map to new values. The null character `'\\x00'` will prematurely end this function. fontCharX (int): The starting X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The starting Y tile coordinate on the loaded tileset. 0 is the topmost tile. """ lib.TCOD_console_map_string_to_font_utf(_unicode(s), fontCharX, fontCharY)
def function[console_map_string_to_font, parameter[s, fontCharX, fontCharY]]: constant[Remap a string of codes to a contiguous set of tiles. Args: s (AnyStr): A string of character codes to map to new values. The null character `'\x00'` will prematurely end this function. fontCharX (int): The starting X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The starting Y tile coordinate on the loaded tileset. 0 is the topmost tile. ] call[name[lib].TCOD_console_map_string_to_font_utf, parameter[call[name[_unicode], parameter[name[s]]], name[fontCharX], name[fontCharY]]]
keyword[def] identifier[console_map_string_to_font] ( identifier[s] : identifier[str] , identifier[fontCharX] : identifier[int] , identifier[fontCharY] : identifier[int] )-> keyword[None] : literal[string] identifier[lib] . identifier[TCOD_console_map_string_to_font_utf] ( identifier[_unicode] ( identifier[s] ), identifier[fontCharX] , identifier[fontCharY] )
def console_map_string_to_font(s: str, fontCharX: int, fontCharY: int) -> None: """Remap a string of codes to a contiguous set of tiles. Args: s (AnyStr): A string of character codes to map to new values. The null character `'\\x00'` will prematurely end this function. fontCharX (int): The starting X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The starting Y tile coordinate on the loaded tileset. 0 is the topmost tile. """ lib.TCOD_console_map_string_to_font_utf(_unicode(s), fontCharX, fontCharY)
def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None): """Train model. The output can be used for batch prediction or online deployment. Args: input_dir: A directory path containing preprocessed results. Can be local or GCS path. batch_size: size of batch used for training. max_steps: number of steps to train. output_dir: The output directory to use. Can be local or GCS path. checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used. cloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud. If None, it runs locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait. """ with warnings.catch_warnings(): warnings.simplefilter("ignore") if cloud is None: return _local.Local.train(input_dir, batch_size, max_steps, output_dir, checkpoint) return _cloud.Cloud.train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
def function[train_async, parameter[input_dir, batch_size, max_steps, output_dir, checkpoint, cloud]]: constant[Train model. The output can be used for batch prediction or online deployment. Args: input_dir: A directory path containing preprocessed results. Can be local or GCS path. batch_size: size of batch used for training. max_steps: number of steps to train. output_dir: The output directory to use. Can be local or GCS path. checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used. cloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud. If None, it runs locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait. ] with call[name[warnings].catch_warnings, parameter[]] begin[:] call[name[warnings].simplefilter, parameter[constant[ignore]]] if compare[name[cloud] is constant[None]] begin[:] return[call[name[_local].Local.train, parameter[name[input_dir], name[batch_size], name[max_steps], name[output_dir], name[checkpoint]]]] return[call[name[_cloud].Cloud.train, parameter[name[input_dir], name[batch_size], name[max_steps], name[output_dir], name[checkpoint], name[cloud]]]]
keyword[def] identifier[train_async] ( identifier[input_dir] , identifier[batch_size] , identifier[max_steps] , identifier[output_dir] , identifier[checkpoint] = keyword[None] , identifier[cloud] = keyword[None] ): literal[string] keyword[with] identifier[warnings] . identifier[catch_warnings] (): identifier[warnings] . identifier[simplefilter] ( literal[string] ) keyword[if] identifier[cloud] keyword[is] keyword[None] : keyword[return] identifier[_local] . identifier[Local] . identifier[train] ( identifier[input_dir] , identifier[batch_size] , identifier[max_steps] , identifier[output_dir] , identifier[checkpoint] ) keyword[return] identifier[_cloud] . identifier[Cloud] . identifier[train] ( identifier[input_dir] , identifier[batch_size] , identifier[max_steps] , identifier[output_dir] , identifier[checkpoint] , identifier[cloud] )
def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None): """Train model. The output can be used for batch prediction or online deployment. Args: input_dir: A directory path containing preprocessed results. Can be local or GCS path. batch_size: size of batch used for training. max_steps: number of steps to train. output_dir: The output directory to use. Can be local or GCS path. checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used. cloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud. If None, it runs locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait. """ with warnings.catch_warnings(): warnings.simplefilter('ignore') if cloud is None: return _local.Local.train(input_dir, batch_size, max_steps, output_dir, checkpoint) # depends on [control=['if'], data=[]] return _cloud.Cloud.train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud) # depends on [control=['with'], data=[]]
def get_config(cls, key, default=None): """ Shortcut to access the application's config in your class :param key: The key to access :param default: The default value when None :returns mixed: """ return cls._app.config.get(key, default)
def function[get_config, parameter[cls, key, default]]: constant[ Shortcut to access the application's config in your class :param key: The key to access :param default: The default value when None :returns mixed: ] return[call[name[cls]._app.config.get, parameter[name[key], name[default]]]]
keyword[def] identifier[get_config] ( identifier[cls] , identifier[key] , identifier[default] = keyword[None] ): literal[string] keyword[return] identifier[cls] . identifier[_app] . identifier[config] . identifier[get] ( identifier[key] , identifier[default] )
def get_config(cls, key, default=None): """ Shortcut to access the application's config in your class :param key: The key to access :param default: The default value when None :returns mixed: """ return cls._app.config.get(key, default)
def getMaxWidth(self, rows): 'Return the maximum length of any cell in column or its header.' w = 0 if len(rows) > 0: w = max(max(len(self.getDisplayValue(r)) for r in rows), len(self.name))+2 return max(w, len(self.name))
def function[getMaxWidth, parameter[self, rows]]: constant[Return the maximum length of any cell in column or its header.] variable[w] assign[=] constant[0] if compare[call[name[len], parameter[name[rows]]] greater[>] constant[0]] begin[:] variable[w] assign[=] binary_operation[call[name[max], parameter[call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b26adb10>]], call[name[len], parameter[name[self].name]]]] + constant[2]] return[call[name[max], parameter[name[w], call[name[len], parameter[name[self].name]]]]]
keyword[def] identifier[getMaxWidth] ( identifier[self] , identifier[rows] ): literal[string] identifier[w] = literal[int] keyword[if] identifier[len] ( identifier[rows] )> literal[int] : identifier[w] = identifier[max] ( identifier[max] ( identifier[len] ( identifier[self] . identifier[getDisplayValue] ( identifier[r] )) keyword[for] identifier[r] keyword[in] identifier[rows] ), identifier[len] ( identifier[self] . identifier[name] ))+ literal[int] keyword[return] identifier[max] ( identifier[w] , identifier[len] ( identifier[self] . identifier[name] ))
def getMaxWidth(self, rows): """Return the maximum length of any cell in column or its header.""" w = 0 if len(rows) > 0: w = max(max((len(self.getDisplayValue(r)) for r in rows)), len(self.name)) + 2 # depends on [control=['if'], data=[]] return max(w, len(self.name))
def token(config, token): """Store and fetch a GitHub access token""" if not token: info_out( "To generate a personal API token, go to:\n\n\t" "https://github.com/settings/tokens\n\n" "To read more about it, go to:\n\n\t" "https://help.github.com/articles/creating-an-access" "-token-for-command-line-use/\n\n" 'Remember to enable "repo" in the scopes.' ) token = getpass.getpass("GitHub API Token: ").strip() url = urllib.parse.urljoin(config.github_url, "/user") assert url.startswith("https://"), url response = requests.get(url, headers={"Authorization": "token {}".format(token)}) if response.status_code == 200: update( config.configfile, { "GITHUB": { "github_url": config.github_url, "token": token, "login": response.json()["login"], } }, ) name = response.json()["name"] or response.json()["login"] success_out("Hi! {}".format(name)) else: error_out("Failed - {} ({})".format(response.status_code, response.content))
def function[token, parameter[config, token]]: constant[Store and fetch a GitHub access token] if <ast.UnaryOp object at 0x7da1b2428d30> begin[:] call[name[info_out], parameter[constant[To generate a personal API token, go to: https://github.com/settings/tokens To read more about it, go to: https://help.github.com/articles/creating-an-access-token-for-command-line-use/ Remember to enable "repo" in the scopes.]]] variable[token] assign[=] call[call[name[getpass].getpass, parameter[constant[GitHub API Token: ]]].strip, parameter[]] variable[url] assign[=] call[name[urllib].parse.urljoin, parameter[name[config].github_url, constant[/user]]] assert[call[name[url].startswith, parameter[constant[https://]]]] variable[response] assign[=] call[name[requests].get, parameter[name[url]]] if compare[name[response].status_code equal[==] constant[200]] begin[:] call[name[update], parameter[name[config].configfile, dictionary[[<ast.Constant object at 0x7da1b2362a10>], [<ast.Dict object at 0x7da1b23601c0>]]]] variable[name] assign[=] <ast.BoolOp object at 0x7da1b2361210> call[name[success_out], parameter[call[constant[Hi! {}].format, parameter[name[name]]]]]
keyword[def] identifier[token] ( identifier[config] , identifier[token] ): literal[string] keyword[if] keyword[not] identifier[token] : identifier[info_out] ( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ) identifier[token] = identifier[getpass] . identifier[getpass] ( literal[string] ). identifier[strip] () identifier[url] = identifier[urllib] . identifier[parse] . identifier[urljoin] ( identifier[config] . identifier[github_url] , literal[string] ) keyword[assert] identifier[url] . identifier[startswith] ( literal[string] ), identifier[url] identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] ={ literal[string] : literal[string] . identifier[format] ( identifier[token] )}) keyword[if] identifier[response] . identifier[status_code] == literal[int] : identifier[update] ( identifier[config] . identifier[configfile] , { literal[string] :{ literal[string] : identifier[config] . identifier[github_url] , literal[string] : identifier[token] , literal[string] : identifier[response] . identifier[json] ()[ literal[string] ], } }, ) identifier[name] = identifier[response] . identifier[json] ()[ literal[string] ] keyword[or] identifier[response] . identifier[json] ()[ literal[string] ] identifier[success_out] ( literal[string] . identifier[format] ( identifier[name] )) keyword[else] : identifier[error_out] ( literal[string] . identifier[format] ( identifier[response] . identifier[status_code] , identifier[response] . identifier[content] ))
def token(config, token): """Store and fetch a GitHub access token""" if not token: info_out('To generate a personal API token, go to:\n\n\thttps://github.com/settings/tokens\n\nTo read more about it, go to:\n\n\thttps://help.github.com/articles/creating-an-access-token-for-command-line-use/\n\nRemember to enable "repo" in the scopes.') token = getpass.getpass('GitHub API Token: ').strip() # depends on [control=['if'], data=[]] url = urllib.parse.urljoin(config.github_url, '/user') assert url.startswith('https://'), url response = requests.get(url, headers={'Authorization': 'token {}'.format(token)}) if response.status_code == 200: update(config.configfile, {'GITHUB': {'github_url': config.github_url, 'token': token, 'login': response.json()['login']}}) name = response.json()['name'] or response.json()['login'] success_out('Hi! {}'.format(name)) # depends on [control=['if'], data=[]] else: error_out('Failed - {} ({})'.format(response.status_code, response.content))
def fromLatex(tex, *args, **kwargs):
    """Build a TreeOfContents abstraction from a LaTeX string.

    :param str tex: LaTeX source text
    :return: TreeOfContents object rooted at a synthetic '[document]' node
    """
    parsed = TexSoup(tex)
    # Materialize the descendant iterator once so the TOC owns a stable list.
    all_descendants = list(parsed.descendants)
    return TOC('[document]', source=parsed, descendants=all_descendants,
               *args, **kwargs)
def function[fromLatex, parameter[tex]]: constant[Creates abstraction using Latex :param str tex: Latex :return: TreeOfContents object ] variable[source] assign[=] call[name[TexSoup], parameter[name[tex]]] return[call[name[TOC], parameter[constant[[document]], <ast.Starred object at 0x7da1b04191b0>]]]
keyword[def] identifier[fromLatex] ( identifier[tex] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[source] = identifier[TexSoup] ( identifier[tex] ) keyword[return] identifier[TOC] ( literal[string] , identifier[source] = identifier[source] , identifier[descendants] = identifier[list] ( identifier[source] . identifier[descendants] ),* identifier[args] ,** identifier[kwargs] )
def fromLatex(tex, *args, **kwargs): """Creates abstraction using Latex :param str tex: Latex :return: TreeOfContents object """ source = TexSoup(tex) return TOC('[document]', *args, source=source, descendants=list(source.descendants), **kwargs)
def build_strain_specific_models(self, save_models=False):
    """Using the orthologous genes matrix, create and modify the strain specific models based on
    if orthologous genes exist.

    Also store the sequences directly in the reference GEM-PRO protein sequence attribute
    for the strains.

    Args:
        save_models (bool): If True, write each strain-specific COBRA model as JSON and the
            strain GEM-PRO as a pickle into ``self.model_dir``.

    Raises:
        RuntimeError: If ``self.df_orthology_matrix`` is empty (orthology not computed yet).
    """
    if len(self.df_orthology_matrix) == 0:
        raise RuntimeError('Empty orthology matrix')

    # Create an emptied copy of the reference GEM-PRO
    for strain_gempro in tqdm(self.strains):
        log.debug('{}: building strain specific model'.format(strain_gempro.id))

        # For each genome, load the metabolic model or genes from the reference GEM-PRO.
        # Logging below WARNING is disabled around the loader calls to silence their
        # output, then fully re-enabled with NOTSET.
        logging.disable(logging.WARNING)
        if self._empty_reference_gempro.model:
            strain_gempro.load_cobra_model(self._empty_reference_gempro.model)
        elif self._empty_reference_gempro.genes:
            strain_gempro.genes = [x.id for x in self._empty_reference_gempro.genes]
        logging.disable(logging.NOTSET)

        # Get a list of genes which do not have orthology in the strain.
        # NOTE(review): assumes the orthology matrix is indexed by gene with one column
        # per strain ID, so null entries in this strain's column mean "no ortholog" --
        # confirm against the code that builds df_orthology_matrix.
        not_in_strain = self.df_orthology_matrix[pd.isnull(self.df_orthology_matrix[strain_gempro.id])][strain_gempro.id].index.tolist()

        # Mark genes non-functional
        self._pare_down_model(strain_gempro=strain_gempro, genes_to_remove=not_in_strain)

        # Load sequences into the base and strain models
        self._load_strain_sequences(strain_gempro=strain_gempro)

        if save_models:
            # Persist both the COBRA model (JSON) and the whole GEM-PRO object (pickle).
            cobra.io.save_json_model(model=strain_gempro.model, filename=op.join(self.model_dir, '{}.json'.format(strain_gempro.id)))
            strain_gempro.save_pickle(op.join(self.model_dir, '{}_gp.pckl'.format(strain_gempro.id)))

    log.info('Created {} new strain-specific models and loaded in sequences'.format(len(self.strains)))
def function[build_strain_specific_models, parameter[self, save_models]]: constant[Using the orthologous genes matrix, create and modify the strain specific models based on if orthologous genes exist. Also store the sequences directly in the reference GEM-PRO protein sequence attribute for the strains. ] if compare[call[name[len], parameter[name[self].df_orthology_matrix]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18eb56350> for taget[name[strain_gempro]] in starred[call[name[tqdm], parameter[name[self].strains]]] begin[:] call[name[log].debug, parameter[call[constant[{}: building strain specific model].format, parameter[name[strain_gempro].id]]]] call[name[logging].disable, parameter[name[logging].WARNING]] if name[self]._empty_reference_gempro.model begin[:] call[name[strain_gempro].load_cobra_model, parameter[name[self]._empty_reference_gempro.model]] call[name[logging].disable, parameter[name[logging].NOTSET]] variable[not_in_strain] assign[=] call[call[call[name[self].df_orthology_matrix][call[name[pd].isnull, parameter[call[name[self].df_orthology_matrix][name[strain_gempro].id]]]]][name[strain_gempro].id].index.tolist, parameter[]] call[name[self]._pare_down_model, parameter[]] call[name[self]._load_strain_sequences, parameter[]] if name[save_models] begin[:] call[name[cobra].io.save_json_model, parameter[]] call[name[strain_gempro].save_pickle, parameter[call[name[op].join, parameter[name[self].model_dir, call[constant[{}_gp.pckl].format, parameter[name[strain_gempro].id]]]]]] call[name[log].info, parameter[call[constant[Created {} new strain-specific models and loaded in sequences].format, parameter[call[name[len], parameter[name[self].strains]]]]]]
keyword[def] identifier[build_strain_specific_models] ( identifier[self] , identifier[save_models] = keyword[False] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[df_orthology_matrix] )== literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[for] identifier[strain_gempro] keyword[in] identifier[tqdm] ( identifier[self] . identifier[strains] ): identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[strain_gempro] . identifier[id] )) identifier[logging] . identifier[disable] ( identifier[logging] . identifier[WARNING] ) keyword[if] identifier[self] . identifier[_empty_reference_gempro] . identifier[model] : identifier[strain_gempro] . identifier[load_cobra_model] ( identifier[self] . identifier[_empty_reference_gempro] . identifier[model] ) keyword[elif] identifier[self] . identifier[_empty_reference_gempro] . identifier[genes] : identifier[strain_gempro] . identifier[genes] =[ identifier[x] . identifier[id] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[_empty_reference_gempro] . identifier[genes] ] identifier[logging] . identifier[disable] ( identifier[logging] . identifier[NOTSET] ) identifier[not_in_strain] = identifier[self] . identifier[df_orthology_matrix] [ identifier[pd] . identifier[isnull] ( identifier[self] . identifier[df_orthology_matrix] [ identifier[strain_gempro] . identifier[id] ])][ identifier[strain_gempro] . identifier[id] ]. identifier[index] . identifier[tolist] () identifier[self] . identifier[_pare_down_model] ( identifier[strain_gempro] = identifier[strain_gempro] , identifier[genes_to_remove] = identifier[not_in_strain] ) identifier[self] . identifier[_load_strain_sequences] ( identifier[strain_gempro] = identifier[strain_gempro] ) keyword[if] identifier[save_models] : identifier[cobra] . identifier[io] . identifier[save_json_model] ( identifier[model] = identifier[strain_gempro] . 
identifier[model] , identifier[filename] = identifier[op] . identifier[join] ( identifier[self] . identifier[model_dir] , literal[string] . identifier[format] ( identifier[strain_gempro] . identifier[id] ))) identifier[strain_gempro] . identifier[save_pickle] ( identifier[op] . identifier[join] ( identifier[self] . identifier[model_dir] , literal[string] . identifier[format] ( identifier[strain_gempro] . identifier[id] ))) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[self] . identifier[strains] )))
def build_strain_specific_models(self, save_models=False): """Using the orthologous genes matrix, create and modify the strain specific models based on if orthologous genes exist. Also store the sequences directly in the reference GEM-PRO protein sequence attribute for the strains. """ if len(self.df_orthology_matrix) == 0: raise RuntimeError('Empty orthology matrix') # depends on [control=['if'], data=[]] # Create an emptied copy of the reference GEM-PRO for strain_gempro in tqdm(self.strains): log.debug('{}: building strain specific model'.format(strain_gempro.id)) # For each genome, load the metabolic model or genes from the reference GEM-PRO logging.disable(logging.WARNING) if self._empty_reference_gempro.model: strain_gempro.load_cobra_model(self._empty_reference_gempro.model) # depends on [control=['if'], data=[]] elif self._empty_reference_gempro.genes: strain_gempro.genes = [x.id for x in self._empty_reference_gempro.genes] # depends on [control=['if'], data=[]] logging.disable(logging.NOTSET) # Get a list of genes which do not have orthology in the strain not_in_strain = self.df_orthology_matrix[pd.isnull(self.df_orthology_matrix[strain_gempro.id])][strain_gempro.id].index.tolist() # Mark genes non-functional self._pare_down_model(strain_gempro=strain_gempro, genes_to_remove=not_in_strain) # Load sequences into the base and strain models self._load_strain_sequences(strain_gempro=strain_gempro) if save_models: cobra.io.save_json_model(model=strain_gempro.model, filename=op.join(self.model_dir, '{}.json'.format(strain_gempro.id))) strain_gempro.save_pickle(op.join(self.model_dir, '{}_gp.pckl'.format(strain_gempro.id))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['strain_gempro']] log.info('Created {} new strain-specific models and loaded in sequences'.format(len(self.strains)))
def get_friends_list(self, steamID, relationship='all', format=None):
    """Request the friends list of a given steam ID filtered by role.

    steamID: The user ID
    relationship: Type of friend to request (all, friend)
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    query = {'steamid': steamID, 'relationship': relationship}
    # Only forward an explicit format; omitting it lets the API default to json.
    if format is not None:
        query['format'] = format
    request_url = self.create_request_url(self.interface, 'GetFriendsList', 1,
                                          query)
    raw = self.retrieve_request(request_url)
    return self.return_data(raw, format=format)
def function[get_friends_list, parameter[self, steamID, relationship, format]]: constant[Request the friends list of a given steam ID filtered by role. steamID: The user ID relationship: Type of friend to request (all, friend) format: Return format. None defaults to json. (json, xml, vdf) ] variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da2041d9c90>, <ast.Constant object at 0x7da2041db790>], [<ast.Name object at 0x7da2041dbfd0>, <ast.Name object at 0x7da2041d8130>]] if compare[name[format] is_not constant[None]] begin[:] call[name[parameters]][constant[format]] assign[=] name[format] variable[url] assign[=] call[name[self].create_request_url, parameter[name[self].interface, constant[GetFriendsList], constant[1], name[parameters]]] variable[data] assign[=] call[name[self].retrieve_request, parameter[name[url]]] return[call[name[self].return_data, parameter[name[data]]]]
keyword[def] identifier[get_friends_list] ( identifier[self] , identifier[steamID] , identifier[relationship] = literal[string] , identifier[format] = keyword[None] ): literal[string] identifier[parameters] ={ literal[string] : identifier[steamID] , literal[string] : identifier[relationship] } keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] : identifier[parameters] [ literal[string] ]= identifier[format] identifier[url] = identifier[self] . identifier[create_request_url] ( identifier[self] . identifier[interface] , literal[string] , literal[int] , identifier[parameters] ) identifier[data] = identifier[self] . identifier[retrieve_request] ( identifier[url] ) keyword[return] identifier[self] . identifier[return_data] ( identifier[data] , identifier[format] = identifier[format] )
def get_friends_list(self, steamID, relationship='all', format=None): """Request the friends list of a given steam ID filtered by role. steamID: The user ID relationship: Type of friend to request (all, friend) format: Return format. None defaults to json. (json, xml, vdf) """ parameters = {'steamid': steamID, 'relationship': relationship} if format is not None: parameters['format'] = format # depends on [control=['if'], data=['format']] url = self.create_request_url(self.interface, 'GetFriendsList', 1, parameters) data = self.retrieve_request(url) return self.return_data(data, format=format)
def entry_point(items=tuple()):
    """
    External entry point which calls main() and if Stop
    is raised, calls sys.exit()
    """
    try:
        # With no commands supplied, fall back to the built-in ones.
        if not items:
            from .example import ExampleCommand
            from .version import Version
            items = [(ExampleCommand.NAME, ExampleCommand),
                     (Version.NAME, Version)]
        main("yaclifw", items=items)
    except Stop as halt:
        # Controlled shutdown: report and exit with the requested code.
        print(halt)
        sys.exit(halt.rc)
    except SystemExit:
        # Let explicit exits propagate untouched.
        raise
    except KeyboardInterrupt:
        print("Cancelled")
        sys.exit(1)
    except Exception:
        # Anything unexpected: show the traceback and fail.
        traceback.print_exc()
        sys.exit(1)
def function[entry_point, parameter[items]]: constant[ External entry point which calls main() and if Stop is raised, calls sys.exit() ] <ast.Try object at 0x7da20cabf640>
keyword[def] identifier[entry_point] ( identifier[items] = identifier[tuple] ()): literal[string] keyword[try] : keyword[if] keyword[not] identifier[items] : keyword[from] . identifier[example] keyword[import] identifier[ExampleCommand] keyword[from] . identifier[version] keyword[import] identifier[Version] identifier[items] =[( identifier[ExampleCommand] . identifier[NAME] , identifier[ExampleCommand] ), ( identifier[Version] . identifier[NAME] , identifier[Version] )] identifier[main] ( literal[string] , identifier[items] = identifier[items] ) keyword[except] identifier[Stop] keyword[as] identifier[stop] : identifier[print] ( identifier[stop] ) identifier[sys] . identifier[exit] ( identifier[stop] . identifier[rc] ) keyword[except] identifier[SystemExit] : keyword[raise] keyword[except] identifier[KeyboardInterrupt] : identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[except] identifier[Exception] : identifier[traceback] . identifier[print_exc] () identifier[sys] . identifier[exit] ( literal[int] )
def entry_point(items=tuple()): """ External entry point which calls main() and if Stop is raised, calls sys.exit() """ try: if not items: from .example import ExampleCommand from .version import Version items = [(ExampleCommand.NAME, ExampleCommand), (Version.NAME, Version)] # depends on [control=['if'], data=[]] main('yaclifw', items=items) # depends on [control=['try'], data=[]] except Stop as stop: print(stop) sys.exit(stop.rc) # depends on [control=['except'], data=['stop']] except SystemExit: raise # depends on [control=['except'], data=[]] except KeyboardInterrupt: print('Cancelled') sys.exit(1) # depends on [control=['except'], data=[]] except Exception: traceback.print_exc() sys.exit(1) # depends on [control=['except'], data=[]]
def prob(self, word: str) -> float:
    """ Return probability of an input word, according to the spelling dictionary

    :param str word: A word to check its probability of occurrence
    """
    word_count = self.__WORDS[word]
    return word_count / self.__WORDS_TOTAL
def function[prob, parameter[self, word]]: constant[ Return probability of an input word, according to the spelling dictionary :param str word: A word to check its probability of occurrence ] return[binary_operation[call[name[self].__WORDS][name[word]] / name[self].__WORDS_TOTAL]]
keyword[def] identifier[prob] ( identifier[self] , identifier[word] : identifier[str] )-> identifier[float] : literal[string] keyword[return] identifier[self] . identifier[__WORDS] [ identifier[word] ]/ identifier[self] . identifier[__WORDS_TOTAL]
def prob(self, word: str) -> float: """ Return probability of an input word, according to the spelling dictionary :param str word: A word to check its probability of occurrence """ return self.__WORDS[word] / self.__WORDS_TOTAL
def stop(self):
    """Stop the process.

    Stops the file watcher first, then sends SIGTERM to the child process.
    """
    logger.info("stopping process")
    # Stop the watcher before signalling the child.
    self.watcher.stop()
    # Ask the child to terminate gracefully (SIGTERM, not SIGKILL).
    os.kill(self.child_pid, signal.SIGTERM)
def function[stop, parameter[self]]: constant[Stop the process.] call[name[logger].info, parameter[constant[stopping process]]] call[name[self].watcher.stop, parameter[]] call[name[os].kill, parameter[name[self].child_pid, name[signal].SIGTERM]]
keyword[def] identifier[stop] ( identifier[self] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) identifier[self] . identifier[watcher] . identifier[stop] () identifier[os] . identifier[kill] ( identifier[self] . identifier[child_pid] , identifier[signal] . identifier[SIGTERM] )
def stop(self): """Stop the process.""" logger.info('stopping process') self.watcher.stop() os.kill(self.child_pid, signal.SIGTERM)
def _num_tasks_per_fetch_process(self): """ How many Celery tasks should be sent to each worker process. :return: Number of tasks that should be used per process :rtype: int """ return max(1, int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))
def function[_num_tasks_per_fetch_process, parameter[self]]: constant[ How many Celery tasks should be sent to each worker process. :return: Number of tasks that should be used per process :rtype: int ] return[call[name[max], parameter[constant[1], call[name[int], parameter[call[name[math].ceil, parameter[binary_operation[binary_operation[constant[1.0] * call[name[len], parameter[name[self].tasks]]] / name[self]._sync_parallelism]]]]]]]]
keyword[def] identifier[_num_tasks_per_fetch_process] ( identifier[self] ): literal[string] keyword[return] identifier[max] ( literal[int] , identifier[int] ( identifier[math] . identifier[ceil] ( literal[int] * identifier[len] ( identifier[self] . identifier[tasks] )/ identifier[self] . identifier[_sync_parallelism] )))
def _num_tasks_per_fetch_process(self): """ How many Celery tasks should be sent to each worker process. :return: Number of tasks that should be used per process :rtype: int """ return max(1, int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))
def _http_get_json(self, url):
    """
    Make an HTTP GET request to the specified URL, check that it returned a
    JSON response, and returned the data parsed from that response.

    Parameters
    ----------
    url
        The URL to GET.

    Returns
    -------
    Dictionary of data parsed from a JSON HTTP response.

    Exceptions
    ----------
    * PythonKCMeetupsBadJson
    * PythonKCMeetupsBadResponse
    * PythonKCMeetupsMeetupDown
    * PythonKCMeetupsNotJson
    * PythonKCMeetupsRateLimitExceeded
    """
    response = self._http_get(url)
    content_type = response.headers['content-type']
    # The subtype (index 1) must indicate JSON; some APIs label it javascript.
    subtype = mimeparse.parse_mime_type(content_type)[1]
    if subtype not in ('json', 'javascript'):
        raise PythonKCMeetupsNotJson(content_type)
    try:
        return json.loads(response.content)
    except ValueError as err:
        raise PythonKCMeetupsBadJson(err)
def function[_http_get_json, parameter[self, url]]: constant[ Make an HTTP GET request to the specified URL, check that it returned a JSON response, and returned the data parsed from that response. Parameters ---------- url The URL to GET. Returns ------- Dictionary of data parsed from a JSON HTTP response. Exceptions ---------- * PythonKCMeetupsBadJson * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsNotJson * PythonKCMeetupsRateLimitExceeded ] variable[response] assign[=] call[name[self]._http_get, parameter[name[url]]] variable[content_type] assign[=] call[name[response].headers][constant[content-type]] variable[parsed_mimetype] assign[=] call[name[mimeparse].parse_mime_type, parameter[name[content_type]]] if compare[call[name[parsed_mimetype]][constant[1]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da18bccb910>, <ast.Constant object at 0x7da18bccab30>]]] begin[:] <ast.Raise object at 0x7da18bcc8c10> <ast.Try object at 0x7da18bcc9930>
keyword[def] identifier[_http_get_json] ( identifier[self] , identifier[url] ): literal[string] identifier[response] = identifier[self] . identifier[_http_get] ( identifier[url] ) identifier[content_type] = identifier[response] . identifier[headers] [ literal[string] ] identifier[parsed_mimetype] = identifier[mimeparse] . identifier[parse_mime_type] ( identifier[content_type] ) keyword[if] identifier[parsed_mimetype] [ literal[int] ] keyword[not] keyword[in] ( literal[string] , literal[string] ): keyword[raise] identifier[PythonKCMeetupsNotJson] ( identifier[content_type] ) keyword[try] : keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[content] ) keyword[except] identifier[ValueError] keyword[as] identifier[e] : keyword[raise] identifier[PythonKCMeetupsBadJson] ( identifier[e] )
def _http_get_json(self, url): """ Make an HTTP GET request to the specified URL, check that it returned a JSON response, and returned the data parsed from that response. Parameters ---------- url The URL to GET. Returns ------- Dictionary of data parsed from a JSON HTTP response. Exceptions ---------- * PythonKCMeetupsBadJson * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsNotJson * PythonKCMeetupsRateLimitExceeded """ response = self._http_get(url) content_type = response.headers['content-type'] parsed_mimetype = mimeparse.parse_mime_type(content_type) if parsed_mimetype[1] not in ('json', 'javascript'): raise PythonKCMeetupsNotJson(content_type) # depends on [control=['if'], data=[]] try: return json.loads(response.content) # depends on [control=['try'], data=[]] except ValueError as e: raise PythonKCMeetupsBadJson(e) # depends on [control=['except'], data=['e']]
def simple_sepa_transfer(self, account: SEPAAccount, iban: str, bic: str,
                         recipient_name: str, amount: Decimal, account_name: str,
                         reason: str, endtoend_id='NOTPROVIDED'):
    """
    Simple SEPA transfer.

    :param account: SEPAAccount to start the transfer from.
    :param iban: Recipient's IBAN
    :param bic: Recipient's BIC
    :param recipient_name: Recipient name
    :param amount: Amount as a ``Decimal``
    :param account_name: Sender account name
    :param reason: Transfer reason
    :param endtoend_id: End-to-end-Id (defaults to ``NOTPROVIDED``)
    :return: Returns either a NeedRetryResponse or TransactionResponse
    """
    transfer = SepaTransfer({
        "name": account_name,
        "IBAN": account.iban,
        "BIC": account.bic,
        "batch": False,
        "currency": "EUR",
    }, 'pain.001.001.03')
    transfer.add_payment({
        "name": recipient_name,
        "IBAN": iban,
        "BIC": bic,
        "amount": round(Decimal(amount) * 100),  # in cents
        "execution_date": datetime.date(1999, 1, 1),
        "description": reason,
        "endtoend_id": endtoend_id,
    })
    # Hand the generated pain.001 XML to the generic SEPA transfer machinery.
    return self.sepa_transfer(account, transfer.export().decode())
def function[simple_sepa_transfer, parameter[self, account, iban, bic, recipient_name, amount, account_name, reason, endtoend_id]]: constant[ Simple SEPA transfer. :param account: SEPAAccount to start the transfer from. :param iban: Recipient's IBAN :param bic: Recipient's BIC :param recipient_name: Recipient name :param amount: Amount as a ``Decimal`` :param account_name: Sender account name :param reason: Transfer reason :param endtoend_id: End-to-end-Id (defaults to ``NOTPROVIDED``) :return: Returns either a NeedRetryResponse or TransactionResponse ] variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da18ede4b80>, <ast.Constant object at 0x7da18ede7100>, <ast.Constant object at 0x7da18ede7e20>, <ast.Constant object at 0x7da18ede6800>, <ast.Constant object at 0x7da18ede6380>], [<ast.Name object at 0x7da18ede79a0>, <ast.Attribute object at 0x7da18ede4790>, <ast.Attribute object at 0x7da18ede60b0>, <ast.Constant object at 0x7da18ede6e30>, <ast.Constant object at 0x7da18ede7fd0>]] variable[sepa] assign[=] call[name[SepaTransfer], parameter[name[config], constant[pain.001.001.03]]] variable[payment] assign[=] dictionary[[<ast.Constant object at 0x7da18ede5b40>, <ast.Constant object at 0x7da18ede6680>, <ast.Constant object at 0x7da18ede7ca0>, <ast.Constant object at 0x7da18ede4910>, <ast.Constant object at 0x7da18ede6620>, <ast.Constant object at 0x7da18ede5d50>, <ast.Constant object at 0x7da18ede4520>], [<ast.Name object at 0x7da18ede6170>, <ast.Name object at 0x7da18ede5720>, <ast.Name object at 0x7da18ede7e80>, <ast.Call object at 0x7da18ede6bc0>, <ast.Call object at 0x7da18ede4e20>, <ast.Name object at 0x7da18ede6c20>, <ast.Name object at 0x7da18ede51b0>]] call[name[sepa].add_payment, parameter[name[payment]]] variable[xml] assign[=] call[call[name[sepa].export, parameter[]].decode, parameter[]] return[call[name[self].sepa_transfer, parameter[name[account], name[xml]]]]
keyword[def] identifier[simple_sepa_transfer] ( identifier[self] , identifier[account] : identifier[SEPAAccount] , identifier[iban] : identifier[str] , identifier[bic] : identifier[str] , identifier[recipient_name] : identifier[str] , identifier[amount] : identifier[Decimal] , identifier[account_name] : identifier[str] , identifier[reason] : identifier[str] , identifier[endtoend_id] = literal[string] ): literal[string] identifier[config] ={ literal[string] : identifier[account_name] , literal[string] : identifier[account] . identifier[iban] , literal[string] : identifier[account] . identifier[bic] , literal[string] : keyword[False] , literal[string] : literal[string] , } identifier[sepa] = identifier[SepaTransfer] ( identifier[config] , literal[string] ) identifier[payment] ={ literal[string] : identifier[recipient_name] , literal[string] : identifier[iban] , literal[string] : identifier[bic] , literal[string] : identifier[round] ( identifier[Decimal] ( identifier[amount] )* literal[int] ), literal[string] : identifier[datetime] . identifier[date] ( literal[int] , literal[int] , literal[int] ), literal[string] : identifier[reason] , literal[string] : identifier[endtoend_id] , } identifier[sepa] . identifier[add_payment] ( identifier[payment] ) identifier[xml] = identifier[sepa] . identifier[export] (). identifier[decode] () keyword[return] identifier[self] . identifier[sepa_transfer] ( identifier[account] , identifier[xml] )
def simple_sepa_transfer(self, account: SEPAAccount, iban: str, bic: str, recipient_name: str, amount: Decimal, account_name: str, reason: str, endtoend_id='NOTPROVIDED'): """ Simple SEPA transfer. :param account: SEPAAccount to start the transfer from. :param iban: Recipient's IBAN :param bic: Recipient's BIC :param recipient_name: Recipient name :param amount: Amount as a ``Decimal`` :param account_name: Sender account name :param reason: Transfer reason :param endtoend_id: End-to-end-Id (defaults to ``NOTPROVIDED``) :return: Returns either a NeedRetryResponse or TransactionResponse """ config = {'name': account_name, 'IBAN': account.iban, 'BIC': account.bic, 'batch': False, 'currency': 'EUR'} sepa = SepaTransfer(config, 'pain.001.001.03') # in cents payment = {'name': recipient_name, 'IBAN': iban, 'BIC': bic, 'amount': round(Decimal(amount) * 100), 'execution_date': datetime.date(1999, 1, 1), 'description': reason, 'endtoend_id': endtoend_id} sepa.add_payment(payment) xml = sepa.export().decode() return self.sepa_transfer(account, xml)
def find_courses(self, partial):
    """Finds all courses by a given substring. This is case-insensitive.
    """
    needle = partial.lower()
    # Deduplicate via a set comprehension; order of the result is unspecified.
    matched = {self.courses[name] for name in self.courses
               if needle in name.lower()}
    return list(matched)
def function[find_courses, parameter[self, partial]]: constant[Finds all courses by a given substring. This is case-insensitive. ] variable[partial] assign[=] call[name[partial].lower, parameter[]] variable[keys] assign[=] call[name[self].courses.keys, parameter[]] variable[keys] assign[=] <ast.ListComp object at 0x7da204620c40> variable[courses] assign[=] <ast.ListComp object at 0x7da204622410> return[call[name[list], parameter[call[name[set], parameter[name[courses]]]]]]
keyword[def] identifier[find_courses] ( identifier[self] , identifier[partial] ): literal[string] identifier[partial] = identifier[partial] . identifier[lower] () identifier[keys] = identifier[self] . identifier[courses] . identifier[keys] () identifier[keys] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[keys] keyword[if] identifier[k] . identifier[lower] (). identifier[find] ( identifier[partial] )!=- literal[int] ] identifier[courses] =[ identifier[self] . identifier[courses] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[keys] ] keyword[return] identifier[list] ( identifier[set] ( identifier[courses] ))
def find_courses(self, partial): """Finds all courses by a given substring. This is case-insensitive. """ partial = partial.lower() keys = self.courses.keys() keys = [k for k in keys if k.lower().find(partial) != -1] courses = [self.courses[k] for k in keys] return list(set(courses))
def smart_content_encoding(self):
    """Smart content encoding."""
    encoding = self.content_encoding
    if encoding:
        return encoding
    # No explicit encoding: guess from the filename, peeling off trailing
    # extensions until a guess succeeds or only the stem remains.
    parts = self.basename.split('.')
    while len(parts) > 1:
        encoding = mimetypes.guess_type('.'.join(parts))[1]
        if encoding:
            break
        parts.pop()
    return encoding
def function[smart_content_encoding, parameter[self]]: constant[Smart content encoding.] variable[encoding] assign[=] name[self].content_encoding if <ast.UnaryOp object at 0x7da18f00cd90> begin[:] variable[base_list] assign[=] call[name[self].basename.split, parameter[constant[.]]] while <ast.BoolOp object at 0x7da18f00f910> begin[:] <ast.Tuple object at 0x7da18f00c3a0> assign[=] call[name[mimetypes].guess_type, parameter[call[constant[.].join, parameter[name[base_list]]]]] call[name[base_list].pop, parameter[]] return[name[encoding]]
keyword[def] identifier[smart_content_encoding] ( identifier[self] ): literal[string] identifier[encoding] = identifier[self] . identifier[content_encoding] keyword[if] keyword[not] identifier[encoding] : identifier[base_list] = identifier[self] . identifier[basename] . identifier[split] ( literal[string] ) keyword[while] ( keyword[not] identifier[encoding] ) keyword[and] identifier[len] ( identifier[base_list] )> literal[int] : identifier[_] , identifier[encoding] = identifier[mimetypes] . identifier[guess_type] ( literal[string] . identifier[join] ( identifier[base_list] )) identifier[base_list] . identifier[pop] () keyword[return] identifier[encoding]
def smart_content_encoding(self): """Smart content encoding.""" encoding = self.content_encoding if not encoding: base_list = self.basename.split('.') while not encoding and len(base_list) > 1: (_, encoding) = mimetypes.guess_type('.'.join(base_list)) base_list.pop() # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] return encoding
def time_signature_event(self, meter=(4, 4)):
    """Return a time signature event for meter."""
    # Numerator is encoded directly; denominator as its base-2 exponent.
    numerator = a2b_hex('%02x' % meter[0])
    denominator = a2b_hex('%02x' % int(log(meter[1], 2)))
    return (self.delta_time + META_EVENT + TIME_SIGNATURE + '\x04'
            + numerator + denominator + '\x18\x08')
def function[time_signature_event, parameter[self, meter]]: constant[Return a time signature event for meter.] variable[numer] assign[=] call[name[a2b_hex], parameter[binary_operation[constant[%02x] <ast.Mod object at 0x7da2590d6920> call[name[meter]][constant[0]]]]] variable[denom] assign[=] call[name[a2b_hex], parameter[binary_operation[constant[%02x] <ast.Mod object at 0x7da2590d6920> call[name[int], parameter[call[name[log], parameter[call[name[meter]][constant[1]], constant[2]]]]]]]] return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[self].delta_time + name[META_EVENT]] + name[TIME_SIGNATURE]] + constant[]] + name[numer]] + name[denom]] + constant[]]]
keyword[def] identifier[time_signature_event] ( identifier[self] , identifier[meter] =( literal[int] , literal[int] )): literal[string] identifier[numer] = identifier[a2b_hex] ( literal[string] % identifier[meter] [ literal[int] ]) identifier[denom] = identifier[a2b_hex] ( literal[string] % identifier[int] ( identifier[log] ( identifier[meter] [ literal[int] ], literal[int] ))) keyword[return] identifier[self] . identifier[delta_time] + identifier[META_EVENT] + identifier[TIME_SIGNATURE] + literal[string] + identifier[numer] + identifier[denom] + literal[string]
def time_signature_event(self, meter=(4, 4)): """Return a time signature event for meter.""" numer = a2b_hex('%02x' % meter[0]) denom = a2b_hex('%02x' % int(log(meter[1], 2))) return self.delta_time + META_EVENT + TIME_SIGNATURE + '\x04' + numer + denom + '\x18\x08'
def delete_grading_period_accounts(self, id, account_id): """ Delete a grading period. <b>204 No Content</b> response code is returned if the deletion was successful. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/accounts/{account_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/accounts/{account_id}/grading_periods/{id}".format(**path), data=data, params=params, no_data=True)
def function[delete_grading_period_accounts, parameter[self, id, account_id]]: constant[ Delete a grading period. <b>204 No Content</b> response code is returned if the deletion was successful. ] variable[path] assign[=] dictionary[[], []] variable[data] assign[=] dictionary[[], []] variable[params] assign[=] dictionary[[], []] constant[ID] call[name[path]][constant[account_id]] assign[=] name[account_id] constant[ID] call[name[path]][constant[id]] assign[=] name[id] call[name[self].logger.debug, parameter[call[constant[DELETE /api/v1/accounts/{account_id}/grading_periods/{id} with query params: {params} and form data: {data}].format, parameter[]]]] return[call[name[self].generic_request, parameter[constant[DELETE], call[constant[/api/v1/accounts/{account_id}/grading_periods/{id}].format, parameter[]]]]]
keyword[def] identifier[delete_grading_period_accounts] ( identifier[self] , identifier[id] , identifier[account_id] ): literal[string] identifier[path] ={} identifier[data] ={} identifier[params] ={} literal[string] identifier[path] [ literal[string] ]= identifier[account_id] literal[string] identifier[path] [ literal[string] ]= identifier[id] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] )) keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[no_data] = keyword[True] )
def delete_grading_period_accounts(self, id, account_id): """ Delete a grading period. <b>204 No Content</b> response code is returned if the deletion was successful. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id 'ID' path['account_id'] = account_id # REQUIRED - PATH - id 'ID' path['id'] = id self.logger.debug('DELETE /api/v1/accounts/{account_id}/grading_periods/{id} with query params: {params} and form data: {data}'.format(params=params, data=data, **path)) return self.generic_request('DELETE', '/api/v1/accounts/{account_id}/grading_periods/{id}'.format(**path), data=data, params=params, no_data=True)
def initialize(self, conf, ctx): """Initialization steps: 1. Prepare elasticsearch connection, including details for indexing. """ config = get_config()['ElasticsearchIndexBolt'] elasticsearch_class = import_name(config['elasticsearch_class']) self.es = elasticsearch_class(**config['elasticsearch_init']) self.index = config['index'] self.doc_type = config['doc_type']
def function[initialize, parameter[self, conf, ctx]]: constant[Initialization steps: 1. Prepare elasticsearch connection, including details for indexing. ] variable[config] assign[=] call[call[name[get_config], parameter[]]][constant[ElasticsearchIndexBolt]] variable[elasticsearch_class] assign[=] call[name[import_name], parameter[call[name[config]][constant[elasticsearch_class]]]] name[self].es assign[=] call[name[elasticsearch_class], parameter[]] name[self].index assign[=] call[name[config]][constant[index]] name[self].doc_type assign[=] call[name[config]][constant[doc_type]]
keyword[def] identifier[initialize] ( identifier[self] , identifier[conf] , identifier[ctx] ): literal[string] identifier[config] = identifier[get_config] ()[ literal[string] ] identifier[elasticsearch_class] = identifier[import_name] ( identifier[config] [ literal[string] ]) identifier[self] . identifier[es] = identifier[elasticsearch_class] (** identifier[config] [ literal[string] ]) identifier[self] . identifier[index] = identifier[config] [ literal[string] ] identifier[self] . identifier[doc_type] = identifier[config] [ literal[string] ]
def initialize(self, conf, ctx): """Initialization steps: 1. Prepare elasticsearch connection, including details for indexing. """ config = get_config()['ElasticsearchIndexBolt'] elasticsearch_class = import_name(config['elasticsearch_class']) self.es = elasticsearch_class(**config['elasticsearch_init']) self.index = config['index'] self.doc_type = config['doc_type']
def robust_zscore(mat, ctrl_mat=None, min_mad=0.1): ''' Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data ''' # If optional df exists, calc medians and mads from it if ctrl_mat is not None: medians = ctrl_mat.median(axis=1) median_devs = abs(ctrl_mat.subtract(medians, axis=0)) # Else just use plate medians else: medians = mat.median(axis=1) median_devs = abs(mat.subtract(medians, axis=0)) sub = mat.subtract(medians, axis='index') mads = median_devs.median(axis=1) # Threshold mads mads = mads.clip(lower=min_mad) # Must multiply values by 1.4826 to make MAD comparable to SD # (https://en.wikipedia.org/wiki/Median_absolute_deviation) zscore_df = sub.divide(mads * 1.4826, axis='index') return zscore_df.round(rounding_precision)
def function[robust_zscore, parameter[mat, ctrl_mat, min_mad]]: constant[ Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data ] if compare[name[ctrl_mat] is_not constant[None]] begin[:] variable[medians] assign[=] call[name[ctrl_mat].median, parameter[]] variable[median_devs] assign[=] call[name[abs], parameter[call[name[ctrl_mat].subtract, parameter[name[medians]]]]] variable[sub] assign[=] call[name[mat].subtract, parameter[name[medians]]] variable[mads] assign[=] call[name[median_devs].median, parameter[]] variable[mads] assign[=] call[name[mads].clip, parameter[]] variable[zscore_df] assign[=] call[name[sub].divide, parameter[binary_operation[name[mads] * constant[1.4826]]]] return[call[name[zscore_df].round, parameter[name[rounding_precision]]]]
keyword[def] identifier[robust_zscore] ( identifier[mat] , identifier[ctrl_mat] = keyword[None] , identifier[min_mad] = literal[int] ): literal[string] keyword[if] identifier[ctrl_mat] keyword[is] keyword[not] keyword[None] : identifier[medians] = identifier[ctrl_mat] . identifier[median] ( identifier[axis] = literal[int] ) identifier[median_devs] = identifier[abs] ( identifier[ctrl_mat] . identifier[subtract] ( identifier[medians] , identifier[axis] = literal[int] )) keyword[else] : identifier[medians] = identifier[mat] . identifier[median] ( identifier[axis] = literal[int] ) identifier[median_devs] = identifier[abs] ( identifier[mat] . identifier[subtract] ( identifier[medians] , identifier[axis] = literal[int] )) identifier[sub] = identifier[mat] . identifier[subtract] ( identifier[medians] , identifier[axis] = literal[string] ) identifier[mads] = identifier[median_devs] . identifier[median] ( identifier[axis] = literal[int] ) identifier[mads] = identifier[mads] . identifier[clip] ( identifier[lower] = identifier[min_mad] ) identifier[zscore_df] = identifier[sub] . identifier[divide] ( identifier[mads] * literal[int] , identifier[axis] = literal[string] ) keyword[return] identifier[zscore_df] . identifier[round] ( identifier[rounding_precision] )
def robust_zscore(mat, ctrl_mat=None, min_mad=0.1): """ Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data """ # If optional df exists, calc medians and mads from it if ctrl_mat is not None: medians = ctrl_mat.median(axis=1) median_devs = abs(ctrl_mat.subtract(medians, axis=0)) # depends on [control=['if'], data=['ctrl_mat']] else: # Else just use plate medians medians = mat.median(axis=1) median_devs = abs(mat.subtract(medians, axis=0)) sub = mat.subtract(medians, axis='index') mads = median_devs.median(axis=1) # Threshold mads mads = mads.clip(lower=min_mad) # Must multiply values by 1.4826 to make MAD comparable to SD # (https://en.wikipedia.org/wiki/Median_absolute_deviation) zscore_df = sub.divide(mads * 1.4826, axis='index') return zscore_df.round(rounding_precision)
def _count_inversions(a, b): '''Count the number of inversions in two numpy arrays: # points i, j where a[i] >= b[j] Parameters ---------- a, b : np.ndarray, shape=(n,) (m,) The arrays to be compared. This implementation is optimized for arrays with many repeated values. Returns ------- inversions : int The number of detected inversions ''' a, a_counts = np.unique(a, return_counts=True) b, b_counts = np.unique(b, return_counts=True) inversions = 0 i = 0 j = 0 while i < len(a) and j < len(b): if a[i] < b[j]: i += 1 elif a[i] >= b[j]: inversions += np.sum(a_counts[i:]) * b_counts[j] j += 1 return inversions
def function[_count_inversions, parameter[a, b]]: constant[Count the number of inversions in two numpy arrays: # points i, j where a[i] >= b[j] Parameters ---------- a, b : np.ndarray, shape=(n,) (m,) The arrays to be compared. This implementation is optimized for arrays with many repeated values. Returns ------- inversions : int The number of detected inversions ] <ast.Tuple object at 0x7da20e957610> assign[=] call[name[np].unique, parameter[name[a]]] <ast.Tuple object at 0x7da20e9547f0> assign[=] call[name[np].unique, parameter[name[b]]] variable[inversions] assign[=] constant[0] variable[i] assign[=] constant[0] variable[j] assign[=] constant[0] while <ast.BoolOp object at 0x7da20e9560e0> begin[:] if compare[call[name[a]][name[i]] less[<] call[name[b]][name[j]]] begin[:] <ast.AugAssign object at 0x7da20e954460> return[name[inversions]]
keyword[def] identifier[_count_inversions] ( identifier[a] , identifier[b] ): literal[string] identifier[a] , identifier[a_counts] = identifier[np] . identifier[unique] ( identifier[a] , identifier[return_counts] = keyword[True] ) identifier[b] , identifier[b_counts] = identifier[np] . identifier[unique] ( identifier[b] , identifier[return_counts] = keyword[True] ) identifier[inversions] = literal[int] identifier[i] = literal[int] identifier[j] = literal[int] keyword[while] identifier[i] < identifier[len] ( identifier[a] ) keyword[and] identifier[j] < identifier[len] ( identifier[b] ): keyword[if] identifier[a] [ identifier[i] ]< identifier[b] [ identifier[j] ]: identifier[i] += literal[int] keyword[elif] identifier[a] [ identifier[i] ]>= identifier[b] [ identifier[j] ]: identifier[inversions] += identifier[np] . identifier[sum] ( identifier[a_counts] [ identifier[i] :])* identifier[b_counts] [ identifier[j] ] identifier[j] += literal[int] keyword[return] identifier[inversions]
def _count_inversions(a, b): """Count the number of inversions in two numpy arrays: # points i, j where a[i] >= b[j] Parameters ---------- a, b : np.ndarray, shape=(n,) (m,) The arrays to be compared. This implementation is optimized for arrays with many repeated values. Returns ------- inversions : int The number of detected inversions """ (a, a_counts) = np.unique(a, return_counts=True) (b, b_counts) = np.unique(b, return_counts=True) inversions = 0 i = 0 j = 0 while i < len(a) and j < len(b): if a[i] < b[j]: i += 1 # depends on [control=['if'], data=[]] elif a[i] >= b[j]: inversions += np.sum(a_counts[i:]) * b_counts[j] j += 1 # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] return inversions
def list(gandi, id, vhosts, dates, fqdns, limit): """ List hosted certificates. """ justify = 10 options = {'items_per_page': limit, 'state': 'created'} output_keys = [] if id: output_keys.append('id') output_keys.append('subject') if dates: output_keys.extend(['date_created', 'date_expire']) justify = 12 if fqdns: output_keys.append('fqdns') if vhosts: output_keys.append('vhosts') result = gandi.hostedcert.list(options) for num, hcert in enumerate(result): if num: gandi.separator_line() if fqdns or vhosts: hcert = gandi.hostedcert.info(hcert['id']) output_hostedcert(gandi, hcert, output_keys, justify) return result
def function[list, parameter[gandi, id, vhosts, dates, fqdns, limit]]: constant[ List hosted certificates. ] variable[justify] assign[=] constant[10] variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da1b2347d30>, <ast.Constant object at 0x7da1b2345630>], [<ast.Name object at 0x7da1b2347af0>, <ast.Constant object at 0x7da1b2345ab0>]] variable[output_keys] assign[=] list[[]] if name[id] begin[:] call[name[output_keys].append, parameter[constant[id]]] call[name[output_keys].append, parameter[constant[subject]]] if name[dates] begin[:] call[name[output_keys].extend, parameter[list[[<ast.Constant object at 0x7da1b2345f00>, <ast.Constant object at 0x7da1b2346890>]]]] variable[justify] assign[=] constant[12] if name[fqdns] begin[:] call[name[output_keys].append, parameter[constant[fqdns]]] if name[vhosts] begin[:] call[name[output_keys].append, parameter[constant[vhosts]]] variable[result] assign[=] call[name[gandi].hostedcert.list, parameter[name[options]]] for taget[tuple[[<ast.Name object at 0x7da1b2345d20>, <ast.Name object at 0x7da1b2346da0>]]] in starred[call[name[enumerate], parameter[name[result]]]] begin[:] if name[num] begin[:] call[name[gandi].separator_line, parameter[]] if <ast.BoolOp object at 0x7da1b2347c10> begin[:] variable[hcert] assign[=] call[name[gandi].hostedcert.info, parameter[call[name[hcert]][constant[id]]]] call[name[output_hostedcert], parameter[name[gandi], name[hcert], name[output_keys], name[justify]]] return[name[result]]
keyword[def] identifier[list] ( identifier[gandi] , identifier[id] , identifier[vhosts] , identifier[dates] , identifier[fqdns] , identifier[limit] ): literal[string] identifier[justify] = literal[int] identifier[options] ={ literal[string] : identifier[limit] , literal[string] : literal[string] } identifier[output_keys] =[] keyword[if] identifier[id] : identifier[output_keys] . identifier[append] ( literal[string] ) identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[dates] : identifier[output_keys] . identifier[extend] ([ literal[string] , literal[string] ]) identifier[justify] = literal[int] keyword[if] identifier[fqdns] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[vhosts] : identifier[output_keys] . identifier[append] ( literal[string] ) identifier[result] = identifier[gandi] . identifier[hostedcert] . identifier[list] ( identifier[options] ) keyword[for] identifier[num] , identifier[hcert] keyword[in] identifier[enumerate] ( identifier[result] ): keyword[if] identifier[num] : identifier[gandi] . identifier[separator_line] () keyword[if] identifier[fqdns] keyword[or] identifier[vhosts] : identifier[hcert] = identifier[gandi] . identifier[hostedcert] . identifier[info] ( identifier[hcert] [ literal[string] ]) identifier[output_hostedcert] ( identifier[gandi] , identifier[hcert] , identifier[output_keys] , identifier[justify] ) keyword[return] identifier[result]
def list(gandi, id, vhosts, dates, fqdns, limit): """ List hosted certificates. """ justify = 10 options = {'items_per_page': limit, 'state': 'created'} output_keys = [] if id: output_keys.append('id') # depends on [control=['if'], data=[]] output_keys.append('subject') if dates: output_keys.extend(['date_created', 'date_expire']) justify = 12 # depends on [control=['if'], data=[]] if fqdns: output_keys.append('fqdns') # depends on [control=['if'], data=[]] if vhosts: output_keys.append('vhosts') # depends on [control=['if'], data=[]] result = gandi.hostedcert.list(options) for (num, hcert) in enumerate(result): if num: gandi.separator_line() # depends on [control=['if'], data=[]] if fqdns or vhosts: hcert = gandi.hostedcert.info(hcert['id']) # depends on [control=['if'], data=[]] output_hostedcert(gandi, hcert, output_keys, justify) # depends on [control=['for'], data=[]] return result
def disable_logging(logger=None): """Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified. """ logger = logger or logging.getLogger() disabled = logger.disabled logger.disabled = True yield logger.disabled = disabled
def function[disable_logging, parameter[logger]]: constant[Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified. ] variable[logger] assign[=] <ast.BoolOp object at 0x7da18dc9a590> variable[disabled] assign[=] name[logger].disabled name[logger].disabled assign[=] constant[True] <ast.Yield object at 0x7da2041d87c0> name[logger].disabled assign[=] name[disabled]
keyword[def] identifier[disable_logging] ( identifier[logger] = keyword[None] ): literal[string] identifier[logger] = identifier[logger] keyword[or] identifier[logging] . identifier[getLogger] () identifier[disabled] = identifier[logger] . identifier[disabled] identifier[logger] . identifier[disabled] = keyword[True] keyword[yield] identifier[logger] . identifier[disabled] = identifier[disabled]
def disable_logging(logger=None): """Context manager to temporarily suppress all logging for a given logger or the root logger if no particular logger is specified. """ logger = logger or logging.getLogger() disabled = logger.disabled logger.disabled = True yield logger.disabled = disabled
def set_group_name(group, old_name, new_name): """ Group was renamed. """ for datastore in _get_datastores(): datastore.set_group_name(group, old_name, new_name)
def function[set_group_name, parameter[group, old_name, new_name]]: constant[ Group was renamed. ] for taget[name[datastore]] in starred[call[name[_get_datastores], parameter[]]] begin[:] call[name[datastore].set_group_name, parameter[name[group], name[old_name], name[new_name]]]
keyword[def] identifier[set_group_name] ( identifier[group] , identifier[old_name] , identifier[new_name] ): literal[string] keyword[for] identifier[datastore] keyword[in] identifier[_get_datastores] (): identifier[datastore] . identifier[set_group_name] ( identifier[group] , identifier[old_name] , identifier[new_name] )
def set_group_name(group, old_name, new_name): """ Group was renamed. """ for datastore in _get_datastores(): datastore.set_group_name(group, old_name, new_name) # depends on [control=['for'], data=['datastore']]
def version_str(version): """Convert a version tuple or string to a string. Should be returned in the form: major.minor.release """ if isinstance(version, str): return version elif isinstance(version, tuple): return '.'.join([str(int(x)) for x in version]) else: raise ValueError("Invalid version: %s" % version)
def function[version_str, parameter[version]]: constant[Convert a version tuple or string to a string. Should be returned in the form: major.minor.release ] if call[name[isinstance], parameter[name[version], name[str]]] begin[:] return[name[version]]
keyword[def] identifier[version_str] ( identifier[version] ): literal[string] keyword[if] identifier[isinstance] ( identifier[version] , identifier[str] ): keyword[return] identifier[version] keyword[elif] identifier[isinstance] ( identifier[version] , identifier[tuple] ): keyword[return] literal[string] . identifier[join] ([ identifier[str] ( identifier[int] ( identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[version] ]) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[version] )
def version_str(version): """Convert a version tuple or string to a string. Should be returned in the form: major.minor.release """ if isinstance(version, str): return version # depends on [control=['if'], data=[]] elif isinstance(version, tuple): return '.'.join([str(int(x)) for x in version]) # depends on [control=['if'], data=[]] else: raise ValueError('Invalid version: %s' % version)
def get_suggested_type_names(schema, output_type, field_name): """Go through all of the implementations of type, as well as the interfaces that they implement. If any of those types include the provided field, suggest them, sorted by how often the type is referenced, starting with Interfaces.""" if isinstance(output_type, (GraphQLInterfaceType, GraphQLUnionType)): suggested_object_types = [] interface_usage_count = OrderedDict() for possible_type in schema.get_possible_types(output_type): if not possible_type.fields.get(field_name): return # This object type defines this field. suggested_object_types.append(possible_type.name) for possible_interface in possible_type.interfaces: if not possible_interface.fields.get(field_name): continue # This interface type defines this field. interface_usage_count[possible_interface.name] = ( interface_usage_count.get(possible_interface.name, 0) + 1 ) # Suggest interface types based on how common they are. suggested_interface_types = sorted( list(interface_usage_count.keys()), key=lambda k: interface_usage_count[k], reverse=True, ) # Suggest both interface and object types. suggested_interface_types.extend(suggested_object_types) return suggested_interface_types # Otherwise, must be an Object type, which does not have possible fields. return []
def function[get_suggested_type_names, parameter[schema, output_type, field_name]]: constant[Go through all of the implementations of type, as well as the interfaces that they implement. If any of those types include the provided field, suggest them, sorted by how often the type is referenced, starting with Interfaces.] if call[name[isinstance], parameter[name[output_type], tuple[[<ast.Name object at 0x7da18f09cb20>, <ast.Name object at 0x7da18bccb790>]]]] begin[:] variable[suggested_object_types] assign[=] list[[]] variable[interface_usage_count] assign[=] call[name[OrderedDict], parameter[]] for taget[name[possible_type]] in starred[call[name[schema].get_possible_types, parameter[name[output_type]]]] begin[:] if <ast.UnaryOp object at 0x7da18bccb940> begin[:] return[None] call[name[suggested_object_types].append, parameter[name[possible_type].name]] for taget[name[possible_interface]] in starred[name[possible_type].interfaces] begin[:] if <ast.UnaryOp object at 0x7da18bcca350> begin[:] continue call[name[interface_usage_count]][name[possible_interface].name] assign[=] binary_operation[call[name[interface_usage_count].get, parameter[name[possible_interface].name, constant[0]]] + constant[1]] variable[suggested_interface_types] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[interface_usage_count].keys, parameter[]]]]]] call[name[suggested_interface_types].extend, parameter[name[suggested_object_types]]] return[name[suggested_interface_types]] return[list[[]]]
keyword[def] identifier[get_suggested_type_names] ( identifier[schema] , identifier[output_type] , identifier[field_name] ): literal[string] keyword[if] identifier[isinstance] ( identifier[output_type] ,( identifier[GraphQLInterfaceType] , identifier[GraphQLUnionType] )): identifier[suggested_object_types] =[] identifier[interface_usage_count] = identifier[OrderedDict] () keyword[for] identifier[possible_type] keyword[in] identifier[schema] . identifier[get_possible_types] ( identifier[output_type] ): keyword[if] keyword[not] identifier[possible_type] . identifier[fields] . identifier[get] ( identifier[field_name] ): keyword[return] identifier[suggested_object_types] . identifier[append] ( identifier[possible_type] . identifier[name] ) keyword[for] identifier[possible_interface] keyword[in] identifier[possible_type] . identifier[interfaces] : keyword[if] keyword[not] identifier[possible_interface] . identifier[fields] . identifier[get] ( identifier[field_name] ): keyword[continue] identifier[interface_usage_count] [ identifier[possible_interface] . identifier[name] ]=( identifier[interface_usage_count] . identifier[get] ( identifier[possible_interface] . identifier[name] , literal[int] )+ literal[int] ) identifier[suggested_interface_types] = identifier[sorted] ( identifier[list] ( identifier[interface_usage_count] . identifier[keys] ()), identifier[key] = keyword[lambda] identifier[k] : identifier[interface_usage_count] [ identifier[k] ], identifier[reverse] = keyword[True] , ) identifier[suggested_interface_types] . identifier[extend] ( identifier[suggested_object_types] ) keyword[return] identifier[suggested_interface_types] keyword[return] []
def get_suggested_type_names(schema, output_type, field_name): """Go through all of the implementations of type, as well as the interfaces that they implement. If any of those types include the provided field, suggest them, sorted by how often the type is referenced, starting with Interfaces.""" if isinstance(output_type, (GraphQLInterfaceType, GraphQLUnionType)): suggested_object_types = [] interface_usage_count = OrderedDict() for possible_type in schema.get_possible_types(output_type): if not possible_type.fields.get(field_name): return # depends on [control=['if'], data=[]] # This object type defines this field. suggested_object_types.append(possible_type.name) for possible_interface in possible_type.interfaces: if not possible_interface.fields.get(field_name): continue # depends on [control=['if'], data=[]] # This interface type defines this field. interface_usage_count[possible_interface.name] = interface_usage_count.get(possible_interface.name, 0) + 1 # depends on [control=['for'], data=['possible_interface']] # depends on [control=['for'], data=['possible_type']] # Suggest interface types based on how common they are. suggested_interface_types = sorted(list(interface_usage_count.keys()), key=lambda k: interface_usage_count[k], reverse=True) # Suggest both interface and object types. suggested_interface_types.extend(suggested_object_types) return suggested_interface_types # depends on [control=['if'], data=[]] # Otherwise, must be an Object type, which does not have possible fields. return []
def dump(self, f): """Write Wavefront data to file. Takes File object or filename.""" try: f.write(self._data) except AttributeError: with open(f, 'w') as wf: wf.write(self._data)
def function[dump, parameter[self, f]]: constant[Write Wavefront data to file. Takes File object or filename.] <ast.Try object at 0x7da20c796140>
keyword[def] identifier[dump] ( identifier[self] , identifier[f] ): literal[string] keyword[try] : identifier[f] . identifier[write] ( identifier[self] . identifier[_data] ) keyword[except] identifier[AttributeError] : keyword[with] identifier[open] ( identifier[f] , literal[string] ) keyword[as] identifier[wf] : identifier[wf] . identifier[write] ( identifier[self] . identifier[_data] )
def dump(self, f): """Write Wavefront data to file. Takes File object or filename.""" try: f.write(self._data) # depends on [control=['try'], data=[]] except AttributeError: with open(f, 'w') as wf: wf.write(self._data) # depends on [control=['with'], data=['wf']] # depends on [control=['except'], data=[]]
def enable_cache(): """Enable requests library cache.""" try: import requests_cache except ImportError as err: sys.stderr.write('Failed to enable cache: {0}\n'.format(str(err))) return if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR) requests_cache.install_cache(CACHE_FILE)
def function[enable_cache, parameter[]]: constant[Enable requests library cache.] <ast.Try object at 0x7da18fe93eb0> if <ast.UnaryOp object at 0x7da18fe900a0> begin[:] call[name[os].makedirs, parameter[name[CACHE_DIR]]] call[name[requests_cache].install_cache, parameter[name[CACHE_FILE]]]
keyword[def] identifier[enable_cache] (): literal[string] keyword[try] : keyword[import] identifier[requests_cache] keyword[except] identifier[ImportError] keyword[as] identifier[err] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] . identifier[format] ( identifier[str] ( identifier[err] ))) keyword[return] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[CACHE_DIR] ): identifier[os] . identifier[makedirs] ( identifier[CACHE_DIR] ) identifier[requests_cache] . identifier[install_cache] ( identifier[CACHE_FILE] )
def enable_cache(): """Enable requests library cache.""" try: import requests_cache # depends on [control=['try'], data=[]] except ImportError as err: sys.stderr.write('Failed to enable cache: {0}\n'.format(str(err))) return # depends on [control=['except'], data=['err']] if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR) # depends on [control=['if'], data=[]] requests_cache.install_cache(CACHE_FILE)
def parse(self, data): # type: (bytes) -> None ''' Parse the passed in data into a UDF Logical Volume Implementation Use. Parameters: data - The data to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Implementation Use already initialized') (impl_id, self.num_files, self.num_dirs, self.min_udf_read_revision, self.min_udf_write_revision, self.max_udf_write_revision) = struct.unpack_from(self.FMT, data, 0) self.impl_id = UDFEntityID() self.impl_id.parse(impl_id) self.impl_use = data[46:] self._initialized = True
def function[parse, parameter[self, data]]: constant[ Parse the passed in data into a UDF Logical Volume Implementation Use. Parameters: data - The data to parse. Returns: Nothing. ] if name[self]._initialized begin[:] <ast.Raise object at 0x7da1b0f0d930> <ast.Tuple object at 0x7da1b0f0e5f0> assign[=] call[name[struct].unpack_from, parameter[name[self].FMT, name[data], constant[0]]] name[self].impl_id assign[=] call[name[UDFEntityID], parameter[]] call[name[self].impl_id.parse, parameter[name[impl_id]]] name[self].impl_use assign[=] call[name[data]][<ast.Slice object at 0x7da1b0f0eb60>] name[self]._initialized assign[=] constant[True]
keyword[def] identifier[parse] ( identifier[self] , identifier[data] ): literal[string] keyword[if] identifier[self] . identifier[_initialized] : keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] ) ( identifier[impl_id] , identifier[self] . identifier[num_files] , identifier[self] . identifier[num_dirs] , identifier[self] . identifier[min_udf_read_revision] , identifier[self] . identifier[min_udf_write_revision] , identifier[self] . identifier[max_udf_write_revision] )= identifier[struct] . identifier[unpack_from] ( identifier[self] . identifier[FMT] , identifier[data] , literal[int] ) identifier[self] . identifier[impl_id] = identifier[UDFEntityID] () identifier[self] . identifier[impl_id] . identifier[parse] ( identifier[impl_id] ) identifier[self] . identifier[impl_use] = identifier[data] [ literal[int] :] identifier[self] . identifier[_initialized] = keyword[True]
def parse(self, data): # type: (bytes) -> None '\n Parse the passed in data into a UDF Logical Volume Implementation Use.\n\n Parameters:\n data - The data to parse.\n Returns:\n Nothing.\n ' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Implementation Use already initialized') # depends on [control=['if'], data=[]] (impl_id, self.num_files, self.num_dirs, self.min_udf_read_revision, self.min_udf_write_revision, self.max_udf_write_revision) = struct.unpack_from(self.FMT, data, 0) self.impl_id = UDFEntityID() self.impl_id.parse(impl_id) self.impl_use = data[46:] self._initialized = True
def delta_e_cie1994(color1, color2, K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015): """ Calculates the Delta E (CIE1994) of two colors. K_l: 0.045 graphic arts 0.048 textiles K_2: 0.015 graphic arts 0.014 textiles K_L: 1 default 2 textiles """ color1_vector = _get_lab_color1_vector(color1) color2_matrix = _get_lab_color2_matrix(color2) delta_e = color_diff_matrix.delta_e_cie1994( color1_vector, color2_matrix, K_L=K_L, K_C=K_C, K_H=K_H, K_1=K_1, K_2=K_2)[0] return numpy.asscalar(delta_e)
def function[delta_e_cie1994, parameter[color1, color2, K_L, K_C, K_H, K_1, K_2]]: constant[ Calculates the Delta E (CIE1994) of two colors. K_l: 0.045 graphic arts 0.048 textiles K_2: 0.015 graphic arts 0.014 textiles K_L: 1 default 2 textiles ] variable[color1_vector] assign[=] call[name[_get_lab_color1_vector], parameter[name[color1]]] variable[color2_matrix] assign[=] call[name[_get_lab_color2_matrix], parameter[name[color2]]] variable[delta_e] assign[=] call[call[name[color_diff_matrix].delta_e_cie1994, parameter[name[color1_vector], name[color2_matrix]]]][constant[0]] return[call[name[numpy].asscalar, parameter[name[delta_e]]]]
keyword[def] identifier[delta_e_cie1994] ( identifier[color1] , identifier[color2] , identifier[K_L] = literal[int] , identifier[K_C] = literal[int] , identifier[K_H] = literal[int] , identifier[K_1] = literal[int] , identifier[K_2] = literal[int] ): literal[string] identifier[color1_vector] = identifier[_get_lab_color1_vector] ( identifier[color1] ) identifier[color2_matrix] = identifier[_get_lab_color2_matrix] ( identifier[color2] ) identifier[delta_e] = identifier[color_diff_matrix] . identifier[delta_e_cie1994] ( identifier[color1_vector] , identifier[color2_matrix] , identifier[K_L] = identifier[K_L] , identifier[K_C] = identifier[K_C] , identifier[K_H] = identifier[K_H] , identifier[K_1] = identifier[K_1] , identifier[K_2] = identifier[K_2] )[ literal[int] ] keyword[return] identifier[numpy] . identifier[asscalar] ( identifier[delta_e] )
def delta_e_cie1994(color1, color2, K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015): """ Calculates the Delta E (CIE1994) of two colors. K_l: 0.045 graphic arts 0.048 textiles K_2: 0.015 graphic arts 0.014 textiles K_L: 1 default 2 textiles """ color1_vector = _get_lab_color1_vector(color1) color2_matrix = _get_lab_color2_matrix(color2) delta_e = color_diff_matrix.delta_e_cie1994(color1_vector, color2_matrix, K_L=K_L, K_C=K_C, K_H=K_H, K_1=K_1, K_2=K_2)[0] return numpy.asscalar(delta_e)
def run(self): """Primary entry point to the plugin, runs once per file.""" paths = [x for x in practices.__dict__.values() if hasattr(x, 'code')] for node in ast.walk(self.tree): try: lineno, col_offset = node.lineno, node.col_offset except AttributeError: # Not all nodes have coordinates, e.g.: ast.Module continue for checker in paths: if checker(node): message = self.build_message(checker) yield lineno, col_offset, message, type(self)
def function[run, parameter[self]]: constant[Primary entry point to the plugin, runs once per file.] variable[paths] assign[=] <ast.ListComp object at 0x7da1b0a79000> for taget[name[node]] in starred[call[name[ast].walk, parameter[name[self].tree]]] begin[:] <ast.Try object at 0x7da1b0a800a0> for taget[name[checker]] in starred[name[paths]] begin[:] if call[name[checker], parameter[name[node]]] begin[:] variable[message] assign[=] call[name[self].build_message, parameter[name[checker]]] <ast.Yield object at 0x7da1b0a82a70>
keyword[def] identifier[run] ( identifier[self] ): literal[string] identifier[paths] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[practices] . identifier[__dict__] . identifier[values] () keyword[if] identifier[hasattr] ( identifier[x] , literal[string] )] keyword[for] identifier[node] keyword[in] identifier[ast] . identifier[walk] ( identifier[self] . identifier[tree] ): keyword[try] : identifier[lineno] , identifier[col_offset] = identifier[node] . identifier[lineno] , identifier[node] . identifier[col_offset] keyword[except] identifier[AttributeError] : keyword[continue] keyword[for] identifier[checker] keyword[in] identifier[paths] : keyword[if] identifier[checker] ( identifier[node] ): identifier[message] = identifier[self] . identifier[build_message] ( identifier[checker] ) keyword[yield] identifier[lineno] , identifier[col_offset] , identifier[message] , identifier[type] ( identifier[self] )
def run(self): """Primary entry point to the plugin, runs once per file.""" paths = [x for x in practices.__dict__.values() if hasattr(x, 'code')] for node in ast.walk(self.tree): try: (lineno, col_offset) = (node.lineno, node.col_offset) # depends on [control=['try'], data=[]] except AttributeError: # Not all nodes have coordinates, e.g.: ast.Module continue # depends on [control=['except'], data=[]] for checker in paths: if checker(node): message = self.build_message(checker) yield (lineno, col_offset, message, type(self)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['checker']] # depends on [control=['for'], data=['node']]
def rpc_put_zonefiles( self, zonefile_datas, **con_info ): """ Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles """ conf = get_blockstack_opts() if not is_atlas_enabled(conf): return {'error': 'No data', 'http_status': 400} if 'zonefiles' not in conf: return {'error': 'No zonefiles directory (likely a configuration error)', 'http_status': 400} if type(zonefile_datas) != list: return {'error': 'Invalid data', 'http_status': 400} if len(zonefile_datas) > 5: return {'error': 'Too many zonefiles', 'http_status': 400} for zfd in zonefile_datas: if not check_string(zfd, max_length=((4 * RPC_MAX_ZONEFILE_LEN) / 3) + 3, pattern=OP_BASE64_EMPTY_PATTERN): return {'error': 'Invalid zone file payload (exceeds {} bytes and/or not base64-encoded)'.format(RPC_MAX_ZONEFILE_LEN)} zonefile_dir = conf.get("zonefiles", None) saved = [] for zonefile_data in zonefile_datas: # decode try: zonefile_data = base64.b64decode( zonefile_data ) except: log.debug("Invalid base64 zonefile") saved.append(0) continue if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN: log.debug("Zonefile too long") saved.append(0) continue # is this zone file already discovered? 
zonefile_hash = get_zonefile_data_hash(str(zonefile_data)) zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, path=conf['atlasdb_path']) if not zfinfos: # nope log.debug("Unknown zonefile hash {}".format(zonefile_hash)) saved.append(0) continue # keep this zone file rc = store_atlas_zonefile_data( str(zonefile_data), zonefile_dir ) if not rc: log.error("Failed to store zonefile {}".format(zonefile_hash)) saved.append(0) continue # mark this zone file as present, so we don't ask anyone else for it was_present = atlasdb_set_zonefile_present(zonefile_hash, True, path=conf['atlasdb_path']) if was_present: # we already got this zone file # only process it if it's outside our recovery range recovery_start, recovery_end = get_recovery_range(self.working_dir) current_block = virtualchain_hooks.get_last_block(self.working_dir) if recovery_start is not None and recovery_end is not None and recovery_end < current_block: # no need to process log.debug("Already have zonefile {}".format(zonefile_hash)) saved.append(1) continue if self.subdomain_index: # got new zonefile # let the subdomain indexer know, along with giving it the minimum block height min_block_height = min([zfi['block_height'] for zfi in zfinfos]) log.debug("Enqueue {} from {} for subdomain processing".format(zonefile_hash, min_block_height)) self.subdomain_index.enqueue_zonefile(zonefile_hash, min_block_height) log.debug("Stored new zonefile {}".format(zonefile_hash)) saved.append(1) log.debug("Saved {} zonefile(s)".format(sum(saved))) log.debug("Reply: {}".format({'saved': saved})) return self.success_response( {'saved': saved} )
def function[rpc_put_zonefiles, parameter[self, zonefile_datas]]: constant[ Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles ] variable[conf] assign[=] call[name[get_blockstack_opts], parameter[]] if <ast.UnaryOp object at 0x7da1b17d5330> begin[:] return[dictionary[[<ast.Constant object at 0x7da1b17d69e0>, <ast.Constant object at 0x7da1b17d68f0>], [<ast.Constant object at 0x7da1b17d5f90>, <ast.Constant object at 0x7da1b17d6830>]]] if compare[constant[zonefiles] <ast.NotIn object at 0x7da2590d7190> name[conf]] begin[:] return[dictionary[[<ast.Constant object at 0x7da1b17d6890>, <ast.Constant object at 0x7da1b17d6590>], [<ast.Constant object at 0x7da1b17d5000>, <ast.Constant object at 0x7da1b17d47c0>]]] if compare[call[name[type], parameter[name[zonefile_datas]]] not_equal[!=] name[list]] begin[:] return[dictionary[[<ast.Constant object at 0x7da1b17d6e00>, <ast.Constant object at 0x7da1b17d6fb0>], [<ast.Constant object at 0x7da1b17d7160>, <ast.Constant object at 0x7da1b17d7e80>]]] if compare[call[name[len], parameter[name[zonefile_datas]]] greater[>] constant[5]] begin[:] return[dictionary[[<ast.Constant object at 0x7da1b17d42b0>, <ast.Constant object at 0x7da1b17d7f10>], [<ast.Constant object at 0x7da1b17d57b0>, <ast.Constant object at 0x7da1b17d6a70>]]] for taget[name[zfd]] in starred[name[zonefile_datas]] begin[:] if <ast.UnaryOp object at 0x7da1b17d67d0> begin[:] return[dictionary[[<ast.Constant object at 0x7da1b17d5b40>], [<ast.Call object at 0x7da1b17d48e0>]]] variable[zonefile_dir] assign[=] call[name[conf].get, parameter[constant[zonefiles], constant[None]]] variable[saved] assign[=] list[[]] for taget[name[zonefile_data]] in starred[name[zonefile_datas]] begin[:] <ast.Try object at 
0x7da1b17d4c70> if compare[call[name[len], parameter[name[zonefile_data]]] greater[>] name[RPC_MAX_ZONEFILE_LEN]] begin[:] call[name[log].debug, parameter[constant[Zonefile too long]]] call[name[saved].append, parameter[constant[0]]] continue variable[zonefile_hash] assign[=] call[name[get_zonefile_data_hash], parameter[call[name[str], parameter[name[zonefile_data]]]]] variable[zfinfos] assign[=] call[name[atlasdb_get_zonefiles_by_hash], parameter[name[zonefile_hash]]] if <ast.UnaryOp object at 0x7da1b17d4610> begin[:] call[name[log].debug, parameter[call[constant[Unknown zonefile hash {}].format, parameter[name[zonefile_hash]]]]] call[name[saved].append, parameter[constant[0]]] continue variable[rc] assign[=] call[name[store_atlas_zonefile_data], parameter[call[name[str], parameter[name[zonefile_data]]], name[zonefile_dir]]] if <ast.UnaryOp object at 0x7da1b17d5060> begin[:] call[name[log].error, parameter[call[constant[Failed to store zonefile {}].format, parameter[name[zonefile_hash]]]]] call[name[saved].append, parameter[constant[0]]] continue variable[was_present] assign[=] call[name[atlasdb_set_zonefile_present], parameter[name[zonefile_hash], constant[True]]] if name[was_present] begin[:] <ast.Tuple object at 0x7da1b17d6b00> assign[=] call[name[get_recovery_range], parameter[name[self].working_dir]] variable[current_block] assign[=] call[name[virtualchain_hooks].get_last_block, parameter[name[self].working_dir]] if <ast.BoolOp object at 0x7da1b17d6ec0> begin[:] call[name[log].debug, parameter[call[constant[Already have zonefile {}].format, parameter[name[zonefile_hash]]]]] call[name[saved].append, parameter[constant[1]]] continue if name[self].subdomain_index begin[:] variable[min_block_height] assign[=] call[name[min], parameter[<ast.ListComp object at 0x7da1b17d5660>]] call[name[log].debug, parameter[call[constant[Enqueue {} from {} for subdomain processing].format, parameter[name[zonefile_hash], name[min_block_height]]]]] 
call[name[self].subdomain_index.enqueue_zonefile, parameter[name[zonefile_hash], name[min_block_height]]] call[name[log].debug, parameter[call[constant[Stored new zonefile {}].format, parameter[name[zonefile_hash]]]]] call[name[saved].append, parameter[constant[1]]] call[name[log].debug, parameter[call[constant[Saved {} zonefile(s)].format, parameter[call[name[sum], parameter[name[saved]]]]]]] call[name[log].debug, parameter[call[constant[Reply: {}].format, parameter[dictionary[[<ast.Constant object at 0x7da1b1633970>], [<ast.Name object at 0x7da1b1630ca0>]]]]]] return[call[name[self].success_response, parameter[dictionary[[<ast.Constant object at 0x7da1b1631180>], [<ast.Name object at 0x7da1b1630ac0>]]]]]
keyword[def] identifier[rpc_put_zonefiles] ( identifier[self] , identifier[zonefile_datas] ,** identifier[con_info] ): literal[string] identifier[conf] = identifier[get_blockstack_opts] () keyword[if] keyword[not] identifier[is_atlas_enabled] ( identifier[conf] ): keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[if] literal[string] keyword[not] keyword[in] identifier[conf] : keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[if] identifier[type] ( identifier[zonefile_datas] )!= identifier[list] : keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[if] identifier[len] ( identifier[zonefile_datas] )> literal[int] : keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[for] identifier[zfd] keyword[in] identifier[zonefile_datas] : keyword[if] keyword[not] identifier[check_string] ( identifier[zfd] , identifier[max_length] =(( literal[int] * identifier[RPC_MAX_ZONEFILE_LEN] )/ literal[int] )+ literal[int] , identifier[pattern] = identifier[OP_BASE64_EMPTY_PATTERN] ): keyword[return] { literal[string] : literal[string] . identifier[format] ( identifier[RPC_MAX_ZONEFILE_LEN] )} identifier[zonefile_dir] = identifier[conf] . identifier[get] ( literal[string] , keyword[None] ) identifier[saved] =[] keyword[for] identifier[zonefile_data] keyword[in] identifier[zonefile_datas] : keyword[try] : identifier[zonefile_data] = identifier[base64] . identifier[b64decode] ( identifier[zonefile_data] ) keyword[except] : identifier[log] . identifier[debug] ( literal[string] ) identifier[saved] . identifier[append] ( literal[int] ) keyword[continue] keyword[if] identifier[len] ( identifier[zonefile_data] )> identifier[RPC_MAX_ZONEFILE_LEN] : identifier[log] . identifier[debug] ( literal[string] ) identifier[saved] . 
identifier[append] ( literal[int] ) keyword[continue] identifier[zonefile_hash] = identifier[get_zonefile_data_hash] ( identifier[str] ( identifier[zonefile_data] )) identifier[zfinfos] = identifier[atlasdb_get_zonefiles_by_hash] ( identifier[zonefile_hash] , identifier[path] = identifier[conf] [ literal[string] ]) keyword[if] keyword[not] identifier[zfinfos] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[zonefile_hash] )) identifier[saved] . identifier[append] ( literal[int] ) keyword[continue] identifier[rc] = identifier[store_atlas_zonefile_data] ( identifier[str] ( identifier[zonefile_data] ), identifier[zonefile_dir] ) keyword[if] keyword[not] identifier[rc] : identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[zonefile_hash] )) identifier[saved] . identifier[append] ( literal[int] ) keyword[continue] identifier[was_present] = identifier[atlasdb_set_zonefile_present] ( identifier[zonefile_hash] , keyword[True] , identifier[path] = identifier[conf] [ literal[string] ]) keyword[if] identifier[was_present] : identifier[recovery_start] , identifier[recovery_end] = identifier[get_recovery_range] ( identifier[self] . identifier[working_dir] ) identifier[current_block] = identifier[virtualchain_hooks] . identifier[get_last_block] ( identifier[self] . identifier[working_dir] ) keyword[if] identifier[recovery_start] keyword[is] keyword[not] keyword[None] keyword[and] identifier[recovery_end] keyword[is] keyword[not] keyword[None] keyword[and] identifier[recovery_end] < identifier[current_block] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[zonefile_hash] )) identifier[saved] . identifier[append] ( literal[int] ) keyword[continue] keyword[if] identifier[self] . 
identifier[subdomain_index] : identifier[min_block_height] = identifier[min] ([ identifier[zfi] [ literal[string] ] keyword[for] identifier[zfi] keyword[in] identifier[zfinfos] ]) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[zonefile_hash] , identifier[min_block_height] )) identifier[self] . identifier[subdomain_index] . identifier[enqueue_zonefile] ( identifier[zonefile_hash] , identifier[min_block_height] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[zonefile_hash] )) identifier[saved] . identifier[append] ( literal[int] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sum] ( identifier[saved] ))) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ({ literal[string] : identifier[saved] })) keyword[return] identifier[self] . identifier[success_response] ({ literal[string] : identifier[saved] })
def rpc_put_zonefiles(self, zonefile_datas, **con_info): """ Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles """ conf = get_blockstack_opts() if not is_atlas_enabled(conf): return {'error': 'No data', 'http_status': 400} # depends on [control=['if'], data=[]] if 'zonefiles' not in conf: return {'error': 'No zonefiles directory (likely a configuration error)', 'http_status': 400} # depends on [control=['if'], data=[]] if type(zonefile_datas) != list: return {'error': 'Invalid data', 'http_status': 400} # depends on [control=['if'], data=[]] if len(zonefile_datas) > 5: return {'error': 'Too many zonefiles', 'http_status': 400} # depends on [control=['if'], data=[]] for zfd in zonefile_datas: if not check_string(zfd, max_length=4 * RPC_MAX_ZONEFILE_LEN / 3 + 3, pattern=OP_BASE64_EMPTY_PATTERN): return {'error': 'Invalid zone file payload (exceeds {} bytes and/or not base64-encoded)'.format(RPC_MAX_ZONEFILE_LEN)} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['zfd']] zonefile_dir = conf.get('zonefiles', None) saved = [] for zonefile_data in zonefile_datas: # decode try: zonefile_data = base64.b64decode(zonefile_data) # depends on [control=['try'], data=[]] except: log.debug('Invalid base64 zonefile') saved.append(0) continue # depends on [control=['except'], data=[]] if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN: log.debug('Zonefile too long') saved.append(0) continue # depends on [control=['if'], data=[]] # is this zone file already discovered? 
zonefile_hash = get_zonefile_data_hash(str(zonefile_data)) zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, path=conf['atlasdb_path']) if not zfinfos: # nope log.debug('Unknown zonefile hash {}'.format(zonefile_hash)) saved.append(0) continue # depends on [control=['if'], data=[]] # keep this zone file rc = store_atlas_zonefile_data(str(zonefile_data), zonefile_dir) if not rc: log.error('Failed to store zonefile {}'.format(zonefile_hash)) saved.append(0) continue # depends on [control=['if'], data=[]] # mark this zone file as present, so we don't ask anyone else for it was_present = atlasdb_set_zonefile_present(zonefile_hash, True, path=conf['atlasdb_path']) if was_present: # we already got this zone file # only process it if it's outside our recovery range (recovery_start, recovery_end) = get_recovery_range(self.working_dir) current_block = virtualchain_hooks.get_last_block(self.working_dir) if recovery_start is not None and recovery_end is not None and (recovery_end < current_block): # no need to process log.debug('Already have zonefile {}'.format(zonefile_hash)) saved.append(1) continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self.subdomain_index: # got new zonefile # let the subdomain indexer know, along with giving it the minimum block height min_block_height = min([zfi['block_height'] for zfi in zfinfos]) log.debug('Enqueue {} from {} for subdomain processing'.format(zonefile_hash, min_block_height)) self.subdomain_index.enqueue_zonefile(zonefile_hash, min_block_height) # depends on [control=['if'], data=[]] log.debug('Stored new zonefile {}'.format(zonefile_hash)) saved.append(1) # depends on [control=['for'], data=['zonefile_data']] log.debug('Saved {} zonefile(s)'.format(sum(saved))) log.debug('Reply: {}'.format({'saved': saved})) return self.success_response({'saved': saved})
def get_vaults_by_ids(self, *args, **kwargs): """Pass through to provider VaultLookupSession.get_vaults_by_ids""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_by_ids catalogs = self._get_provider_session('vault_lookup_session').get_vaults_by_ids(*args, **kwargs) cat_list = [] for cat in catalogs: cat_list.append(Vault(self._provider_manager, cat, self._runtime, self._proxy)) return VaultList(cat_list)
def function[get_vaults_by_ids, parameter[self]]: constant[Pass through to provider VaultLookupSession.get_vaults_by_ids] variable[catalogs] assign[=] call[call[name[self]._get_provider_session, parameter[constant[vault_lookup_session]]].get_vaults_by_ids, parameter[<ast.Starred object at 0x7da20c7c81c0>]] variable[cat_list] assign[=] list[[]] for taget[name[cat]] in starred[name[catalogs]] begin[:] call[name[cat_list].append, parameter[call[name[Vault], parameter[name[self]._provider_manager, name[cat], name[self]._runtime, name[self]._proxy]]]] return[call[name[VaultList], parameter[name[cat_list]]]]
keyword[def] identifier[get_vaults_by_ids] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[catalogs] = identifier[self] . identifier[_get_provider_session] ( literal[string] ). identifier[get_vaults_by_ids] (* identifier[args] ,** identifier[kwargs] ) identifier[cat_list] =[] keyword[for] identifier[cat] keyword[in] identifier[catalogs] : identifier[cat_list] . identifier[append] ( identifier[Vault] ( identifier[self] . identifier[_provider_manager] , identifier[cat] , identifier[self] . identifier[_runtime] , identifier[self] . identifier[_proxy] )) keyword[return] identifier[VaultList] ( identifier[cat_list] )
def get_vaults_by_ids(self, *args, **kwargs): """Pass through to provider VaultLookupSession.get_vaults_by_ids""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_by_ids catalogs = self._get_provider_session('vault_lookup_session').get_vaults_by_ids(*args, **kwargs) cat_list = [] for cat in catalogs: cat_list.append(Vault(self._provider_manager, cat, self._runtime, self._proxy)) # depends on [control=['for'], data=['cat']] return VaultList(cat_list)
def trace(self, execution_request): """Yields a stringified 'stacktrace' starting from the scheduler's roots.""" for line in self._scheduler.graph_trace(execution_request.native): yield line
def function[trace, parameter[self, execution_request]]: constant[Yields a stringified 'stacktrace' starting from the scheduler's roots.] for taget[name[line]] in starred[call[name[self]._scheduler.graph_trace, parameter[name[execution_request].native]]] begin[:] <ast.Yield object at 0x7da1b22b9930>
keyword[def] identifier[trace] ( identifier[self] , identifier[execution_request] ): literal[string] keyword[for] identifier[line] keyword[in] identifier[self] . identifier[_scheduler] . identifier[graph_trace] ( identifier[execution_request] . identifier[native] ): keyword[yield] identifier[line]
def trace(self, execution_request): """Yields a stringified 'stacktrace' starting from the scheduler's roots.""" for line in self._scheduler.graph_trace(execution_request.native): yield line # depends on [control=['for'], data=['line']]
def create_network(batch_size, update_freq): """Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization """ head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) data = np.random.randint(1, 5, [1000, 2]) #Test_Train data split n_train = int(data.shape[0] * 0.8) weights = np.array([1.0, 2.0]) label = data.dot(weights) di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label') val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size) X = mx.sym.Variable('data') Y = mx.symbol.Variable('lin_reg_label') fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") mod = SVRGModule( symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=update_freq, logger=logging) return di, val_iter, mod
def function[create_network, parameter[batch_size, update_freq]]: constant[Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization ] variable[head] assign[=] constant[%(asctime)-15s %(message)s] call[name[logging].basicConfig, parameter[]] variable[data] assign[=] call[name[np].random.randint, parameter[constant[1], constant[5], list[[<ast.Constant object at 0x7da1b1fa2800>, <ast.Constant object at 0x7da1b1fa27a0>]]]] variable[n_train] assign[=] call[name[int], parameter[binary_operation[call[name[data].shape][constant[0]] * constant[0.8]]]] variable[weights] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b1fa0910>, <ast.Constant object at 0x7da1b1fa1570>]]]] variable[label] assign[=] call[name[data].dot, parameter[name[weights]]] variable[di] assign[=] call[name[mx].io.NDArrayIter, parameter[call[name[data]][tuple[[<ast.Slice object at 0x7da1b1fa19f0>, <ast.Slice object at 0x7da1b1fa18d0>]]], call[name[label]][<ast.Slice object at 0x7da1b1fa1600>]]] variable[val_iter] assign[=] call[name[mx].io.NDArrayIter, parameter[call[name[data]][tuple[[<ast.Slice object at 0x7da1b1fa1120>, <ast.Slice object at 0x7da1b1fa1510>]]], call[name[label]][<ast.Slice object at 0x7da1b1fa0a00>]]] variable[X] assign[=] call[name[mx].sym.Variable, parameter[constant[data]]] variable[Y] assign[=] call[name[mx].symbol.Variable, parameter[constant[lin_reg_label]]] variable[fully_connected_layer] assign[=] call[name[mx].sym.FullyConnected, parameter[]] variable[lro] assign[=] call[name[mx].sym.LinearRegressionOutput, parameter[]] variable[mod] assign[=] call[name[SVRGModule], parameter[]] return[tuple[[<ast.Name object at 0x7da1b1fa1a20>, <ast.Name object at 0x7da1b1fa2260>, <ast.Name object at 0x7da1b1fa2650>]]]
keyword[def] identifier[create_network] ( identifier[batch_size] , identifier[update_freq] ): literal[string] identifier[head] = literal[string] identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[INFO] , identifier[format] = identifier[head] ) identifier[data] = identifier[np] . identifier[random] . identifier[randint] ( literal[int] , literal[int] ,[ literal[int] , literal[int] ]) identifier[n_train] = identifier[int] ( identifier[data] . identifier[shape] [ literal[int] ]* literal[int] ) identifier[weights] = identifier[np] . identifier[array] ([ literal[int] , literal[int] ]) identifier[label] = identifier[data] . identifier[dot] ( identifier[weights] ) identifier[di] = identifier[mx] . identifier[io] . identifier[NDArrayIter] ( identifier[data] [: identifier[n_train] ,:], identifier[label] [: identifier[n_train] ], identifier[batch_size] = identifier[batch_size] , identifier[shuffle] = keyword[True] , identifier[label_name] = literal[string] ) identifier[val_iter] = identifier[mx] . identifier[io] . identifier[NDArrayIter] ( identifier[data] [ identifier[n_train] :,:], identifier[label] [ identifier[n_train] :], identifier[batch_size] = identifier[batch_size] ) identifier[X] = identifier[mx] . identifier[sym] . identifier[Variable] ( literal[string] ) identifier[Y] = identifier[mx] . identifier[symbol] . identifier[Variable] ( literal[string] ) identifier[fully_connected_layer] = identifier[mx] . identifier[sym] . identifier[FullyConnected] ( identifier[data] = identifier[X] , identifier[name] = literal[string] , identifier[num_hidden] = literal[int] ) identifier[lro] = identifier[mx] . identifier[sym] . 
identifier[LinearRegressionOutput] ( identifier[data] = identifier[fully_connected_layer] , identifier[label] = identifier[Y] , identifier[name] = literal[string] ) identifier[mod] = identifier[SVRGModule] ( identifier[symbol] = identifier[lro] , identifier[data_names] =[ literal[string] ], identifier[label_names] =[ literal[string] ], identifier[update_freq] = identifier[update_freq] , identifier[logger] = identifier[logging] ) keyword[return] identifier[di] , identifier[val_iter] , identifier[mod]
def create_network(batch_size, update_freq): """Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization """ head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) data = np.random.randint(1, 5, [1000, 2]) #Test_Train data split n_train = int(data.shape[0] * 0.8) weights = np.array([1.0, 2.0]) label = data.dot(weights) di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label') val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size) X = mx.sym.Variable('data') Y = mx.symbol.Variable('lin_reg_label') fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name='lro') mod = SVRGModule(symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=update_freq, logger=logging) return (di, val_iter, mod)
def clear_limit(self, identifier=None): """ Remove a single limit or all defined limits :param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits :type identifier: int :return: True if a limit was successfully found and removed, False if no limit could be matched for removal :rtype : bool """ # Remove a single limit if identifier: if identifier in self._limits: del self._limits[identifier] return True else: return False # Remove all limits if self._limits: self._limits.clear() return True else: return False
def function[clear_limit, parameter[self, identifier]]: constant[ Remove a single limit or all defined limits :param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits :type identifier: int :return: True if a limit was successfully found and removed, False if no limit could be matched for removal :rtype : bool ] if name[identifier] begin[:] if compare[name[identifier] in name[self]._limits] begin[:] <ast.Delete object at 0x7da1b164a0b0> return[constant[True]] if name[self]._limits begin[:] call[name[self]._limits.clear, parameter[]] return[constant[True]]
keyword[def] identifier[clear_limit] ( identifier[self] , identifier[identifier] = keyword[None] ): literal[string] keyword[if] identifier[identifier] : keyword[if] identifier[identifier] keyword[in] identifier[self] . identifier[_limits] : keyword[del] identifier[self] . identifier[_limits] [ identifier[identifier] ] keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False] keyword[if] identifier[self] . identifier[_limits] : identifier[self] . identifier[_limits] . identifier[clear] () keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False]
def clear_limit(self, identifier=None): """ Remove a single limit or all defined limits :param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits :type identifier: int :return: True if a limit was successfully found and removed, False if no limit could be matched for removal :rtype : bool """ # Remove a single limit if identifier: if identifier in self._limits: del self._limits[identifier] return True # depends on [control=['if'], data=['identifier']] else: return False # depends on [control=['if'], data=[]] # Remove all limits if self._limits: self._limits.clear() return True # depends on [control=['if'], data=[]] else: return False
def col_isbool(df, col_name=None):
    """Report the boolean-typed columns of a DataFrame.

    Parameters:
        df - DataFrame to inspect.
        col_name - string, default None. When supplied, return whether
            that column is of dtype 'bool' instead of listing columns.

    Returns the Index of bool-dtype column names when col_name is None,
    otherwise True/False indicating membership of col_name in that set.
    """
    bool_cols = df.select_dtypes(include='bool').columns
    return bool_cols if col_name is None else col_name in bool_cols
def function[col_isbool, parameter[df, col_name]]: constant[ Returns a list of columns that are of type 'bool'. If col_name is specified, returns whether the column in the DataFrame is of type 'bool' instead. Parameters: df - DataFrame DataFrame to check col_name - string, default None If specified, this function will True if df[col_name] is of type 'bool' ] variable[col_list] assign[=] call[name[df].select_dtypes, parameter[]].columns if compare[name[col_name] is constant[None]] begin[:] return[name[col_list]]
keyword[def] identifier[col_isbool] ( identifier[df] , identifier[col_name] = keyword[None] ): literal[string] identifier[col_list] = identifier[df] . identifier[select_dtypes] ( identifier[include] = literal[string] ). identifier[columns] keyword[if] identifier[col_name] keyword[is] keyword[None] : keyword[return] identifier[col_list] keyword[else] : keyword[return] identifier[col_name] keyword[in] identifier[col_list]
def col_isbool(df, col_name=None): """ Returns a list of columns that are of type 'bool'. If col_name is specified, returns whether the column in the DataFrame is of type 'bool' instead. Parameters: df - DataFrame DataFrame to check col_name - string, default None If specified, this function will True if df[col_name] is of type 'bool' """ col_list = df.select_dtypes(include='bool').columns if col_name is None: return col_list # depends on [control=['if'], data=[]] else: return col_name in col_list
def find_nodes(self, **kwargs):
    """Search the data nodes attached to this graph.

    The keyword arguments are applied as a filter against the
    :class:`BaseNodeData` subclass associated with this graph, and the
    matching attached :class:`Node` objects are returned.

    :param kwargs: filter arguments for the node-data model
    :returns: ``QuerySet`` of :class:`Node` objects
    """
    classname = self.data_content_type.model_class().__name__.lower()
    # Rewrite each filter key to traverse the data model relation.
    filter_args = {
        '%s__%s' % (classname, key): value
        for key, value in kwargs.items()
    }
    return Node.objects.filter(**filter_args)
def function[find_nodes, parameter[self]]: constant[Searches the data nodes that are associated with this graph using the key word arguments as a filter and returns a :class:`django.db.models.query.QuerySet`` of the attached :class:`Node` objects. :param kwargs: filter arguments applied to searching the :class:`BaseNodeData` subclass associated with this graph. :returns: ``QuerySet`` of :class:`Node` objects ] variable[filter_args] assign[=] dictionary[[], []] variable[classname] assign[=] call[call[name[self].data_content_type.model_class, parameter[]].__name__.lower, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0049150>, <ast.Name object at 0x7da1b00481f0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] call[name[filter_args]][binary_operation[constant[%s__%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0048e20>, <ast.Name object at 0x7da1b0048970>]]]] assign[=] name[value] return[call[name[Node].objects.filter, parameter[]]]
keyword[def] identifier[find_nodes] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[filter_args] ={} identifier[classname] = identifier[self] . identifier[data_content_type] . identifier[model_class] (). identifier[__name__] . identifier[lower] () keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] (): identifier[filter_args] [ literal[string] %( identifier[classname] , identifier[key] )]= identifier[value] keyword[return] identifier[Node] . identifier[objects] . identifier[filter] (** identifier[filter_args] )
def find_nodes(self, **kwargs): """Searches the data nodes that are associated with this graph using the key word arguments as a filter and returns a :class:`django.db.models.query.QuerySet`` of the attached :class:`Node` objects. :param kwargs: filter arguments applied to searching the :class:`BaseNodeData` subclass associated with this graph. :returns: ``QuerySet`` of :class:`Node` objects """ filter_args = {} classname = self.data_content_type.model_class().__name__.lower() for (key, value) in kwargs.items(): filter_args['%s__%s' % (classname, key)] = value # depends on [control=['for'], data=[]] return Node.objects.filter(**filter_args)
def set_raw_holding_register(self, name, value):
    """Write *value* to the holding register registered under *name*."""
    register = self._holding_regs[name]
    self._conn.write_register(
        unit=self._slave,
        address=register['addr'],
        value=value)
def function[set_raw_holding_register, parameter[self, name, value]]: constant[Write to register by name.] call[name[self]._conn.write_register, parameter[]]
keyword[def] identifier[set_raw_holding_register] ( identifier[self] , identifier[name] , identifier[value] ): literal[string] identifier[self] . identifier[_conn] . identifier[write_register] ( identifier[unit] = identifier[self] . identifier[_slave] , identifier[address] =( identifier[self] . identifier[_holding_regs] [ identifier[name] ][ literal[string] ]), identifier[value] = identifier[value] )
def set_raw_holding_register(self, name, value): """Write to register by name.""" self._conn.write_register(unit=self._slave, address=self._holding_regs[name]['addr'], value=value)
def hist(self, by=None, bins=10, **kwds):
    """
    Draw one histogram of the DataFrame's columns.

    All given Series are grouped into *bins* and drawn together in a
    single :class:`matplotlib.axes.Axes`, which is useful when the
    Series share a similar scale.

    Parameters
    ----------
    by : str or sequence, optional
        Column in the DataFrame to group by.
    bins : int, default 10
        Number of histogram bins to be used.
    **kwds
        Additional keyword arguments are documented in
        :meth:`DataFrame.plot`.

    Returns
    -------
    class:`matplotlib.AxesSubplot`
        Return a histogram plot.

    See Also
    --------
    DataFrame.hist : Draw histograms per DataFrame's Series.
    Series.hist : Draw a histogram with Series' data.

    Examples
    --------
    .. plot::
        :context: close-figs

        >>> df = pd.DataFrame(
        ...     np.random.randint(1, 7, 6000),
        ...     columns = ['one'])
        >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
        >>> ax = df.plot.hist(bins=12, alpha=0.5)
    """
    # Delegate to the generic plot dispatcher with the histogram kind.
    return self(kind='hist', by=by, bins=bins, **kwds)
def function[hist, parameter[self, by, bins]]: constant[ Draw one histogram of the DataFrame's columns. A histogram is a representation of the distribution of data. This function groups the values of all given Series in the DataFrame into bins and draws all bins in one :class:`matplotlib.axes.Axes`. This is useful when the DataFrame's Series are in a similar scale. Parameters ---------- by : str or sequence, optional Column in the DataFrame to group by. bins : int, default 10 Number of histogram bins to be used. **kwds Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- class:`matplotlib.AxesSubplot` Return a histogram plot. See Also -------- DataFrame.hist : Draw histograms per DataFrame's Series. Series.hist : Draw a histogram with Series' data. Examples -------- When we draw a dice 6000 times, we expect to get each value around 1000 times. But when we draw two dices and sum the result, the distribution is going to be quite different. A histogram illustrates those distributions. .. plot:: :context: close-figs >>> df = pd.DataFrame( ... np.random.randint(1, 7, 6000), ... columns = ['one']) >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) >>> ax = df.plot.hist(bins=12, alpha=0.5) ] return[call[name[self], parameter[]]]
keyword[def] identifier[hist] ( identifier[self] , identifier[by] = keyword[None] , identifier[bins] = literal[int] ,** identifier[kwds] ): literal[string] keyword[return] identifier[self] ( identifier[kind] = literal[string] , identifier[by] = identifier[by] , identifier[bins] = identifier[bins] ,** identifier[kwds] )
def hist(self, by=None, bins=10, **kwds): """ Draw one histogram of the DataFrame's columns. A histogram is a representation of the distribution of data. This function groups the values of all given Series in the DataFrame into bins and draws all bins in one :class:`matplotlib.axes.Axes`. This is useful when the DataFrame's Series are in a similar scale. Parameters ---------- by : str or sequence, optional Column in the DataFrame to group by. bins : int, default 10 Number of histogram bins to be used. **kwds Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- class:`matplotlib.AxesSubplot` Return a histogram plot. See Also -------- DataFrame.hist : Draw histograms per DataFrame's Series. Series.hist : Draw a histogram with Series' data. Examples -------- When we draw a dice 6000 times, we expect to get each value around 1000 times. But when we draw two dices and sum the result, the distribution is going to be quite different. A histogram illustrates those distributions. .. plot:: :context: close-figs >>> df = pd.DataFrame( ... np.random.randint(1, 7, 6000), ... columns = ['one']) >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) >>> ax = df.plot.hist(bins=12, alpha=0.5) """ return self(kind='hist', by=by, bins=bins, **kwds)
def _save_table(self, raw=False, cls=None, force_insert=None, force_update=None, using=None, update_fields=None): """ Saves the current instance. """ # Connection aliasing connection = connections[using] create = bool(force_insert or not self.dn) # Prepare fields if update_fields: target_fields = [ self._meta.get_field(name) for name in update_fields ] else: target_fields = [ field for field in cls._meta.get_fields(include_hidden=True) if field.concrete and not field.primary_key ] def get_field_value(field, instance): python_value = getattr(instance, field.attname) return field.get_db_prep_save(python_value, connection=connection) if create: old = None else: old = cls.objects.using(using).get(dn=self._saved_dn) changes = { field.db_column: ( None if old is None else get_field_value(field, old), get_field_value(field, self), ) for field in target_fields } # Actual saving old_dn = self.dn new_dn = self.build_dn() updated = False # Insertion if create: # FIXME(rbarrois): This should be handled through a hidden field. hidden_values = [ ('objectClass', [obj_class.encode('utf-8') for obj_class in self.object_classes]) ] new_values = hidden_values + [ (colname, change[1]) for colname, change in sorted(changes.items()) if change[1] != [] ] new_dn = self.build_dn() logger.debug("Creating new LDAP entry %s", new_dn) connection.add_s(new_dn, new_values) # Update else: modlist = [] for colname, change in sorted(changes.items()): old_value, new_value = change if old_value == new_value: continue modlist.append(( ldap.MOD_DELETE if new_value == [] else ldap.MOD_REPLACE, colname, new_value, )) if new_dn != old_dn: logger.debug("renaming ldap entry %s to %s", old_dn, new_dn) connection.rename_s(old_dn, self.build_rdn()) if modlist: logger.debug("Modifying existing LDAP entry %s", new_dn) connection.modify_s(new_dn, modlist) updated = True self.dn = new_dn # Finishing self._saved_dn = self.dn return updated
def function[_save_table, parameter[self, raw, cls, force_insert, force_update, using, update_fields]]: constant[ Saves the current instance. ] variable[connection] assign[=] call[name[connections]][name[using]] variable[create] assign[=] call[name[bool], parameter[<ast.BoolOp object at 0x7da1b0b0aa40>]] if name[update_fields] begin[:] variable[target_fields] assign[=] <ast.ListComp object at 0x7da1b0b0a1a0> def function[get_field_value, parameter[field, instance]]: variable[python_value] assign[=] call[name[getattr], parameter[name[instance], name[field].attname]] return[call[name[field].get_db_prep_save, parameter[name[python_value]]]] if name[create] begin[:] variable[old] assign[=] constant[None] variable[changes] assign[=] <ast.DictComp object at 0x7da1b0ddca30> variable[old_dn] assign[=] name[self].dn variable[new_dn] assign[=] call[name[self].build_dn, parameter[]] variable[updated] assign[=] constant[False] if name[create] begin[:] variable[hidden_values] assign[=] list[[<ast.Tuple object at 0x7da1b0ddcac0>]] variable[new_values] assign[=] binary_operation[name[hidden_values] + <ast.ListComp object at 0x7da1b0ddca00>] variable[new_dn] assign[=] call[name[self].build_dn, parameter[]] call[name[logger].debug, parameter[constant[Creating new LDAP entry %s], name[new_dn]]] call[name[connection].add_s, parameter[name[new_dn], name[new_values]]] name[self].dn assign[=] name[new_dn] name[self]._saved_dn assign[=] name[self].dn return[name[updated]]
keyword[def] identifier[_save_table] ( identifier[self] , identifier[raw] = keyword[False] , identifier[cls] = keyword[None] , identifier[force_insert] = keyword[None] , identifier[force_update] = keyword[None] , identifier[using] = keyword[None] , identifier[update_fields] = keyword[None] ): literal[string] identifier[connection] = identifier[connections] [ identifier[using] ] identifier[create] = identifier[bool] ( identifier[force_insert] keyword[or] keyword[not] identifier[self] . identifier[dn] ) keyword[if] identifier[update_fields] : identifier[target_fields] =[ identifier[self] . identifier[_meta] . identifier[get_field] ( identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[update_fields] ] keyword[else] : identifier[target_fields] =[ identifier[field] keyword[for] identifier[field] keyword[in] identifier[cls] . identifier[_meta] . identifier[get_fields] ( identifier[include_hidden] = keyword[True] ) keyword[if] identifier[field] . identifier[concrete] keyword[and] keyword[not] identifier[field] . identifier[primary_key] ] keyword[def] identifier[get_field_value] ( identifier[field] , identifier[instance] ): identifier[python_value] = identifier[getattr] ( identifier[instance] , identifier[field] . identifier[attname] ) keyword[return] identifier[field] . identifier[get_db_prep_save] ( identifier[python_value] , identifier[connection] = identifier[connection] ) keyword[if] identifier[create] : identifier[old] = keyword[None] keyword[else] : identifier[old] = identifier[cls] . identifier[objects] . identifier[using] ( identifier[using] ). identifier[get] ( identifier[dn] = identifier[self] . identifier[_saved_dn] ) identifier[changes] ={ identifier[field] . 
identifier[db_column] :( keyword[None] keyword[if] identifier[old] keyword[is] keyword[None] keyword[else] identifier[get_field_value] ( identifier[field] , identifier[old] ), identifier[get_field_value] ( identifier[field] , identifier[self] ), ) keyword[for] identifier[field] keyword[in] identifier[target_fields] } identifier[old_dn] = identifier[self] . identifier[dn] identifier[new_dn] = identifier[self] . identifier[build_dn] () identifier[updated] = keyword[False] keyword[if] identifier[create] : identifier[hidden_values] =[ ( literal[string] ,[ identifier[obj_class] . identifier[encode] ( literal[string] ) keyword[for] identifier[obj_class] keyword[in] identifier[self] . identifier[object_classes] ]) ] identifier[new_values] = identifier[hidden_values] +[ ( identifier[colname] , identifier[change] [ literal[int] ]) keyword[for] identifier[colname] , identifier[change] keyword[in] identifier[sorted] ( identifier[changes] . identifier[items] ()) keyword[if] identifier[change] [ literal[int] ]!=[] ] identifier[new_dn] = identifier[self] . identifier[build_dn] () identifier[logger] . identifier[debug] ( literal[string] , identifier[new_dn] ) identifier[connection] . identifier[add_s] ( identifier[new_dn] , identifier[new_values] ) keyword[else] : identifier[modlist] =[] keyword[for] identifier[colname] , identifier[change] keyword[in] identifier[sorted] ( identifier[changes] . identifier[items] ()): identifier[old_value] , identifier[new_value] = identifier[change] keyword[if] identifier[old_value] == identifier[new_value] : keyword[continue] identifier[modlist] . identifier[append] (( identifier[ldap] . identifier[MOD_DELETE] keyword[if] identifier[new_value] ==[] keyword[else] identifier[ldap] . identifier[MOD_REPLACE] , identifier[colname] , identifier[new_value] , )) keyword[if] identifier[new_dn] != identifier[old_dn] : identifier[logger] . identifier[debug] ( literal[string] , identifier[old_dn] , identifier[new_dn] ) identifier[connection] . 
identifier[rename_s] ( identifier[old_dn] , identifier[self] . identifier[build_rdn] ()) keyword[if] identifier[modlist] : identifier[logger] . identifier[debug] ( literal[string] , identifier[new_dn] ) identifier[connection] . identifier[modify_s] ( identifier[new_dn] , identifier[modlist] ) identifier[updated] = keyword[True] identifier[self] . identifier[dn] = identifier[new_dn] identifier[self] . identifier[_saved_dn] = identifier[self] . identifier[dn] keyword[return] identifier[updated]
def _save_table(self, raw=False, cls=None, force_insert=None, force_update=None, using=None, update_fields=None): """ Saves the current instance. """ # Connection aliasing connection = connections[using] create = bool(force_insert or not self.dn) # Prepare fields if update_fields: target_fields = [self._meta.get_field(name) for name in update_fields] # depends on [control=['if'], data=[]] else: target_fields = [field for field in cls._meta.get_fields(include_hidden=True) if field.concrete and (not field.primary_key)] def get_field_value(field, instance): python_value = getattr(instance, field.attname) return field.get_db_prep_save(python_value, connection=connection) if create: old = None # depends on [control=['if'], data=[]] else: old = cls.objects.using(using).get(dn=self._saved_dn) changes = {field.db_column: (None if old is None else get_field_value(field, old), get_field_value(field, self)) for field in target_fields} # Actual saving old_dn = self.dn new_dn = self.build_dn() updated = False # Insertion if create: # FIXME(rbarrois): This should be handled through a hidden field. 
hidden_values = [('objectClass', [obj_class.encode('utf-8') for obj_class in self.object_classes])] new_values = hidden_values + [(colname, change[1]) for (colname, change) in sorted(changes.items()) if change[1] != []] new_dn = self.build_dn() logger.debug('Creating new LDAP entry %s', new_dn) connection.add_s(new_dn, new_values) # depends on [control=['if'], data=[]] else: # Update modlist = [] for (colname, change) in sorted(changes.items()): (old_value, new_value) = change if old_value == new_value: continue # depends on [control=['if'], data=[]] modlist.append((ldap.MOD_DELETE if new_value == [] else ldap.MOD_REPLACE, colname, new_value)) # depends on [control=['for'], data=[]] if new_dn != old_dn: logger.debug('renaming ldap entry %s to %s', old_dn, new_dn) connection.rename_s(old_dn, self.build_rdn()) # depends on [control=['if'], data=['new_dn', 'old_dn']] if modlist: logger.debug('Modifying existing LDAP entry %s', new_dn) connection.modify_s(new_dn, modlist) # depends on [control=['if'], data=[]] updated = True self.dn = new_dn # Finishing self._saved_dn = self.dn return updated
def list(self, service_rec=None, host_rec=None, hostfilter=None):
    """List a specific service or all services.

    :param service_rec: t_services.id
    :param host_rec: t_hosts.id
    :param hostfilter: Valid hostfilter or None
    :return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr,
      svc.t_hosts.f_hostname, svc.t_services.f_proto, svc.t_services.f_number,
      svc.t_services.f_status, svc.t_services.f_name, svc.t_services.f_banner), ...]
    """
    # Delegate straight to the transport layer.
    sender = self.send
    return sender.service_list(service_rec, host_rec, hostfilter)
def function[list, parameter[self, service_rec, host_rec, hostfilter]]: constant[ List a specific service or all services :param service_rec: t_services.id :param host_rec: t_hosts.id :param hostfilter: Valid hostfilter or None :return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr, svc.t_hosts.f_hostname, svc.t_services.f_proto, svc.t_services.f_number, svc.t_services.f_status, svc.t_services.f_name, svc.t_services.f_banner), ...] ] return[call[name[self].send.service_list, parameter[name[service_rec], name[host_rec], name[hostfilter]]]]
keyword[def] identifier[list] ( identifier[self] , identifier[service_rec] = keyword[None] , identifier[host_rec] = keyword[None] , identifier[hostfilter] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[send] . identifier[service_list] ( identifier[service_rec] , identifier[host_rec] , identifier[hostfilter] )
def list(self, service_rec=None, host_rec=None, hostfilter=None): """ List a specific service or all services :param service_rec: t_services.id :param host_rec: t_hosts.id :param hostfilter: Valid hostfilter or None :return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr, svc.t_hosts.f_hostname, svc.t_services.f_proto, svc.t_services.f_number, svc.t_services.f_status, svc.t_services.f_name, svc.t_services.f_banner), ...] """ return self.send.service_list(service_rec, host_rec, hostfilter)
def copy(self):
    '''
    :return: a copy of the container, with its fields copied and all
        internal cross-references rebuilt to point at the copies
    '''
    clone = super(Container, self).copy()
    clone._fields = [f.copy() for f in self._fields]
    # Rebuild the name -> field lookup from the copied fields.
    clone._fields_dict = {}
    for f in clone._fields:
        field_name = f.get_name()
        if field_name is not None:
            clone._fields_dict[field_name] = f
    # Containers are tracked by position in _fields; map each one to its
    # copied counterpart at the same index.
    clone._containers = [
        clone._fields[self._fields.index(container)]
        for container in self._containers
    ]
    # Re-parent every copied field onto the clone.
    for f in clone._fields:
        f.enclosing = clone
    return clone
def function[copy, parameter[self]]: constant[ :return: a copy of the container ] variable[dup] assign[=] call[call[name[super], parameter[name[Container], name[self]]].copy, parameter[]] name[dup]._fields assign[=] <ast.ListComp object at 0x7da20e957550> name[dup]._fields_dict assign[=] <ast.DictComp object at 0x7da20e954ee0> name[dup]._containers assign[=] list[[]] for taget[name[container]] in starred[name[self]._containers] begin[:] variable[idx] assign[=] call[name[self]._fields.index, parameter[name[container]]] call[name[dup]._containers.append, parameter[call[name[dup]._fields][name[idx]]]] for taget[name[field]] in starred[name[dup]._fields] begin[:] name[field].enclosing assign[=] name[dup] return[name[dup]]
keyword[def] identifier[copy] ( identifier[self] ): literal[string] identifier[dup] = identifier[super] ( identifier[Container] , identifier[self] ). identifier[copy] () identifier[dup] . identifier[_fields] =[ identifier[field] . identifier[copy] () keyword[for] identifier[field] keyword[in] identifier[self] . identifier[_fields] ] identifier[dup] . identifier[_fields_dict] ={ identifier[field] . identifier[get_name] (): identifier[field] keyword[for] identifier[field] keyword[in] identifier[dup] . identifier[_fields] keyword[if] identifier[field] . identifier[get_name] () keyword[is] keyword[not] keyword[None] } identifier[dup] . identifier[_containers] =[] keyword[for] identifier[container] keyword[in] identifier[self] . identifier[_containers] : identifier[idx] = identifier[self] . identifier[_fields] . identifier[index] ( identifier[container] ) identifier[dup] . identifier[_containers] . identifier[append] ( identifier[dup] . identifier[_fields] [ identifier[idx] ]) keyword[for] identifier[field] keyword[in] identifier[dup] . identifier[_fields] : identifier[field] . identifier[enclosing] = identifier[dup] keyword[return] identifier[dup]
def copy(self): """ :return: a copy of the container """ dup = super(Container, self).copy() dup._fields = [field.copy() for field in self._fields] dup._fields_dict = {field.get_name(): field for field in dup._fields if field.get_name() is not None} dup._containers = [] for container in self._containers: idx = self._fields.index(container) dup._containers.append(dup._fields[idx]) # depends on [control=['for'], data=['container']] for field in dup._fields: field.enclosing = dup # depends on [control=['for'], data=['field']] return dup
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai', overwrite=True, **kwargs):
    "Creates jekyll metadata for given notebook path."
    notebook = read_nb(nb_path)
    # Collect jekyll fields, dropping any that were left unset.
    metadata = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs}
    metadata = {key: val for key, val in metadata.items() if val is not None}
    if not metadata:
        return
    # NOTE(review): `overwrite` is accepted but never consulted here — confirm intent.
    notebook['metadata']['jekyll'] = metadata
    write_nb(notebook, nb_path)
    # Re-sign so Jupyter still trusts the edited notebook.
    NotebookNotary().sign(notebook)
def function[update_nb_metadata, parameter[nb_path, title, summary, keywords, overwrite]]: constant[Creates jekyll metadata for given notebook path.] variable[nb] assign[=] call[name[read_nb], parameter[name[nb_path]]] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b2890>, <ast.Constant object at 0x7da20e9b0d30>, <ast.Constant object at 0x7da20e9b35b0>, None], [<ast.Name object at 0x7da20e9b2b00>, <ast.Name object at 0x7da20e9b14b0>, <ast.Name object at 0x7da20e9b1210>, <ast.Name object at 0x7da20e9b1540>]] variable[data] assign[=] <ast.DictComp object at 0x7da20e9b35e0> if <ast.UnaryOp object at 0x7da1b1d6de10> begin[:] return[None] call[call[name[nb]][constant[metadata]]][constant[jekyll]] assign[=] name[data] call[name[write_nb], parameter[name[nb], name[nb_path]]] call[call[name[NotebookNotary], parameter[]].sign, parameter[name[nb]]]
keyword[def] identifier[update_nb_metadata] ( identifier[nb_path] = keyword[None] , identifier[title] = keyword[None] , identifier[summary] = keyword[None] , identifier[keywords] = literal[string] , identifier[overwrite] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[nb] = identifier[read_nb] ( identifier[nb_path] ) identifier[data] ={ literal[string] : identifier[title] , literal[string] : identifier[summary] , literal[string] : identifier[keywords] ,** identifier[kwargs] } identifier[data] ={ identifier[k] : identifier[v] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[data] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] } keyword[if] keyword[not] identifier[data] : keyword[return] identifier[nb] [ literal[string] ][ literal[string] ]= identifier[data] identifier[write_nb] ( identifier[nb] , identifier[nb_path] ) identifier[NotebookNotary] (). identifier[sign] ( identifier[nb] )
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai', overwrite=True, **kwargs): """Creates jekyll metadata for given notebook path.""" nb = read_nb(nb_path) data = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs} data = {k: v for (k, v) in data.items() if v is not None} # remove none values if not data: return # depends on [control=['if'], data=[]] nb['metadata']['jekyll'] = data write_nb(nb, nb_path) NotebookNotary().sign(nb)
def concat(*cols):
    """
    Concatenates multiple input columns together into a single column.
    The function works with strings, binary and compatible array columns.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd123')]

    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
    """
    sc = SparkContext._active_spark_context
    # Convert the Python columns to a JVM Seq and call the JVM-side concat.
    jcols = _to_seq(sc, cols, _to_java_column)
    return Column(sc._jvm.functions.concat(jcols))
def function[concat, parameter[]]: constant[ Concatenates multiple input columns together into a single column. The function works with strings, binary and compatible array columns. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat(df.s, df.d).alias('s')).collect() [Row(s=u'abcd123')] >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c']) >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect() [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)] ] variable[sc] assign[=] name[SparkContext]._active_spark_context return[call[name[Column], parameter[call[name[sc]._jvm.functions.concat, parameter[call[name[_to_seq], parameter[name[sc], name[cols], name[_to_java_column]]]]]]]]
keyword[def] identifier[concat] (* identifier[cols] ): literal[string] identifier[sc] = identifier[SparkContext] . identifier[_active_spark_context] keyword[return] identifier[Column] ( identifier[sc] . identifier[_jvm] . identifier[functions] . identifier[concat] ( identifier[_to_seq] ( identifier[sc] , identifier[cols] , identifier[_to_java_column] )))
def concat(*cols): """ Concatenates multiple input columns together into a single column. The function works with strings, binary and compatible array columns. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat(df.s, df.d).alias('s')).collect() [Row(s=u'abcd123')] >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c']) >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect() [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
def encode(cls, value):
    """
    encode a floating point number to bytes in redis

    :param value: float
    :return: the repr() text of *value*
    :raises InvalidValue: if *value* is not float-compatible
    """
    try:
        # Accept anything that round-trips through float() and still
        # compares equal to itself (rejects strings, NaN, non-numerics).
        is_floatlike = float(value) + 0 == value
    except (TypeError, ValueError):
        is_floatlike = False
    if is_floatlike:
        return repr(value)
    raise InvalidValue('not a float')
def function[encode, parameter[cls, value]]: constant[ encode a floating point number to bytes in redis :param value: float :return: bytes ] <ast.Try object at 0x7da1b0bdb640> <ast.Raise object at 0x7da1b0a6dae0>
keyword[def] identifier[encode] ( identifier[cls] , identifier[value] ): literal[string] keyword[try] : keyword[if] identifier[float] ( identifier[value] )+ literal[int] == identifier[value] : keyword[return] identifier[repr] ( identifier[value] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[pass] keyword[raise] identifier[InvalidValue] ( literal[string] )
def encode(cls, value): """ encode a floating point number to bytes in redis :param value: float :return: bytes """ try: if float(value) + 0 == value: return repr(value) # depends on [control=['if'], data=['value']] # depends on [control=['try'], data=[]] except (TypeError, ValueError): pass # depends on [control=['except'], data=[]] raise InvalidValue('not a float')
def _build_response(self, resp): """Build internal Response object from given response.""" # rememberLogin # if self.method is 'LOGIN' and resp.json().get('code') == 200: # cookiesJar.save_cookies(resp, NCloudBot.username) self.response.content = resp.content self.response.status_code = resp.status_code self.response.headers = resp.headers
def function[_build_response, parameter[self, resp]]: constant[Build internal Response object from given response.] name[self].response.content assign[=] name[resp].content name[self].response.status_code assign[=] name[resp].status_code name[self].response.headers assign[=] name[resp].headers
keyword[def] identifier[_build_response] ( identifier[self] , identifier[resp] ): literal[string] identifier[self] . identifier[response] . identifier[content] = identifier[resp] . identifier[content] identifier[self] . identifier[response] . identifier[status_code] = identifier[resp] . identifier[status_code] identifier[self] . identifier[response] . identifier[headers] = identifier[resp] . identifier[headers]
def _build_response(self, resp): """Build internal Response object from given response.""" # rememberLogin # if self.method is 'LOGIN' and resp.json().get('code') == 200: # cookiesJar.save_cookies(resp, NCloudBot.username) self.response.content = resp.content self.response.status_code = resp.status_code self.response.headers = resp.headers
def depends(self, offset=0, count=25):
    """Return all the currently dependent jobs."""
    args = ('jobs', 'depends', self.name, offset, count)
    return self.client(*args)
def function[depends, parameter[self, offset, count]]: constant[Return all the currently dependent jobs] return[call[name[self].client, parameter[constant[jobs], constant[depends], name[self].name, name[offset], name[count]]]]
keyword[def] identifier[depends] ( identifier[self] , identifier[offset] = literal[int] , identifier[count] = literal[int] ): literal[string] keyword[return] identifier[self] . identifier[client] ( literal[string] , literal[string] , identifier[self] . identifier[name] , identifier[offset] , identifier[count] )
def depends(self, offset=0, count=25): """Return all the currently dependent jobs""" return self.client('jobs', 'depends', self.name, offset, count)
def read(message):
    """Convert a parsed protobuf message into a histogram."""
    require_compatible_version(message.physt_compatible)
    # Currently the only implementation
    return create_from_dict(_dict_from_v0342(message), "Message")
def function[read, parameter[message]]: constant[Convert a parsed protobuf message into a histogram.] call[name[require_compatible_version], parameter[name[message].physt_compatible]] variable[a_dict] assign[=] call[name[_dict_from_v0342], parameter[name[message]]] return[call[name[create_from_dict], parameter[name[a_dict], constant[Message]]]]
keyword[def] identifier[read] ( identifier[message] ): literal[string] identifier[require_compatible_version] ( identifier[message] . identifier[physt_compatible] ) identifier[a_dict] = identifier[_dict_from_v0342] ( identifier[message] ) keyword[return] identifier[create_from_dict] ( identifier[a_dict] , literal[string] )
def read(message): """Convert a parsed protobuf message into a histogram.""" require_compatible_version(message.physt_compatible) # Currently the only implementation a_dict = _dict_from_v0342(message) return create_from_dict(a_dict, 'Message')
def setWorkingPerimeter(self, unPointCount):
    """Sets the Collision Bounds in the working copy."""
    point_buffer = HmdVector2_t()
    # The native call fills the buffer in place via the pointer.
    self.function_table.setWorkingPerimeter(byref(point_buffer), unPointCount)
    return point_buffer
def function[setWorkingPerimeter, parameter[self, unPointCount]]: constant[Sets the Collision Bounds in the working copy.] variable[fn] assign[=] name[self].function_table.setWorkingPerimeter variable[pPointBuffer] assign[=] call[name[HmdVector2_t], parameter[]] call[name[fn], parameter[call[name[byref], parameter[name[pPointBuffer]]], name[unPointCount]]] return[name[pPointBuffer]]
keyword[def] identifier[setWorkingPerimeter] ( identifier[self] , identifier[unPointCount] ): literal[string] identifier[fn] = identifier[self] . identifier[function_table] . identifier[setWorkingPerimeter] identifier[pPointBuffer] = identifier[HmdVector2_t] () identifier[fn] ( identifier[byref] ( identifier[pPointBuffer] ), identifier[unPointCount] ) keyword[return] identifier[pPointBuffer]
def setWorkingPerimeter(self, unPointCount): """Sets the Collision Bounds in the working copy.""" fn = self.function_table.setWorkingPerimeter pPointBuffer = HmdVector2_t() fn(byref(pPointBuffer), unPointCount) return pPointBuffer
def removeZeroLenPadding(str, blocksize=AES_blocksize):
    """Remove padding made of zero bytes plus a final byte holding the pad length."""
    last = str[-1]
    try:
        # Text input: the last character encodes the number of padding bytes.
        pad_len = ord(last)
    except TypeError:
        # Bytes input: indexing already yields an int.
        pad_len = last
    assert pad_len < blocksize, 'padding error'
    assert pad_len < len(str), 'padding error'
    return str[:-pad_len]
def function[removeZeroLenPadding, parameter[str, blocksize]]: constant[Remove Padding with zeroes + last byte equal to the number of padding bytes] <ast.Try object at 0x7da20e954490> assert[compare[name[pad_len] less[<] name[blocksize]]] assert[compare[name[pad_len] less[<] call[name[len], parameter[name[str]]]]] return[call[name[str]][<ast.Slice object at 0x7da20c76f160>]]
keyword[def] identifier[removeZeroLenPadding] ( identifier[str] , identifier[blocksize] = identifier[AES_blocksize] ): literal[string] keyword[try] : identifier[pad_len] = identifier[ord] ( identifier[str] [- literal[int] ]) keyword[except] identifier[TypeError] : identifier[pad_len] = identifier[str] [- literal[int] ] keyword[assert] identifier[pad_len] < identifier[blocksize] , literal[string] keyword[assert] identifier[pad_len] < identifier[len] ( identifier[str] ), literal[string] keyword[return] identifier[str] [:- identifier[pad_len] ]
def removeZeroLenPadding(str, blocksize=AES_blocksize): """Remove Padding with zeroes + last byte equal to the number of padding bytes""" try: pad_len = ord(str[-1]) # last byte contains number of padding bytes # depends on [control=['try'], data=[]] except TypeError: pad_len = str[-1] # depends on [control=['except'], data=[]] assert pad_len < blocksize, 'padding error' assert pad_len < len(str), 'padding error' return str[:-pad_len]
def discrete_index(self, indices):
    """get elements by discrete indices

    :param indices: list discrete indices
    :return: elements
    """
    return [self[i] for i in indices]
def function[discrete_index, parameter[self, indices]]: constant[get elements by discrete indices :param indices: list discrete indices :return: elements ] variable[elements] assign[=] list[[]] for taget[name[i]] in starred[name[indices]] begin[:] call[name[elements].append, parameter[call[name[self]][name[i]]]] return[name[elements]]
keyword[def] identifier[discrete_index] ( identifier[self] , identifier[indices] ): literal[string] identifier[elements] =[] keyword[for] identifier[i] keyword[in] identifier[indices] : identifier[elements] . identifier[append] ( identifier[self] [ identifier[i] ]) keyword[return] identifier[elements]
def discrete_index(self, indices): """get elements by discrete indices :param indices: list discrete indices :return: elements """ elements = [] for i in indices: elements.append(self[i]) # depends on [control=['for'], data=['i']] return elements
def list_storage_class(self, **kwargs):
    """
    list or watch objects of kind StorageClass

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_storage_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continuation token from a previous list call.
    :param str field_selector: Restrict the returned objects by their fields.
    :param str label_selector: Restrict the returned objects by their labels.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Show changes after this resource version.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes and return them as a stream.
    :return: V1StorageClassList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper, which
    # returns either the data or the request thread depending on async_req.
    return self.list_storage_class_with_http_info(**kwargs)
def function[list_storage_class, parameter[self]]: constant[ list or watch objects of kind StorageClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_storage_class(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1StorageClassList If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].list_storage_class_with_http_info, parameter[]]]
keyword[def] identifier[list_storage_class] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[list_storage_class_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[list_storage_class_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def list_storage_class(self, **kwargs): """ list or watch objects of kind StorageClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_storage_class(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1StorageClassList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_storage_class_with_http_info(**kwargs) # depends on [control=['if'], data=[]] else: data = self.list_storage_class_with_http_info(**kwargs) return data
def precision_matrix(self):
    """
    Returns the precision matrix of the distribution.

    Precision is defined as the inverse of the variance. This method returns
    the inverse matrix of the covariance, computing it lazily on first access.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.distributions import GaussianDistribution as GD
    >>> dis = GD(variables=['x1', 'x2', 'x3'],
    ...          mean=[1, -3, 4],
    ...          cov=[[4, 2, -2],
    ...               [2, 5, -5],
    ...               [-2, -5, 8]])
    >>> dis.precision_matrix
    array([[ 0.3125    , -0.125     ,  0.        ],
           [-0.125     ,  0.58333333,  0.33333333],
           [ 0.        ,  0.33333333,  0.33333333]])
    """
    cached = self._precision_matrix
    if cached is None:
        # Compute once and memoize on the instance.
        cached = np.linalg.inv(self.covariance)
        self._precision_matrix = cached
    return cached
def function[precision_matrix, parameter[self]]: constant[ Returns the precision matrix of the distribution. Precision is defined as the inverse of the variance. This method returns the inverse matrix of the covariance. Examples -------- >>> import numpy as np >>> from pgmpy.factors.distributions import GaussianDistribution as GD >>> dis = GD(variables=['x1', 'x2', 'x3'], ... mean=[1, -3, 4], ... cov=[[4, 2, -2], ... [2, 5, -5], ... [-2, -5, 8]])) >>> dis.precision_matrix array([[ 0.3125 , -0.125 , 0. ], [-0.125 , 0.58333333, 0.33333333], [ 0. , 0.33333333, 0.33333333]]) ] if compare[name[self]._precision_matrix is constant[None]] begin[:] name[self]._precision_matrix assign[=] call[name[np].linalg.inv, parameter[name[self].covariance]] return[name[self]._precision_matrix]
keyword[def] identifier[precision_matrix] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_precision_matrix] keyword[is] keyword[None] : identifier[self] . identifier[_precision_matrix] = identifier[np] . identifier[linalg] . identifier[inv] ( identifier[self] . identifier[covariance] ) keyword[return] identifier[self] . identifier[_precision_matrix]
def precision_matrix(self): """ Returns the precision matrix of the distribution. Precision is defined as the inverse of the variance. This method returns the inverse matrix of the covariance. Examples -------- >>> import numpy as np >>> from pgmpy.factors.distributions import GaussianDistribution as GD >>> dis = GD(variables=['x1', 'x2', 'x3'], ... mean=[1, -3, 4], ... cov=[[4, 2, -2], ... [2, 5, -5], ... [-2, -5, 8]])) >>> dis.precision_matrix array([[ 0.3125 , -0.125 , 0. ], [-0.125 , 0.58333333, 0.33333333], [ 0. , 0.33333333, 0.33333333]]) """ if self._precision_matrix is None: self._precision_matrix = np.linalg.inv(self.covariance) # depends on [control=['if'], data=[]] return self._precision_matrix
def per_from_id_in(s, flavors=chat_flavors+inline_flavors):
    """
    :param s: a list or set of from id
    :param flavors: ``all`` or a list of flavors
    :return: a seeder function that returns the from id only if the from id
        is in ``s`` and message flavor is in ``flavors``.
    """
    def seed(msg):
        # Preserve evaluation order: flavor check first, then membership in s.
        if (flavors == 'all' or flavor(msg) in flavors) and msg['from']['id'] in s:
            return msg['from']['id']
        return None
    return _wrap_none(seed)
def function[per_from_id_in, parameter[s, flavors]]: constant[ :param s: a list or set of from id :param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the from id is in ``s`` and message flavor is in ``flavors``. ] return[call[name[_wrap_none], parameter[<ast.Lambda object at 0x7da1b1baf5b0>]]]
keyword[def] identifier[per_from_id_in] ( identifier[s] , identifier[flavors] = identifier[chat_flavors] + identifier[inline_flavors] ): literal[string] keyword[return] identifier[_wrap_none] ( keyword[lambda] identifier[msg] : identifier[msg] [ literal[string] ][ literal[string] ] keyword[if] ( identifier[flavors] == literal[string] keyword[or] identifier[flavor] ( identifier[msg] ) keyword[in] identifier[flavors] ) keyword[and] identifier[msg] [ literal[string] ][ literal[string] ] keyword[in] identifier[s] keyword[else] keyword[None] )
def per_from_id_in(s, flavors=chat_flavors + inline_flavors): """ :param s: a list or set of from id :param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the from id is in ``s`` and message flavor is in ``flavors``. """ return _wrap_none(lambda msg: msg['from']['id'] if (flavors == 'all' or flavor(msg) in flavors) and msg['from']['id'] in s else None)
def column_types_equal(a_coltype: TypeEngine, b_coltype: TypeEngine) -> bool:
    """
    Checks that two SQLAlchemy column types are equal (by comparing
    ``str()`` versions of them).

    See
    http://stackoverflow.com/questions/34787794/sqlalchemy-column-type-comparison.

    IMPERFECT.
    """  # noqa
    a_repr = str(a_coltype)
    b_repr = str(b_coltype)
    return a_repr == b_repr
def function[column_types_equal, parameter[a_coltype, b_coltype]]: constant[ Checks that two SQLAlchemy column types are equal (by comparing ``str()`` versions of them). See http://stackoverflow.com/questions/34787794/sqlalchemy-column-type-comparison. IMPERFECT. ] return[compare[call[name[str], parameter[name[a_coltype]]] equal[==] call[name[str], parameter[name[b_coltype]]]]]
keyword[def] identifier[column_types_equal] ( identifier[a_coltype] : identifier[TypeEngine] , identifier[b_coltype] : identifier[TypeEngine] )-> identifier[bool] : literal[string] keyword[return] identifier[str] ( identifier[a_coltype] )== identifier[str] ( identifier[b_coltype] )
def column_types_equal(a_coltype: TypeEngine, b_coltype: TypeEngine) -> bool: """ Checks that two SQLAlchemy column types are equal (by comparing ``str()`` versions of them). See http://stackoverflow.com/questions/34787794/sqlalchemy-column-type-comparison. IMPERFECT. """ # noqa return str(a_coltype) == str(b_coltype)
def list_functions(self, prefix=None):
    """List extant cloud functions."""
    parent = "projects/{}/locations/{}".format(
        self.session.get_default_project(), self.region)
    response = self.client.execute_command('list', {'parent': parent})
    # Missing 'functions' key means no functions exist in this location.
    return response.get('functions', [])
def function[list_functions, parameter[self, prefix]]: constant[List extant cloud functions.] return[call[call[name[self].client.execute_command, parameter[constant[list], dictionary[[<ast.Constant object at 0x7da1b1c3c940>], [<ast.Call object at 0x7da1b1c3d090>]]]].get, parameter[constant[functions], list[[]]]]]
keyword[def] identifier[list_functions] ( identifier[self] , identifier[prefix] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[client] . identifier[execute_command] ( literal[string] , { literal[string] : literal[string] . identifier[format] ( identifier[self] . identifier[session] . identifier[get_default_project] (), identifier[self] . identifier[region] )} ). identifier[get] ( literal[string] ,[])
def list_functions(self, prefix=None): """List extant cloud functions.""" return self.client.execute_command('list', {'parent': 'projects/{}/locations/{}'.format(self.session.get_default_project(), self.region)}).get('functions', [])
def _filter_pb(field_or_unary):
    """Convert a specific protobuf filter to the generic filter type.

    Args:
        field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\
            query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\
            firestore.v1beta1.query_pb2.StructuredQuery.FieldFilter]): A
            field or unary filter to convert to a generic filter.

    Returns:
        google.cloud.firestore_v1beta1.types.\
        StructuredQuery.Filter: A "generic" filter.

    Raises:
        ValueError: If ``field_or_unary`` is not a field or unary filter.
    """
    # Early returns instead of an if/elif chain; wrap the concrete filter
    # in the matching oneof slot of the generic Filter message.
    if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter):
        return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary)
    if isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter):
        return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary)
    raise ValueError("Unexpected filter type", type(field_or_unary), field_or_unary)
def function[_filter_pb, parameter[field_or_unary]]: constant[Convert a specific protobuf filter to the generic filter type. Args: field_or_unary (Union[google.cloud.proto.firestore.v1beta1. query_pb2.StructuredQuery.FieldFilter, google.cloud.proto. firestore.v1beta1.query_pb2.StructuredQuery.FieldFilter]): A field or unary filter to convert to a generic filter. Returns: google.cloud.firestore_v1beta1.types. StructuredQuery.Filter: A "generic" filter. Raises: ValueError: If ``field_or_unary`` is not a field or unary filter. ] if call[name[isinstance], parameter[name[field_or_unary], name[query_pb2].StructuredQuery.FieldFilter]] begin[:] return[call[name[query_pb2].StructuredQuery.Filter, parameter[]]]
keyword[def] identifier[_filter_pb] ( identifier[field_or_unary] ): literal[string] keyword[if] identifier[isinstance] ( identifier[field_or_unary] , identifier[query_pb2] . identifier[StructuredQuery] . identifier[FieldFilter] ): keyword[return] identifier[query_pb2] . identifier[StructuredQuery] . identifier[Filter] ( identifier[field_filter] = identifier[field_or_unary] ) keyword[elif] identifier[isinstance] ( identifier[field_or_unary] , identifier[query_pb2] . identifier[StructuredQuery] . identifier[UnaryFilter] ): keyword[return] identifier[query_pb2] . identifier[StructuredQuery] . identifier[Filter] ( identifier[unary_filter] = identifier[field_or_unary] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] , identifier[type] ( identifier[field_or_unary] ), identifier[field_or_unary] )
def _filter_pb(field_or_unary): """Convert a specific protobuf filter to the generic filter type. Args: field_or_unary (Union[google.cloud.proto.firestore.v1beta1. query_pb2.StructuredQuery.FieldFilter, google.cloud.proto. firestore.v1beta1.query_pb2.StructuredQuery.FieldFilter]): A field or unary filter to convert to a generic filter. Returns: google.cloud.firestore_v1beta1.types. StructuredQuery.Filter: A "generic" filter. Raises: ValueError: If ``field_or_unary`` is not a field or unary filter. """ if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter): return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary) # depends on [control=['if'], data=[]] elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter): return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary) # depends on [control=['if'], data=[]] else: raise ValueError('Unexpected filter type', type(field_or_unary), field_or_unary)