code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def is_subtype_of(self, type_name, context=None):
    """Whether this class is a subtype of the given type.

    :param type_name: The name of the type of check against.
    :type type_name: str
    :returns: True if this class is a subtype of the given type,
        False otherwise.
    :rtype: bool
    """
    # The class is trivially a subtype of itself.
    if type_name == self.qname():
        return True
    # Otherwise, check every ancestor in the MRO for a matching name.
    return any(
        ancestor.qname() == type_name
        for ancestor in self.ancestors(context=context)
    )
def function[is_subtype_of, parameter[self, type_name, context]]: constant[Whether this class is a subtype of the given type. :param type_name: The name of the type of check against. :type type_name: str :returns: True if this class is a subtype of the given type, False otherwise. :rtype: bool ] if compare[call[name[self].qname, parameter[]] equal[==] name[type_name]] begin[:] return[constant[True]] for taget[name[anc]] in starred[call[name[self].ancestors, parameter[]]] begin[:] if compare[call[name[anc].qname, parameter[]] equal[==] name[type_name]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[is_subtype_of] ( identifier[self] , identifier[type_name] , identifier[context] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[qname] ()== identifier[type_name] : keyword[return] keyword[True] keyword[for] identifier[anc] keyword[in] identifier[self] . identifier[ancestors] ( identifier[context] = identifier[context] ): keyword[if] identifier[anc] . identifier[qname] ()== identifier[type_name] : keyword[return] keyword[True] keyword[return] keyword[False]
def is_subtype_of(self, type_name, context=None): """Whether this class is a subtype of the given type. :param type_name: The name of the type of check against. :type type_name: str :returns: True if this class is a subtype of the given type, False otherwise. :rtype: bool """ if self.qname() == type_name: return True # depends on [control=['if'], data=[]] for anc in self.ancestors(context=context): if anc.qname() == type_name: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['anc']] return False
def dump(self):
    """Dump the current state to the log at DEBUG level.

    Pretty-prints the normalized permission list returned by
    ``get_normalized_perm_list()``, emitting one indented log record
    per formatted line, preceded by an ``AccessPolicy:`` header.
    """
    logging.debug('AccessPolicy:')
    # BUG FIX: the original passed the debug calls through map() and
    # discarded the result.  On Python 3, map() is lazy, so the log
    # calls were never executed and nothing was dumped.  Iterate
    # explicitly for the side effect instead.
    for line in pprint.pformat(self.get_normalized_perm_list()).splitlines():
        logging.debug(' {}'.format(line))
def function[dump, parameter[self]]: constant[Dump the current state to debug level log.] call[name[logging].debug, parameter[constant[AccessPolicy:]]] call[name[map], parameter[name[logging].debug, <ast.ListComp object at 0x7da1b1adcd30>]]
keyword[def] identifier[dump] ( identifier[self] ): literal[string] identifier[logging] . identifier[debug] ( literal[string] ) identifier[map] ( identifier[logging] . identifier[debug] , [ literal[string] . identifier[format] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[pprint] . identifier[pformat] ( identifier[self] . identifier[get_normalized_perm_list] ()). identifier[splitlines] () ], )
def dump(self): """Dump the current state to debug level log.""" logging.debug('AccessPolicy:') map(logging.debug, [' {}'.format(s) for s in pprint.pformat(self.get_normalized_perm_list()).splitlines()])
def com_google_fonts_check_metadata_valid_copyright(font_metadata):
    """Copyright notices match canonical pattern in METADATA.pb"""
    import re
    notice = font_metadata.copyright
    # Canonical form: "Copyright <year> The <family> Project Authors (<url>)"
    pattern = r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)'
    if re.search(pattern, notice):
        yield PASS, "METADATA.pb copyright string is good"
        return
    yield FAIL, ("METADATA.pb: Copyright notices should match"
                 " a pattern similar to:"
                 " 'Copyright 2017 The Familyname"
                 " Project Authors (git url)'\n"
                 "But instead we have got:"
                 " '{}'").format(notice)
def function[com_google_fonts_check_metadata_valid_copyright, parameter[font_metadata]]: constant[Copyright notices match canonical pattern in METADATA.pb] import module[re] variable[string] assign[=] name[font_metadata].copyright variable[does_match] assign[=] call[name[re].search, parameter[constant[Copyright [0-9]{4} The .* Project Authors \([^\@]*\)], name[string]]] if name[does_match] begin[:] <ast.Yield object at 0x7da1b12190c0>
keyword[def] identifier[com_google_fonts_check_metadata_valid_copyright] ( identifier[font_metadata] ): literal[string] keyword[import] identifier[re] identifier[string] = identifier[font_metadata] . identifier[copyright] identifier[does_match] = identifier[re] . identifier[search] ( literal[string] , identifier[string] ) keyword[if] identifier[does_match] : keyword[yield] identifier[PASS] , literal[string] keyword[else] : keyword[yield] identifier[FAIL] ,( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[string] )
def com_google_fonts_check_metadata_valid_copyright(font_metadata): """Copyright notices match canonical pattern in METADATA.pb""" import re string = font_metadata.copyright does_match = re.search('Copyright [0-9]{4} The .* Project Authors \\([^\\@]*\\)', string) if does_match: yield (PASS, 'METADATA.pb copyright string is good') # depends on [control=['if'], data=[]] else: yield (FAIL, "METADATA.pb: Copyright notices should match a pattern similar to: 'Copyright 2017 The Familyname Project Authors (git url)'\nBut instead we have got: '{}'".format(string))
def evalsha(self, sha1, keys=None, args=None):
    """Evaluates a script cached on the server side by its SHA1 digest.

    Scripts are cached on the server side using the
    :meth:`~tredis.RedisClient.script_load` command. The command is
    otherwise identical to :meth:`~tredis.RedisClient.eval`.

    .. note:: **Time complexity**: Depends on the script that is executed.

    :param str sha1: The sha1 hash of the script to execute
    :param list keys: A list of keys to pass into the script
    :param list args: A list of args to pass into the script
    :return: mixed

    """
    # Normalize missing/empty key and arg lists to empty sequences.
    keys = keys or []
    args = args or []
    command = [b'EVALSHA', sha1, str(len(keys))]
    return self._execute(command + keys + args)
def function[evalsha, parameter[self, sha1, keys, args]]: constant[Evaluates a script cached on the server side by its SHA1 digest. Scripts are cached on the server side using the :meth:`~tredis.RedisClient.script_load` command. The command is otherwise identical to :meth:`~tredis.RedisClient.eval`. .. note:: **Time complexity**: Depends on the script that is executed. :param str sha1: The sha1 hash of the script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed ] if <ast.UnaryOp object at 0x7da204621c60> begin[:] variable[keys] assign[=] list[[]] if <ast.UnaryOp object at 0x7da2046226b0> begin[:] variable[args] assign[=] list[[]] return[call[name[self]._execute, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da204620f70>, <ast.Name object at 0x7da2046215d0>, <ast.Call object at 0x7da204623e50>]] + name[keys]] + name[args]]]]]
keyword[def] identifier[evalsha] ( identifier[self] , identifier[sha1] , identifier[keys] = keyword[None] , identifier[args] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[keys] : identifier[keys] =[] keyword[if] keyword[not] identifier[args] : identifier[args] =[] keyword[return] identifier[self] . identifier[_execute] ([ literal[string] , identifier[sha1] , identifier[str] ( identifier[len] ( identifier[keys] ))]+ identifier[keys] + identifier[args] )
def evalsha(self, sha1, keys=None, args=None): """Evaluates a script cached on the server side by its SHA1 digest. Scripts are cached on the server side using the :meth:`~tredis.RedisClient.script_load` command. The command is otherwise identical to :meth:`~tredis.RedisClient.eval`. .. note:: **Time complexity**: Depends on the script that is executed. :param str sha1: The sha1 hash of the script to execute :param list keys: A list of keys to pass into the script :param list args: A list of args to pass into the script :return: mixed """ if not keys: keys = [] # depends on [control=['if'], data=[]] if not args: args = [] # depends on [control=['if'], data=[]] return self._execute([b'EVALSHA', sha1, str(len(keys))] + keys + args)
def attr_to_dict(obj, attr, dct):
    """
    Add attribute to dict if it exists.

    :param dct: target dict, updated in place when the attribute exists
    :param obj: object
    :param attr: object attribute name
    :return: dict
    """
    # Fetch with a unique sentinel default so a single lookup replaces
    # the hasattr/getattr pair.
    missing = object()
    value = getattr(obj, attr, missing)
    if value is not missing:
        dct[attr] = value
    return dct
def function[attr_to_dict, parameter[obj, attr, dct]]: constant[ Add attribute to dict if it exists. :param dct: :param obj: object :param attr: object attribute name :return: dict ] if call[name[hasattr], parameter[name[obj], name[attr]]] begin[:] call[name[dct]][name[attr]] assign[=] call[name[getattr], parameter[name[obj], name[attr]]] return[name[dct]]
keyword[def] identifier[attr_to_dict] ( identifier[obj] , identifier[attr] , identifier[dct] ): literal[string] keyword[if] identifier[hasattr] ( identifier[obj] , identifier[attr] ): identifier[dct] [ identifier[attr] ]= identifier[getattr] ( identifier[obj] , identifier[attr] ) keyword[return] identifier[dct]
def attr_to_dict(obj, attr, dct): """ Add attribute to dict if it exists. :param dct: :param obj: object :param attr: object attribute name :return: dict """ if hasattr(obj, attr): dct[attr] = getattr(obj, attr) # depends on [control=['if'], data=[]] return dct
def round_figures(x, n):
    """Return *x* rounded to *n* significant figures.

    :param x: number to round (int or float)
    :param n: number of significant figures to keep
    :return: ``x`` rounded to ``n`` significant figures; zero is
        returned unchanged, since it has no leading significant digit
    """
    # BUG FIX: math.log10(abs(0)) raises ValueError, so zero must be
    # handled explicitly instead of crashing.
    if x == 0:
        return x
    return round(x, int(n - math.ceil(math.log10(abs(x)))))
def function[round_figures, parameter[x, n]]: constant[Returns x rounded to n significant figures.] return[call[name[round], parameter[name[x], call[name[int], parameter[binary_operation[name[n] - call[name[math].ceil, parameter[call[name[math].log10, parameter[call[name[abs], parameter[name[x]]]]]]]]]]]]]
keyword[def] identifier[round_figures] ( identifier[x] , identifier[n] ): literal[string] keyword[return] identifier[round] ( identifier[x] , identifier[int] ( identifier[n] - identifier[math] . identifier[ceil] ( identifier[math] . identifier[log10] ( identifier[abs] ( identifier[x] )))))
def round_figures(x, n): """Returns x rounded to n significant figures.""" return round(x, int(n - math.ceil(math.log10(abs(x)))))
def __send_message(self, proto, payload):  # pylint: disable=no-self-use
    """Sends a message"""
    try:
        # Normalize text payloads to UTF-8 bytes before sending.
        data = payload if isinstance(payload, bytes) else payload.encode("utf-8")
        if not proto.connected:
            raise IOError("not connected")
        proto.sendMessage(data)
        logger.debug("sent: %r", data)
    except Exception as ex:
        # Log the failure and hand the exception back to the protocol
        # object so it can re-raise it in the appropriate context.
        logger.exception("Failed to send message")
        proto.reraise(ex)
def function[__send_message, parameter[self, proto, payload]]: constant[Sends a message] <ast.Try object at 0x7da20e960130>
keyword[def] identifier[__send_message] ( identifier[self] , identifier[proto] , identifier[payload] ): literal[string] keyword[try] : keyword[if] keyword[not] identifier[isinstance] ( identifier[payload] , identifier[bytes] ): identifier[payload] = identifier[payload] . identifier[encode] ( literal[string] ) keyword[if] keyword[not] identifier[proto] . identifier[connected] : keyword[raise] identifier[IOError] ( literal[string] ) identifier[proto] . identifier[sendMessage] ( identifier[payload] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[payload] ) keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[logger] . identifier[exception] ( literal[string] ) identifier[proto] . identifier[reraise] ( identifier[ex] )
def __send_message(self, proto, payload): # pylint: disable=no-self-use 'Sends a message' try: if not isinstance(payload, bytes): payload = payload.encode('utf-8') # depends on [control=['if'], data=[]] if not proto.connected: raise IOError('not connected') # depends on [control=['if'], data=[]] proto.sendMessage(payload) logger.debug('sent: %r', payload) # depends on [control=['try'], data=[]] except Exception as ex: logger.exception('Failed to send message') proto.reraise(ex) # depends on [control=['except'], data=['ex']]
def get_user_last_submissions(self, limit=5, request=None):
    """ Get last submissions of a user """
    if request is None:
        request = {}
    request.update({"username": self._user_manager.session_username()})

    # Rather than sorting all submissions by date before grouping,
    # group per (course, task) while tracking the max date via $push,
    # then filter each group down to the submission(s) made at that
    # date.  This is much cheaper than a full pre-sort.
    group_stage = {"$group": {
        "_id": {"courseid": "$courseid", "taskid": "$taskid"},
        "submitted_on": {"$max": "$submitted_on"},
        "submissions": {"$push": {
            "_id": "$_id",
            "result": "$result",
            "status": "$status",
            "courseid": "$courseid",
            "taskid": "$taskid",
            "submitted_on": "$submitted_on"
        }},
    }}
    project_stage = {"$project": {
        "submitted_on": 1,
        "submissions": {
            # This could be replaced by $filter if mongo v3.2 is set as dependency
            "$setDifference": [
                {"$map": {
                    "input": "$submissions",
                    "as": "submission",
                    "in": {"$cond": [
                        {"$eq": ["$submitted_on", "$$submission.submitted_on"]},
                        "$$submission",
                        False
                    ]}
                }},
                [False]
            ]
        }
    }}
    pipeline = [
        {"$match": request},
        group_stage,
        project_stage,
        {"$sort": {"submitted_on": pymongo.DESCENDING}},
        {"$limit": limit},
    ]
    data = self._database.submissions.aggregate(pipeline)
    return [entry["submissions"][0] for entry in data]
def function[get_user_last_submissions, parameter[self, limit, request]]: constant[ Get last submissions of a user ] if compare[name[request] is constant[None]] begin[:] variable[request] assign[=] dictionary[[], []] call[name[request].update, parameter[dictionary[[<ast.Constant object at 0x7da18f58c6d0>], [<ast.Call object at 0x7da18f58f640>]]]] variable[data] assign[=] call[name[self]._database.submissions.aggregate, parameter[list[[<ast.Dict object at 0x7da18f58c220>, <ast.Dict object at 0x7da18f58ee60>, <ast.Dict object at 0x7da18f58efb0>, <ast.Dict object at 0x7da18f58fc10>, <ast.Dict object at 0x7da18f58fd30>]]]] return[<ast.ListComp object at 0x7da18f58d930>]
keyword[def] identifier[get_user_last_submissions] ( identifier[self] , identifier[limit] = literal[int] , identifier[request] = keyword[None] ): literal[string] keyword[if] identifier[request] keyword[is] keyword[None] : identifier[request] ={} identifier[request] . identifier[update] ({ literal[string] : identifier[self] . identifier[_user_manager] . identifier[session_username] ()}) identifier[data] = identifier[self] . identifier[_database] . identifier[submissions] . identifier[aggregate] ([ { literal[string] : identifier[request] }, { literal[string] :{ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }, literal[string] :{ literal[string] : literal[string] }, literal[string] :{ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }}, }}, { literal[string] :{ literal[string] : literal[int] , literal[string] :{ literal[string] :[ { literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :{ literal[string] :[{ literal[string] :[ literal[string] , literal[string] ]}, literal[string] , keyword[False] ] } }}, [ keyword[False] ] ] } }}, { literal[string] :{ literal[string] : identifier[pymongo] . identifier[DESCENDING] }}, { literal[string] : identifier[limit] } ]) keyword[return] [ identifier[item] [ literal[string] ][ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[data] ]
def get_user_last_submissions(self, limit=5, request=None): """ Get last submissions of a user """ if request is None: request = {} # depends on [control=['if'], data=['request']] request.update({'username': self._user_manager.session_username()}) # Before, submissions were first sorted by submission date, then grouped # and then resorted by submission date before limiting. Actually, grouping # and pushing, keeping the max date, followed by result filtering is much more # efficient # This could be replaced by $filter if mongo v3.2 is set as dependency data = self._database.submissions.aggregate([{'$match': request}, {'$group': {'_id': {'courseid': '$courseid', 'taskid': '$taskid'}, 'submitted_on': {'$max': '$submitted_on'}, 'submissions': {'$push': {'_id': '$_id', 'result': '$result', 'status': '$status', 'courseid': '$courseid', 'taskid': '$taskid', 'submitted_on': '$submitted_on'}}}}, {'$project': {'submitted_on': 1, 'submissions': {'$setDifference': [{'$map': {'input': '$submissions', 'as': 'submission', 'in': {'$cond': [{'$eq': ['$submitted_on', '$$submission.submitted_on']}, '$$submission', False]}}}, [False]]}}}, {'$sort': {'submitted_on': pymongo.DESCENDING}}, {'$limit': limit}]) return [item['submissions'][0] for item in data]
def get_common_secret(self, server_public, client_public):
    """u = H(PAD(A) | PAD(B))

    :param int server_public:
    :param int client_public:
    :rtype: int
    """
    # Pad both public values to the group size, client value first,
    # then hash the concatenation.
    padded_client = self.pad(client_public)
    padded_server = self.pad(server_public)
    return self.hash(padded_client, padded_server)
def function[get_common_secret, parameter[self, server_public, client_public]]: constant[u = H(PAD(A) | PAD(B)) :param int server_public: :param int client_public: :rtype: int ] return[call[name[self].hash, parameter[call[name[self].pad, parameter[name[client_public]]], call[name[self].pad, parameter[name[server_public]]]]]]
keyword[def] identifier[get_common_secret] ( identifier[self] , identifier[server_public] , identifier[client_public] ): literal[string] keyword[return] identifier[self] . identifier[hash] ( identifier[self] . identifier[pad] ( identifier[client_public] ), identifier[self] . identifier[pad] ( identifier[server_public] ))
def get_common_secret(self, server_public, client_public): """u = H(PAD(A) | PAD(B)) :param int server_public: :param int client_public: :rtype: int """ return self.hash(self.pad(client_public), self.pad(server_public))
def _run(self, thread_n):
    """The thread function."""
    try:
        logger.debug("{0!r}: entering thread #{1}"
                     .format(self, thread_n))
        resolver = self._make_resolver()
        # Consume requests until the None sentinel is dequeued.
        for request in iter(self.queue.get, None):
            method, args = request
            logger.debug(" calling {0!r}.{1}{2!r}"
                         .format(resolver, method, args))
            getattr(resolver, method)(*args)  # pylint: disable=W0142
            self.queue.task_done()
        logger.debug("{0!r}: leaving thread #{1}"
                     .format(self, thread_n))
    finally:
        # Always unregister this worker, even if a request handler raised.
        self.threads.remove(threading.currentThread())
def function[_run, parameter[self, thread_n]]: constant[The thread function.] <ast.Try object at 0x7da1b2347100>
keyword[def] identifier[_run] ( identifier[self] , identifier[thread_n] ): literal[string] keyword[try] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] , identifier[thread_n] )) identifier[resolver] = identifier[self] . identifier[_make_resolver] () keyword[while] keyword[True] : identifier[request] = identifier[self] . identifier[queue] . identifier[get] () keyword[if] identifier[request] keyword[is] keyword[None] : keyword[break] identifier[method] , identifier[args] = identifier[request] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[resolver] , identifier[method] , identifier[args] )) identifier[getattr] ( identifier[resolver] , identifier[method] )(* identifier[args] ) identifier[self] . identifier[queue] . identifier[task_done] () identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] , identifier[thread_n] )) keyword[finally] : identifier[self] . identifier[threads] . identifier[remove] ( identifier[threading] . identifier[currentThread] ())
def _run(self, thread_n): """The thread function.""" try: logger.debug('{0!r}: entering thread #{1}'.format(self, thread_n)) resolver = self._make_resolver() while True: request = self.queue.get() if request is None: break # depends on [control=['if'], data=[]] (method, args) = request logger.debug(' calling {0!r}.{1}{2!r}'.format(resolver, method, args)) getattr(resolver, method)(*args) # pylint: disable=W0142 self.queue.task_done() # depends on [control=['while'], data=[]] logger.debug('{0!r}: leaving thread #{1}'.format(self, thread_n)) # depends on [control=['try'], data=[]] finally: self.threads.remove(threading.currentThread())
def process_IN_MOVED_FROM(self, raw_event):
    """
    Map the cookie with the source path (+ date for cleaning).
    """
    watch = self._watch_manager.get_watch(raw_event.wd)
    # Absolute, normalized path of the file before the move.
    src_path = os.path.normpath(os.path.join(watch.path, raw_event.name))
    # Remember where the move started; the timestamp lets stale
    # cookies be cleaned up later.
    self._mv_cookie[raw_event.cookie] = (src_path, datetime.now())
    return self.process_default(raw_event, {'cookie': raw_event.cookie})
def function[process_IN_MOVED_FROM, parameter[self, raw_event]]: constant[ Map the cookie with the source path (+ date for cleaning). ] variable[watch_] assign[=] call[name[self]._watch_manager.get_watch, parameter[name[raw_event].wd]] variable[path_] assign[=] name[watch_].path variable[src_path] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[path_], name[raw_event].name]]]] call[name[self]._mv_cookie][name[raw_event].cookie] assign[=] tuple[[<ast.Name object at 0x7da18f58c4c0>, <ast.Call object at 0x7da18f58d1e0>]] return[call[name[self].process_default, parameter[name[raw_event], dictionary[[<ast.Constant object at 0x7da18f58e170>], [<ast.Attribute object at 0x7da18f58e7d0>]]]]]
keyword[def] identifier[process_IN_MOVED_FROM] ( identifier[self] , identifier[raw_event] ): literal[string] identifier[watch_] = identifier[self] . identifier[_watch_manager] . identifier[get_watch] ( identifier[raw_event] . identifier[wd] ) identifier[path_] = identifier[watch_] . identifier[path] identifier[src_path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path_] , identifier[raw_event] . identifier[name] )) identifier[self] . identifier[_mv_cookie] [ identifier[raw_event] . identifier[cookie] ]=( identifier[src_path] , identifier[datetime] . identifier[now] ()) keyword[return] identifier[self] . identifier[process_default] ( identifier[raw_event] ,{ literal[string] : identifier[raw_event] . identifier[cookie] })
def process_IN_MOVED_FROM(self, raw_event): """ Map the cookie with the source path (+ date for cleaning). """ watch_ = self._watch_manager.get_watch(raw_event.wd) path_ = watch_.path src_path = os.path.normpath(os.path.join(path_, raw_event.name)) self._mv_cookie[raw_event.cookie] = (src_path, datetime.now()) return self.process_default(raw_event, {'cookie': raw_event.cookie})
def get_daemon_stats(self, details=False):  # pylint: disable=unused-argument
    """Get state of modules and create a scheme for stats data of daemon

    This may be overridden in subclasses (and it is...)

    :return: A dict with the following structure
    ::

       {
         'modules': {
             'internal': {'name': "MYMODULE1", 'state': 'ok'},
             'external': {'name': "MYMODULE2", 'state': 'stopped'},
         },
         And some extra information, see the source code below...
       }

    These information are completed with the data provided by the get_id function
    which provides the daemon identification

    :rtype: dict
    """
    res = self.get_id()
    res.update({
        "program_start": self.program_start,
        "spare": self.spare,
        'counters': {},
        'metrics': [],
        'modules': {
            'internal': {},
            'external': {}
        }
    })

    # Modules information
    modules = res['modules']
    res['counters']['modules'] = len(self.modules_manager.instances)

    # A module is 'ok' unless it is scheduled for restart.
    for instance in self.modules_manager.get_internal_instances():
        state = 'stopped' if instance in self.modules_manager.to_restart else 'ok'
        modules['internal'][instance.name] = {'name': instance.name, 'state': state}

    # BUG FIX: external instances were previously stored under the
    # 'internal' key (copy-paste error), leaving 'external' always empty
    # and overwriting internal entries on name collisions.
    for instance in self.modules_manager.get_external_instances():
        state = 'stopped' if instance in self.modules_manager.to_restart else 'ok'
        modules['external'][instance.name] = {'name': instance.name, 'state': state}

    return res
def setting_address(key):
    """Computes the radix address for the given setting key.

    Keys are broken into four parts, based on the dots in the string.
    For example, the key `a.b.c` address is computed based on `a`, `b`,
    `c` and the empty string. A longer key, for example `a.b.c.d.e`, is
    still broken into four parts, but the remaining pieces are in the
    last part: `a`, `b`, `c` and `d.e`.

    Each of these pieces has a short hash computed (the first 16
    characters of its SHA256 hash in hex), and is joined into a single
    address, with the config namespace (`000000`) added at the
    beginning.

    Args:
        key (str): the setting key
    Returns:
        str: the computed address
    """
    # At most four pieces: everything past the third dot stays in the
    # final piece.
    pieces = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
    hashed = [_short_hash(piece.encode()) for piece in pieces]
    # Right-pad short keys with the hash of the empty string.
    padding = [_EMPTY_PART] * (_MAX_KEY_PARTS - len(hashed))
    return CONFIG_STATE_NAMESPACE + ''.join(hashed + padding)
def function[setting_address, parameter[key]]: constant[Computes the radix address for the given setting key. Keys are broken into four parts, based on the dots in the string. For example, the key `a.b.c` address is computed based on `a`, `b`, `c` and the empty string. A longer key, for example `a.b.c.d.e`, is still broken into four parts, but the remaining pieces are in the last part: `a`, `b`, `c` and `d.e`. Each of these peices has a short hash computed (the first 16 characters of its SHA256 hash in hex), and is joined into a single address, with the config namespace (`000000`) added at the beginning. Args: key (str): the setting key Returns: str: the computed address ] variable[key_parts] assign[=] call[name[key].split, parameter[constant[.]]] variable[addr_parts] assign[=] <ast.ListComp object at 0x7da18f00d240> call[name[addr_parts].extend, parameter[binary_operation[list[[<ast.Name object at 0x7da18f00f4c0>]] * binary_operation[name[_MAX_KEY_PARTS] - call[name[len], parameter[name[addr_parts]]]]]]] return[binary_operation[name[CONFIG_STATE_NAMESPACE] + call[constant[].join, parameter[name[addr_parts]]]]]
keyword[def] identifier[setting_address] ( identifier[key] ): literal[string] identifier[key_parts] = identifier[key] . identifier[split] ( literal[string] , identifier[maxsplit] = identifier[_MAX_KEY_PARTS] - literal[int] ) identifier[addr_parts] =[ identifier[_short_hash] ( identifier[x] . identifier[encode] ()) keyword[for] identifier[x] keyword[in] identifier[key_parts] ] identifier[addr_parts] . identifier[extend] ([ identifier[_EMPTY_PART] ]*( identifier[_MAX_KEY_PARTS] - identifier[len] ( identifier[addr_parts] ))) keyword[return] identifier[CONFIG_STATE_NAMESPACE] + literal[string] . identifier[join] ( identifier[addr_parts] )
def setting_address(key): """Computes the radix address for the given setting key. Keys are broken into four parts, based on the dots in the string. For example, the key `a.b.c` address is computed based on `a`, `b`, `c` and the empty string. A longer key, for example `a.b.c.d.e`, is still broken into four parts, but the remaining pieces are in the last part: `a`, `b`, `c` and `d.e`. Each of these peices has a short hash computed (the first 16 characters of its SHA256 hash in hex), and is joined into a single address, with the config namespace (`000000`) added at the beginning. Args: key (str): the setting key Returns: str: the computed address """ # split the key into 4 parts, maximum key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1) # compute the short hash of each part addr_parts = [_short_hash(x.encode()) for x in key_parts] # pad the parts with the empty hash, if needed addr_parts.extend([_EMPTY_PART] * (_MAX_KEY_PARTS - len(addr_parts))) return CONFIG_STATE_NAMESPACE + ''.join(addr_parts)
def set_rest_notification(self, hit_type, url, event_types=None):
    """
    Configure REST notifications for a HIT type via a
    SetHITTypeNotification operation.

    :param hit_type: the HIT type to attach the notification to
    :param url: the REST endpoint that should receive the notifications
    :param event_types: optional collection of event types to subscribe to
    """
    transport = 'REST'
    return self._set_notification(hit_type, transport, url, event_types)
def function[set_rest_notification, parameter[self, hit_type, url, event_types]]: constant[ Performs a SetHITTypeNotification operation to set REST notification for a specified HIT type ] return[call[name[self]._set_notification, parameter[name[hit_type], constant[REST], name[url], name[event_types]]]]
keyword[def] identifier[set_rest_notification] ( identifier[self] , identifier[hit_type] , identifier[url] , identifier[event_types] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[_set_notification] ( identifier[hit_type] , literal[string] , identifier[url] , identifier[event_types] )
def set_rest_notification(self, hit_type, url, event_types=None): """ Performs a SetHITTypeNotification operation to set REST notification for a specified HIT type """ return self._set_notification(hit_type, 'REST', url, event_types)
def top(self, body_output, features):
  """Computes logits given body output and features.

  Args:
    body_output: either a dict of str to Tensor (one entry per target,
      each value being that target's pre-logit activations), or a single
      Tensor holding the pre-logits for the lone target.
    features: dict of str to Tensor. Typically it is the preprocessed data
      batch after Problem's preprocess_example().

  Returns:
    logits: a dict of str to Tensor mirroring `body_output` when it is a
      dict, otherwise a single logits Tensor. When targets are generated
      at training time:
      logits == {
          "self_generated_targets": <generated targets tensor>
          "logits": <original logits Tensor or dict>
      }
  """
  # Single-target case: compute logits for the canonical "targets" key.
  if not isinstance(body_output, dict):
    return self._top_single(body_output, "targets", features)

  # Multi-target case: one logits tensor per target, each under its own
  # variable scope.
  logits = {}
  for target_name, pre_logits in six.iteritems(body_output):
    # TODO(aidangomez): share variables here?
    with tf.variable_scope(target_name) as top_vs:
      self._add_variable_scope("top_%s" % target_name, top_vs)
      logits[target_name] = self._top_single(pre_logits, target_name, features)
  return logits
def function[top, parameter[self, body_output, features]]: constant[Computes logits given body output and features. Args: body_output: dict of str to Tensor, comprising one key-value pair for each target. Each value denotes the target's pre-logit activations. Alternatively, it may be a single Tensor denoting the pre-logits for that target. features: dict of str to Tensor. Typically it is the preprocessed data batch after Problem's preprocess_example(). Returns: logits: dict of str to Tensor, denoting each logits for each target; or a single Tensor denoting the logits for that target. When targets are generated at training time: logits == { "self_generated_targets": <generated targets tensor> "logits": <original logits Tensor or dict> } ] if call[name[isinstance], parameter[name[body_output], name[dict]]] begin[:] variable[logits] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da204620250>, <ast.Name object at 0x7da204621ff0>]]] in starred[call[name[six].iteritems, parameter[name[body_output]]]] begin[:] with call[name[tf].variable_scope, parameter[name[k]]] begin[:] call[name[self]._add_variable_scope, parameter[binary_operation[constant[top_%s] <ast.Mod object at 0x7da2590d6920> name[k]], name[top_vs]]] call[name[logits]][name[k]] assign[=] call[name[self]._top_single, parameter[name[v], name[k], name[features]]] return[name[logits]]
keyword[def] identifier[top] ( identifier[self] , identifier[body_output] , identifier[features] ): literal[string] keyword[if] identifier[isinstance] ( identifier[body_output] , identifier[dict] ): identifier[logits] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[body_output] ): keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[k] ) keyword[as] identifier[top_vs] : identifier[self] . identifier[_add_variable_scope] ( literal[string] % identifier[k] , identifier[top_vs] ) identifier[logits] [ identifier[k] ]= identifier[self] . identifier[_top_single] ( identifier[v] , identifier[k] , identifier[features] ) keyword[return] identifier[logits] keyword[else] : keyword[return] identifier[self] . identifier[_top_single] ( identifier[body_output] , literal[string] , identifier[features] )
def top(self, body_output, features): """Computes logits given body output and features. Args: body_output: dict of str to Tensor, comprising one key-value pair for each target. Each value denotes the target's pre-logit activations. Alternatively, it may be a single Tensor denoting the pre-logits for that target. features: dict of str to Tensor. Typically it is the preprocessed data batch after Problem's preprocess_example(). Returns: logits: dict of str to Tensor, denoting each logits for each target; or a single Tensor denoting the logits for that target. When targets are generated at training time: logits == { "self_generated_targets": <generated targets tensor> "logits": <original logits Tensor or dict> } """ if isinstance(body_output, dict): logits = {} for (k, v) in six.iteritems(body_output): # TODO(aidangomez): share variables here? with tf.variable_scope(k) as top_vs: self._add_variable_scope('top_%s' % k, top_vs) logits[k] = self._top_single(v, k, features) # depends on [control=['with'], data=['top_vs']] # depends on [control=['for'], data=[]] return logits # depends on [control=['if'], data=[]] else: return self._top_single(body_output, 'targets', features)
def _construct_where_to_match(where_block):
    """Render a Filter block as a MATCH query 'WHERE' clause string."""
    predicate = where_block.predicate
    # A TrueLiteral predicate means the filter is vacuous; upstream code
    # should never hand such a block to the MATCH emitter.
    if predicate == TrueLiteral:
        raise AssertionError(u'Received WHERE block with TrueLiteral predicate: {}'
                             .format(where_block))
    return u'WHERE ' + predicate.to_match()
def function[_construct_where_to_match, parameter[where_block]]: constant[Transform a Filter block into a MATCH query string.] if compare[name[where_block].predicate equal[==] name[TrueLiteral]] begin[:] <ast.Raise object at 0x7da1b16f9de0> return[binary_operation[constant[WHERE ] + call[name[where_block].predicate.to_match, parameter[]]]]
keyword[def] identifier[_construct_where_to_match] ( identifier[where_block] ): literal[string] keyword[if] identifier[where_block] . identifier[predicate] == identifier[TrueLiteral] : keyword[raise] identifier[AssertionError] ( literal[string] . identifier[format] ( identifier[where_block] )) keyword[return] literal[string] + identifier[where_block] . identifier[predicate] . identifier[to_match] ()
def _construct_where_to_match(where_block): """Transform a Filter block into a MATCH query string.""" if where_block.predicate == TrueLiteral: raise AssertionError(u'Received WHERE block with TrueLiteral predicate: {}'.format(where_block)) # depends on [control=['if'], data=[]] return u'WHERE ' + where_block.predicate.to_match()
def handle_unsuback(self):
    """Handle incoming UNSUBACK packet."""
    self.logger.info("UNSUBACK received")
    # The UNSUBACK variable header carries only the message id.
    status, message_id = self.in_packet.read_uint16()
    if status == NC.ERR_SUCCESS:
        # Surface the acknowledgement to listeners as an event.
        self.push_event(event.EventUnsuback(message_id))
        return NC.ERR_SUCCESS
    # Propagate the read failure to the caller.
    return status
def function[handle_unsuback, parameter[self]]: constant[Handle incoming UNSUBACK packet.] call[name[self].logger.info, parameter[constant[UNSUBACK received]]] <ast.Tuple object at 0x7da1b10e47f0> assign[=] call[name[self].in_packet.read_uint16, parameter[]] if compare[name[ret] not_equal[!=] name[NC].ERR_SUCCESS] begin[:] return[name[ret]] variable[evt] assign[=] call[name[event].EventUnsuback, parameter[name[mid]]] call[name[self].push_event, parameter[name[evt]]] return[name[NC].ERR_SUCCESS]
keyword[def] identifier[handle_unsuback] ( identifier[self] ): literal[string] identifier[self] . identifier[logger] . identifier[info] ( literal[string] ) identifier[ret] , identifier[mid] = identifier[self] . identifier[in_packet] . identifier[read_uint16] () keyword[if] identifier[ret] != identifier[NC] . identifier[ERR_SUCCESS] : keyword[return] identifier[ret] identifier[evt] = identifier[event] . identifier[EventUnsuback] ( identifier[mid] ) identifier[self] . identifier[push_event] ( identifier[evt] ) keyword[return] identifier[NC] . identifier[ERR_SUCCESS]
def handle_unsuback(self): """Handle incoming UNSUBACK packet.""" self.logger.info('UNSUBACK received') (ret, mid) = self.in_packet.read_uint16() if ret != NC.ERR_SUCCESS: return ret # depends on [control=['if'], data=['ret']] evt = event.EventUnsuback(mid) self.push_event(evt) return NC.ERR_SUCCESS
def dimensions(filenames):
    """Return image dimensions for one or many files.

    :param filenames: a single filename (str), or a list of filenames
    :returns: a ``(x, y, filename)`` tuple when given a single filename,
        otherwise a sequence of such tuples (one per file)
    """
    # isinstance (rather than an exact ``type(...) is str`` check) also
    # accepts str subclasses, which the original comparison rejected.
    single = isinstance(filenames, str)
    if single:
        filenames = [filenames]
    dims = get_dimensions(filenames)
    # Unwrap the one-element result for the single-filename convenience case.
    return dims[0] if single else dims
def function[dimensions, parameter[filenames]]: constant[given a filename or list of filenames, return a tuple or sequence of tuples (x, y, filename)] variable[single] assign[=] compare[call[name[type], parameter[name[filenames]]] is name[str]] if name[single] begin[:] variable[filenames] assign[=] list[[<ast.Name object at 0x7da18fe93d00>]] variable[dims] assign[=] call[name[get_dimensions], parameter[name[filenames]]] if name[single] begin[:] variable[dims] assign[=] call[name[dims]][constant[0]] return[name[dims]]
keyword[def] identifier[dimensions] ( identifier[filenames] ): literal[string] identifier[single] = identifier[type] ( identifier[filenames] ) keyword[is] identifier[str] keyword[if] identifier[single] : identifier[filenames] =[ identifier[filenames] ] identifier[dims] = identifier[get_dimensions] ( identifier[filenames] ) keyword[if] identifier[single] : identifier[dims] = identifier[dims] [ literal[int] ] keyword[return] identifier[dims]
def dimensions(filenames): """given a filename or list of filenames, return a tuple or sequence of tuples (x, y, filename)""" single = type(filenames) is str if single: filenames = [filenames] # depends on [control=['if'], data=[]] dims = get_dimensions(filenames) if single: dims = dims[0] # depends on [control=['if'], data=[]] return dims
def constant(constanttype):
    """Look up a mathematical constant by (case-insensitive) name.

    Supported names: 'pi', 'e', 'tau', 'inf', 'nan', and 'phi'/'golden'
    for the golden ratio. Unknown names yield ``None``.
    """
    name = constanttype.lower()
    # Constants that map directly onto the math module.
    named = {
        'pi': math.pi,
        'e': math.e,
        'tau': math.tau,
        'inf': math.inf,
        'nan': math.nan,
    }
    if name in named:
        return named[name]
    if name in ('phi', 'golden'):
        # Golden ratio, computed from its closed form.
        return (1 + 5**0.5) / 2
    return None
def function[constant, parameter[constanttype]]: constant[ Get A Constant ] variable[constanttype] assign[=] call[name[constanttype].lower, parameter[]] if compare[name[constanttype] equal[==] constant[pi]] begin[:] return[name[math].pi]
keyword[def] identifier[constant] ( identifier[constanttype] ): literal[string] identifier[constanttype] = identifier[constanttype] . identifier[lower] () keyword[if] identifier[constanttype] == literal[string] : keyword[return] identifier[math] . identifier[pi] keyword[elif] identifier[constanttype] == literal[string] : keyword[return] identifier[math] . identifier[e] keyword[elif] identifier[constanttype] == literal[string] : keyword[return] identifier[math] . identifier[tau] keyword[elif] identifier[constanttype] == literal[string] : keyword[return] identifier[math] . identifier[inf] keyword[elif] identifier[constanttype] == literal[string] : keyword[return] identifier[math] . identifier[nan] keyword[elif] identifier[constanttype] keyword[in] [ literal[string] , literal[string] ]: keyword[return] ( literal[int] + literal[int] ** literal[int] )/ literal[int]
def constant(constanttype): """ Get A Constant """ constanttype = constanttype.lower() if constanttype == 'pi': return math.pi # depends on [control=['if'], data=[]] elif constanttype == 'e': return math.e # depends on [control=['if'], data=[]] elif constanttype == 'tau': return math.tau # depends on [control=['if'], data=[]] elif constanttype == 'inf': return math.inf # depends on [control=['if'], data=[]] elif constanttype == 'nan': return math.nan # depends on [control=['if'], data=[]] elif constanttype in ['phi', 'golden']: return (1 + 5 ** 0.5) / 2 # depends on [control=['if'], data=[]]
def serialize_search(self, pid_fetcher, search_result, links=None,
                     item_links_factory=None, **kwargs):
    """Serialize a search result.

    :param pid_fetcher: Persistent identifier fetcher.
    :param search_result: Elasticsearch search result.
    :param links: Dictionary of links to add to response.
    :param item_links_factory: Factory used to build per-hit links.
    """
    # Transform every hit, resolving its persistent identifier first.
    transformed_hits = [
        self.transform_search_hit(
            pid_fetcher(hit['_id'], hit['_source']),
            hit,
            links_factory=item_links_factory,
            **kwargs
        )
        for hit in search_result['hits']['hits']
    ]
    body = dict(
        hits=dict(
            hits=transformed_hits,
            total=search_result['hits']['total'],
        ),
        links=links or {},
        aggregations=search_result.get('aggregations', dict()),
    )
    return json.dumps(body, **self._format_args())
def function[serialize_search, parameter[self, pid_fetcher, search_result, links, item_links_factory]]: constant[Serialize a search result. :param pid_fetcher: Persistent identifier fetcher. :param search_result: Elasticsearch search result. :param links: Dictionary of links to add to response. ] return[call[name[json].dumps, parameter[call[name[dict], parameter[]]]]]
keyword[def] identifier[serialize_search] ( identifier[self] , identifier[pid_fetcher] , identifier[search_result] , identifier[links] = keyword[None] , identifier[item_links_factory] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[json] . identifier[dumps] ( identifier[dict] ( identifier[hits] = identifier[dict] ( identifier[hits] =[ identifier[self] . identifier[transform_search_hit] ( identifier[pid_fetcher] ( identifier[hit] [ literal[string] ], identifier[hit] [ literal[string] ]), identifier[hit] , identifier[links_factory] = identifier[item_links_factory] , ** identifier[kwargs] ) keyword[for] identifier[hit] keyword[in] identifier[search_result] [ literal[string] ][ literal[string] ]], identifier[total] = identifier[search_result] [ literal[string] ][ literal[string] ], ), identifier[links] = identifier[links] keyword[or] {}, identifier[aggregations] = identifier[search_result] . identifier[get] ( literal[string] , identifier[dict] ()), ),** identifier[self] . identifier[_format_args] ())
def serialize_search(self, pid_fetcher, search_result, links=None, item_links_factory=None, **kwargs): """Serialize a search result. :param pid_fetcher: Persistent identifier fetcher. :param search_result: Elasticsearch search result. :param links: Dictionary of links to add to response. """ return json.dumps(dict(hits=dict(hits=[self.transform_search_hit(pid_fetcher(hit['_id'], hit['_source']), hit, links_factory=item_links_factory, **kwargs) for hit in search_result['hits']['hits']], total=search_result['hits']['total']), links=links or {}, aggregations=search_result.get('aggregations', dict())), **self._format_args())
def overtimes(self):
    """
    Returns an ``int`` of the number of overtimes that were played
    during the game and 0 if the game finished at the end of
    regulation time.
    """
    raw = self._overtimes
    # A missing or empty field means the game ended in regulation.
    if raw is None or raw == '':
        return 0
    # A bare 'OT' (any case) denotes a single overtime period.
    if raw.lower() == 'ot':
        return 1
    # Otherwise extract the leading count from strings like '2OT'.
    digits = re.findall(r'\d+', raw)
    try:
        return int(digits[0])
    except (ValueError, IndexError):
        # No parseable number present; treat as no overtime.
        return 0
def function[overtimes, parameter[self]]: constant[ Returns an ``int`` of the number of overtimes that were played during the game and 0 if the game finished at the end of regulation time. ] if <ast.BoolOp object at 0x7da1b0b37ee0> begin[:] return[constant[0]] if compare[call[name[self]._overtimes.lower, parameter[]] equal[==] constant[ot]] begin[:] return[constant[1]] variable[num_overtimes] assign[=] call[name[re].findall, parameter[constant[\d+], name[self]._overtimes]] <ast.Try object at 0x7da1b0b35330>
keyword[def] identifier[overtimes] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_overtimes] == literal[string] keyword[or] identifier[self] . identifier[_overtimes] keyword[is] keyword[None] : keyword[return] literal[int] keyword[if] identifier[self] . identifier[_overtimes] . identifier[lower] ()== literal[string] : keyword[return] literal[int] identifier[num_overtimes] = identifier[re] . identifier[findall] ( literal[string] , identifier[self] . identifier[_overtimes] ) keyword[try] : keyword[return] identifier[int] ( identifier[num_overtimes] [ literal[int] ]) keyword[except] ( identifier[ValueError] , identifier[IndexError] ): keyword[return] literal[int]
def overtimes(self): """ Returns an ``int`` of the number of overtimes that were played during the game and 0 if the game finished at the end of regulation time. """ if self._overtimes == '' or self._overtimes is None: return 0 # depends on [control=['if'], data=[]] if self._overtimes.lower() == 'ot': return 1 # depends on [control=['if'], data=[]] num_overtimes = re.findall('\\d+', self._overtimes) try: return int(num_overtimes[0]) # depends on [control=['try'], data=[]] except (ValueError, IndexError): return 0 # depends on [control=['except'], data=[]]
def build(cls, path, tag=None, dockerfile=None):
    """
    Build an image from the Dockerfile found under ``path``.

    :param path : str, path to the directory containing the Dockerfile
    :param tag: str, A tag to add to the final image
    :param dockerfile: str, path within the build context to the Dockerfile
    :return: instance of DockerImage
    """
    if not path:
        raise ConuException('Please specify path to the directory containing the Dockerfile')

    client = get_client()
    output = list(client.build(path, rm=True, tag=tag,
                               dockerfile=dockerfile, quiet=True))
    if not output:
        raise ConuException('Failed to get ID of image')
    # A quiet build is expected to emit exactly one line: the image ID.
    if len(output) > 1:
        raise ConuException('Build failed: ' + str(output))

    # get ID from output
    # b'{"stream":"sha256:39c7bac4e2da37983203df4fcf612a02de9e6f6456a7f3434d1fccbc9ad639a5\\n"}\r\n'
    decoded = output[0].decode('utf-8')
    prefix = '{"stream":"'
    suffix = '\\n"}\r\n'
    if not (decoded.startswith(prefix) and decoded.endswith(suffix)):
        raise ConuException('Failed to parse ID from ' + decoded)
    image_id = decoded[len(prefix):-len(suffix)]
    return cls(None, identifier=image_id)
def function[build, parameter[cls, path, tag, dockerfile]]: constant[ Build the image from the provided dockerfile in path :param path : str, path to the directory containing the Dockerfile :param tag: str, A tag to add to the final image :param dockerfile: str, path within the build context to the Dockerfile :return: instance of DockerImage ] if <ast.UnaryOp object at 0x7da1b12c3190> begin[:] <ast.Raise object at 0x7da1b12c2f50> variable[client] assign[=] call[name[get_client], parameter[]] variable[response] assign[=] <ast.ListComp object at 0x7da1b12c3f10> if <ast.UnaryOp object at 0x7da1b11a7d30> begin[:] <ast.Raise object at 0x7da1b11a4e50> if compare[call[name[len], parameter[name[response]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b11a6c50> variable[response_utf] assign[=] call[call[name[response]][constant[0]].decode, parameter[constant[utf-8]]] if <ast.BoolOp object at 0x7da1b121a3b0> begin[:] <ast.Raise object at 0x7da1b11a4280> variable[image_id] assign[=] call[name[response_utf]][<ast.Slice object at 0x7da1b11a4670>] return[call[name[cls], parameter[constant[None]]]]
keyword[def] identifier[build] ( identifier[cls] , identifier[path] , identifier[tag] = keyword[None] , identifier[dockerfile] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[path] : keyword[raise] identifier[ConuException] ( literal[string] ) identifier[client] = identifier[get_client] () identifier[response] =[ identifier[line] keyword[for] identifier[line] keyword[in] identifier[client] . identifier[build] ( identifier[path] , identifier[rm] = keyword[True] , identifier[tag] = identifier[tag] , identifier[dockerfile] = identifier[dockerfile] , identifier[quiet] = keyword[True] )] keyword[if] keyword[not] identifier[response] : keyword[raise] identifier[ConuException] ( literal[string] ) keyword[if] identifier[len] ( identifier[response] )> literal[int] : keyword[raise] identifier[ConuException] ( literal[string] + identifier[str] ( identifier[response] )) identifier[response_utf] = identifier[response] [ literal[int] ]. identifier[decode] ( literal[string] ) keyword[if] identifier[response_utf] [: literal[int] ]!= literal[string] keyword[or] identifier[response_utf] [- literal[int] :]!= literal[string] : keyword[raise] identifier[ConuException] ( literal[string] + identifier[response_utf] ) identifier[image_id] = identifier[response_utf] [ literal[int] :- literal[int] ] keyword[return] identifier[cls] ( keyword[None] , identifier[identifier] = identifier[image_id] )
def build(cls, path, tag=None, dockerfile=None): """ Build the image from the provided dockerfile in path :param path : str, path to the directory containing the Dockerfile :param tag: str, A tag to add to the final image :param dockerfile: str, path within the build context to the Dockerfile :return: instance of DockerImage """ if not path: raise ConuException('Please specify path to the directory containing the Dockerfile') # depends on [control=['if'], data=[]] client = get_client() response = [line for line in client.build(path, rm=True, tag=tag, dockerfile=dockerfile, quiet=True)] if not response: raise ConuException('Failed to get ID of image') # depends on [control=['if'], data=[]] # The expected output is just one line with image ID if len(response) > 1: raise ConuException('Build failed: ' + str(response)) # depends on [control=['if'], data=[]] # get ID from output # b'{"stream":"sha256:39c7bac4e2da37983203df4fcf612a02de9e6f6456a7f3434d1fccbc9ad639a5\\n"}\r\n' response_utf = response[0].decode('utf-8') if response_utf[:11] != '{"stream":"' or response_utf[-6:] != '\\n"}\r\n': raise ConuException('Failed to parse ID from ' + response_utf) # depends on [control=['if'], data=[]] image_id = response_utf[11:-6] return cls(None, identifier=image_id)
def calcMD5(path):
    """
    Yield the MD5 hex digest of the file at ``path`` (computed by the
    external ``md5sum`` tool), then ``False`` as a terminator.

    If ``path`` does not exist, only ``False`` is yielded.

    :param path: path to the file to hash
    """
    # Bail out early for missing files instead of letting md5sum fail.
    # (`not os.path.exists(...)` replaces the non-idiomatic `is False` check.)
    if not os.path.exists(path):
        yield False
    else:
        p = Popen(['md5sum', path], stdout=PIPE)
        for line in p.communicate()[0].splitlines():
            # md5sum prints '<digest>  <path>'; the digest is field 0.
            yield line.decode('ascii').strip().split()[0]
        p.wait()
        yield False
def function[calcMD5, parameter[path]]: constant[ calc MD5 based on path ] if compare[call[name[os].path.exists, parameter[name[path]]] is constant[False]] begin[:] <ast.Yield object at 0x7da18f720070>
keyword[def] identifier[calcMD5] ( identifier[path] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ) keyword[is] keyword[False] : keyword[yield] keyword[False] keyword[else] : identifier[command] =[ literal[string] , identifier[path] ] identifier[p] = identifier[Popen] ( identifier[command] , identifier[stdout] = identifier[PIPE] ) keyword[for] identifier[line] keyword[in] identifier[p] . identifier[communicate] ()[ literal[int] ]. identifier[splitlines] (): keyword[yield] identifier[line] . identifier[decode] ( literal[string] ). identifier[strip] (). identifier[split] ()[ literal[int] ] identifier[p] . identifier[wait] () keyword[yield] keyword[False]
def calcMD5(path): """ calc MD5 based on path """ # check that file exists if os.path.exists(path) is False: yield False # depends on [control=['if'], data=[]] else: command = ['md5sum', path] p = Popen(command, stdout=PIPE) for line in p.communicate()[0].splitlines(): yield line.decode('ascii').strip().split()[0] # depends on [control=['for'], data=['line']] p.wait() yield False
def prepare_command(self):
    """
    Determines if the literal ``ansible`` or ``ansible-playbook`` commands
    are given and if not calls
    :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
    """
    try:
        # A raw command line was provided directly in the 'args' file.
        raw_args = self.loader.load_file('args', string_types)
        self.command = shlex.split(raw_args.decode('utf-8'))
        self.execution_mode = ExecutionMode.RAW
    except ConfigurationError:
        # No raw args present; synthesize the ansible invocation instead.
        self.command = self.generate_ansible_command()
def function[prepare_command, parameter[self]]: constant[ Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command` ] <ast.Try object at 0x7da1b1a64a90>
keyword[def] identifier[prepare_command] ( identifier[self] ): literal[string] keyword[try] : identifier[cmdline_args] = identifier[self] . identifier[loader] . identifier[load_file] ( literal[string] , identifier[string_types] ) identifier[self] . identifier[command] = identifier[shlex] . identifier[split] ( identifier[cmdline_args] . identifier[decode] ( literal[string] )) identifier[self] . identifier[execution_mode] = identifier[ExecutionMode] . identifier[RAW] keyword[except] identifier[ConfigurationError] : identifier[self] . identifier[command] = identifier[self] . identifier[generate_ansible_command] ()
def prepare_command(self): """ Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command` """ try: cmdline_args = self.loader.load_file('args', string_types) self.command = shlex.split(cmdline_args.decode('utf-8')) self.execution_mode = ExecutionMode.RAW # depends on [control=['try'], data=[]] except ConfigurationError: self.command = self.generate_ansible_command() # depends on [control=['except'], data=[]]
def errorhandler(self, code_or_exception):
    """A decorator that is used to register a function give a given
    error code.  Example::

        @app.errorhandler(404)
        def page_not_found(error):
            return 'This page does not exist', 404

    You can also register handlers for arbitrary exceptions::

        @app.errorhandler(DatabaseError)
        def special_exception_handler(error):
            return 'Database connection failed', 500

    You can also register a function as error handler without using the
    :meth:`errorhandler` decorator, e.g. by assigning directly into
    :attr:`error_handler_spec`; that is discouraged, however, as it
    requires fiddling with nested dictionaries and the special case for
    arbitrary exception types.  The first `None` key there refers to the
    active blueprint; application-wide handlers use `None` as well.

    .. versionadded:: 0.7
       One can now additionally also register custom exception types
       that do not necessarily have to be a subclass of the
       :class:`~werkzeug.exceptions.HTTPException` class.

    :param code: the code as integer for the handler
    """
    def register(handler):
        # Application-wide handler: blueprint key is None.
        self._register_error_handler(None, code_or_exception, handler)
        return handler
    return register
def function[errorhandler, parameter[self, code_or_exception]]: constant[A decorator that is used to register a function give a given error code. Example:: @app.errorhandler(404) def page_not_found(error): return 'This page does not exist', 404 You can also register handlers for arbitrary exceptions:: @app.errorhandler(DatabaseError) def special_exception_handler(error): return 'Database connection failed', 500 You can also register a function as error handler without using the :meth:`errorhandler` decorator. The following example is equivalent to the one above:: def page_not_found(error): return 'This page does not exist', 404 app.error_handler_spec[None][404] = page_not_found Setting error handlers via assignments to :attr:`error_handler_spec` however is discouraged as it requires fiddling with nested dictionaries and the special case for arbitrary exception types. The first `None` refers to the active blueprint. If the error handler should be application wide `None` shall be used. .. versionadded:: 0.7 One can now additionally also register custom exception types that do not necessarily have to be a subclass of the :class:`~werkzeug.exceptions.HTTPException` class. :param code: the code as integer for the handler ] def function[decorator, parameter[f]]: call[name[self]._register_error_handler, parameter[constant[None], name[code_or_exception], name[f]]] return[name[f]] return[name[decorator]]
keyword[def] identifier[errorhandler] ( identifier[self] , identifier[code_or_exception] ): literal[string] keyword[def] identifier[decorator] ( identifier[f] ): identifier[self] . identifier[_register_error_handler] ( keyword[None] , identifier[code_or_exception] , identifier[f] ) keyword[return] identifier[f] keyword[return] identifier[decorator]
def errorhandler(self, code_or_exception): """A decorator that is used to register a function give a given error code. Example:: @app.errorhandler(404) def page_not_found(error): return 'This page does not exist', 404 You can also register handlers for arbitrary exceptions:: @app.errorhandler(DatabaseError) def special_exception_handler(error): return 'Database connection failed', 500 You can also register a function as error handler without using the :meth:`errorhandler` decorator. The following example is equivalent to the one above:: def page_not_found(error): return 'This page does not exist', 404 app.error_handler_spec[None][404] = page_not_found Setting error handlers via assignments to :attr:`error_handler_spec` however is discouraged as it requires fiddling with nested dictionaries and the special case for arbitrary exception types. The first `None` refers to the active blueprint. If the error handler should be application wide `None` shall be used. .. versionadded:: 0.7 One can now additionally also register custom exception types that do not necessarily have to be a subclass of the :class:`~werkzeug.exceptions.HTTPException` class. :param code: the code as integer for the handler """ def decorator(f): self._register_error_handler(None, code_or_exception, f) return f return decorator
def from_histogram(cls, histogram, bin_edges, axis_names=None): """Make a HistdD from numpy histogram + bin edges :param histogram: Initial histogram :param bin_edges: x bin edges of histogram, y bin edges, ... :return: Histnd instance """ bin_edges = np.array(bin_edges) self = cls(bins=bin_edges, axis_names=axis_names) self.histogram = histogram return self
def function[from_histogram, parameter[cls, histogram, bin_edges, axis_names]]: constant[Make a HistdD from numpy histogram + bin edges :param histogram: Initial histogram :param bin_edges: x bin edges of histogram, y bin edges, ... :return: Histnd instance ] variable[bin_edges] assign[=] call[name[np].array, parameter[name[bin_edges]]] variable[self] assign[=] call[name[cls], parameter[]] name[self].histogram assign[=] name[histogram] return[name[self]]
keyword[def] identifier[from_histogram] ( identifier[cls] , identifier[histogram] , identifier[bin_edges] , identifier[axis_names] = keyword[None] ): literal[string] identifier[bin_edges] = identifier[np] . identifier[array] ( identifier[bin_edges] ) identifier[self] = identifier[cls] ( identifier[bins] = identifier[bin_edges] , identifier[axis_names] = identifier[axis_names] ) identifier[self] . identifier[histogram] = identifier[histogram] keyword[return] identifier[self]
def from_histogram(cls, histogram, bin_edges, axis_names=None): """Make a HistdD from numpy histogram + bin edges :param histogram: Initial histogram :param bin_edges: x bin edges of histogram, y bin edges, ... :return: Histnd instance """ bin_edges = np.array(bin_edges) self = cls(bins=bin_edges, axis_names=axis_names) self.histogram = histogram return self
def reentrancies(self): """ Return a mapping of variables to their re-entrancy count. A re-entrancy is when more than one edge selects a node as its target. These graphs are rooted, so the top node always has an implicit entrancy. Only nodes with re-entrancies are reported, and the count is only for the entrant edges beyond the first. Also note that these counts are for the interpreted graph, not for the linearized form, so inverted edges are always re-entrant. """ entrancies = defaultdict(int) entrancies[self.top] += 1 # implicit entrancy to top for t in self.edges(): entrancies[t.target] += 1 return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
def function[reentrancies, parameter[self]]: constant[ Return a mapping of variables to their re-entrancy count. A re-entrancy is when more than one edge selects a node as its target. These graphs are rooted, so the top node always has an implicit entrancy. Only nodes with re-entrancies are reported, and the count is only for the entrant edges beyond the first. Also note that these counts are for the interpreted graph, not for the linearized form, so inverted edges are always re-entrant. ] variable[entrancies] assign[=] call[name[defaultdict], parameter[name[int]]] <ast.AugAssign object at 0x7da18bccb700> for taget[name[t]] in starred[call[name[self].edges, parameter[]]] begin[:] <ast.AugAssign object at 0x7da18dc07190> return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18dc045e0>]]]
keyword[def] identifier[reentrancies] ( identifier[self] ): literal[string] identifier[entrancies] = identifier[defaultdict] ( identifier[int] ) identifier[entrancies] [ identifier[self] . identifier[top] ]+= literal[int] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[edges] (): identifier[entrancies] [ identifier[t] . identifier[target] ]+= literal[int] keyword[return] identifier[dict] (( identifier[v] , identifier[cnt] - literal[int] ) keyword[for] identifier[v] , identifier[cnt] keyword[in] identifier[entrancies] . identifier[items] () keyword[if] identifier[cnt] >= literal[int] )
def reentrancies(self): """ Return a mapping of variables to their re-entrancy count. A re-entrancy is when more than one edge selects a node as its target. These graphs are rooted, so the top node always has an implicit entrancy. Only nodes with re-entrancies are reported, and the count is only for the entrant edges beyond the first. Also note that these counts are for the interpreted graph, not for the linearized form, so inverted edges are always re-entrant. """ entrancies = defaultdict(int) entrancies[self.top] += 1 # implicit entrancy to top for t in self.edges(): entrancies[t.target] += 1 # depends on [control=['for'], data=['t']] return dict(((v, cnt - 1) for (v, cnt) in entrancies.items() if cnt >= 2))
def put(self, filename, handle): """ Upload a distribution archive to the configured Amazon S3 bucket. If the :attr:`~.Config.s3_cache_readonly` configuration option is enabled this method does nothing. :param filename: The filename of the distribution archive (a string). :param handle: A file-like object that provides access to the distribution archive. :raises: :exc:`.CacheBackendError` when any underlying method fails. """ if self.config.s3_cache_readonly: logger.info('Skipping upload to S3 bucket (using S3 in read only mode).') else: timer = Timer() self.check_prerequisites() with PatchedBotoConfig(): from boto.s3.key import Key raw_key = self.get_cache_key(filename) logger.info("Uploading distribution archive to S3 bucket: %s", raw_key) key = Key(self.s3_bucket) key.key = raw_key try: key.set_contents_from_file(handle) except Exception as e: logger.info("Encountered error writing to S3 bucket, " "falling back to read only mode (exception: %s)", e) self.config.s3_cache_readonly = True else: logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)
def function[put, parameter[self, filename, handle]]: constant[ Upload a distribution archive to the configured Amazon S3 bucket. If the :attr:`~.Config.s3_cache_readonly` configuration option is enabled this method does nothing. :param filename: The filename of the distribution archive (a string). :param handle: A file-like object that provides access to the distribution archive. :raises: :exc:`.CacheBackendError` when any underlying method fails. ] if name[self].config.s3_cache_readonly begin[:] call[name[logger].info, parameter[constant[Skipping upload to S3 bucket (using S3 in read only mode).]]]
keyword[def] identifier[put] ( identifier[self] , identifier[filename] , identifier[handle] ): literal[string] keyword[if] identifier[self] . identifier[config] . identifier[s3_cache_readonly] : identifier[logger] . identifier[info] ( literal[string] ) keyword[else] : identifier[timer] = identifier[Timer] () identifier[self] . identifier[check_prerequisites] () keyword[with] identifier[PatchedBotoConfig] (): keyword[from] identifier[boto] . identifier[s3] . identifier[key] keyword[import] identifier[Key] identifier[raw_key] = identifier[self] . identifier[get_cache_key] ( identifier[filename] ) identifier[logger] . identifier[info] ( literal[string] , identifier[raw_key] ) identifier[key] = identifier[Key] ( identifier[self] . identifier[s3_bucket] ) identifier[key] . identifier[key] = identifier[raw_key] keyword[try] : identifier[key] . identifier[set_contents_from_file] ( identifier[handle] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[info] ( literal[string] literal[string] , identifier[e] ) identifier[self] . identifier[config] . identifier[s3_cache_readonly] = keyword[True] keyword[else] : identifier[logger] . identifier[info] ( literal[string] , identifier[timer] )
def put(self, filename, handle): """ Upload a distribution archive to the configured Amazon S3 bucket. If the :attr:`~.Config.s3_cache_readonly` configuration option is enabled this method does nothing. :param filename: The filename of the distribution archive (a string). :param handle: A file-like object that provides access to the distribution archive. :raises: :exc:`.CacheBackendError` when any underlying method fails. """ if self.config.s3_cache_readonly: logger.info('Skipping upload to S3 bucket (using S3 in read only mode).') # depends on [control=['if'], data=[]] else: timer = Timer() self.check_prerequisites() with PatchedBotoConfig(): from boto.s3.key import Key raw_key = self.get_cache_key(filename) logger.info('Uploading distribution archive to S3 bucket: %s', raw_key) key = Key(self.s3_bucket) key.key = raw_key try: key.set_contents_from_file(handle) # depends on [control=['try'], data=[]] except Exception as e: logger.info('Encountered error writing to S3 bucket, falling back to read only mode (exception: %s)', e) self.config.s3_cache_readonly = True # depends on [control=['except'], data=['e']] else: logger.info('Finished uploading distribution archive to S3 bucket in %s.', timer) # depends on [control=['with'], data=[]]
def column(environment, book, sheet_name, sheet_source, column_source, column_key): """ Returns an array of values from column from a different dataset, ordered as the key. """ a = book.sheets[sheet_source] b = book.sheets[sheet_name] return environment.copy([a.get(**{column_key: row[column_key]})[column_source] for row in b.all()])
def function[column, parameter[environment, book, sheet_name, sheet_source, column_source, column_key]]: constant[ Returns an array of values from column from a different dataset, ordered as the key. ] variable[a] assign[=] call[name[book].sheets][name[sheet_source]] variable[b] assign[=] call[name[book].sheets][name[sheet_name]] return[call[name[environment].copy, parameter[<ast.ListComp object at 0x7da1b15f2c50>]]]
keyword[def] identifier[column] ( identifier[environment] , identifier[book] , identifier[sheet_name] , identifier[sheet_source] , identifier[column_source] , identifier[column_key] ): literal[string] identifier[a] = identifier[book] . identifier[sheets] [ identifier[sheet_source] ] identifier[b] = identifier[book] . identifier[sheets] [ identifier[sheet_name] ] keyword[return] identifier[environment] . identifier[copy] ([ identifier[a] . identifier[get] (**{ identifier[column_key] : identifier[row] [ identifier[column_key] ]})[ identifier[column_source] ] keyword[for] identifier[row] keyword[in] identifier[b] . identifier[all] ()])
def column(environment, book, sheet_name, sheet_source, column_source, column_key): """ Returns an array of values from column from a different dataset, ordered as the key. """ a = book.sheets[sheet_source] b = book.sheets[sheet_name] return environment.copy([a.get(**{column_key: row[column_key]})[column_source] for row in b.all()])
def _normalize_key(self, key, unknown_ok=True): """Return the normalized version of KEY. KEY may be a frameid (a string), or a Frame class object. If KEY corresponds to a registered frameid, then that frameid is returned. Otherwise, either KeyError is raised, or KEY is returned verbatim, depending on the value of UNKNOWN_OK. """ if Frames.is_frame_class(key): key = key.frameid if isinstance(key, str): if not self._is_frame_id(key): raise KeyError("{0}: Invalid frame id".format(key)) if key not in self.known_frames: if unknown_ok: warn("{0}: Unknown frame id".format(key), UnknownFrameWarning) else: raise KeyError("{0}: Unknown frame id".format(key)) return key
def function[_normalize_key, parameter[self, key, unknown_ok]]: constant[Return the normalized version of KEY. KEY may be a frameid (a string), or a Frame class object. If KEY corresponds to a registered frameid, then that frameid is returned. Otherwise, either KeyError is raised, or KEY is returned verbatim, depending on the value of UNKNOWN_OK. ] if call[name[Frames].is_frame_class, parameter[name[key]]] begin[:] variable[key] assign[=] name[key].frameid if call[name[isinstance], parameter[name[key], name[str]]] begin[:] if <ast.UnaryOp object at 0x7da1b269f670> begin[:] <ast.Raise object at 0x7da1b269e800> if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].known_frames] begin[:] if name[unknown_ok] begin[:] call[name[warn], parameter[call[constant[{0}: Unknown frame id].format, parameter[name[key]]], name[UnknownFrameWarning]]] return[name[key]]
keyword[def] identifier[_normalize_key] ( identifier[self] , identifier[key] , identifier[unknown_ok] = keyword[True] ): literal[string] keyword[if] identifier[Frames] . identifier[is_frame_class] ( identifier[key] ): identifier[key] = identifier[key] . identifier[frameid] keyword[if] identifier[isinstance] ( identifier[key] , identifier[str] ): keyword[if] keyword[not] identifier[self] . identifier[_is_frame_id] ( identifier[key] ): keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[key] )) keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[known_frames] : keyword[if] identifier[unknown_ok] : identifier[warn] ( literal[string] . identifier[format] ( identifier[key] ), identifier[UnknownFrameWarning] ) keyword[else] : keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[key] )) keyword[return] identifier[key]
def _normalize_key(self, key, unknown_ok=True): """Return the normalized version of KEY. KEY may be a frameid (a string), or a Frame class object. If KEY corresponds to a registered frameid, then that frameid is returned. Otherwise, either KeyError is raised, or KEY is returned verbatim, depending on the value of UNKNOWN_OK. """ if Frames.is_frame_class(key): key = key.frameid # depends on [control=['if'], data=[]] if isinstance(key, str): if not self._is_frame_id(key): raise KeyError('{0}: Invalid frame id'.format(key)) # depends on [control=['if'], data=[]] if key not in self.known_frames: if unknown_ok: warn('{0}: Unknown frame id'.format(key), UnknownFrameWarning) # depends on [control=['if'], data=[]] else: raise KeyError('{0}: Unknown frame id'.format(key)) # depends on [control=['if'], data=['key']] # depends on [control=['if'], data=[]] return key
def update_dns_zone_record(env, zone_id, **kwargs): """Create a Route53 CNAME record in _env_ zone. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. Keyword Args: dns_name (str): FQDN of application's dns entry to add/update. dns_name_aws (str): FQDN of AWS resource dns_ttl (int): DNS time-to-live (ttl) """ client = boto3.Session(profile_name=env).client('route53') response = {} hosted_zone_info = client.get_hosted_zone(Id=zone_id) zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.') dns_name = kwargs.get('dns_name') if dns_name and dns_name.endswith(zone_name): dns_name_aws = kwargs.get('dns_name_aws') # This is what will be added to DNS dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs) LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) try: response = client.change_resource_record_sets( HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json), ) LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) except botocore.exceptions.ClientError as error: LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) LOG.debug(error) else: LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name) LOG.debug('Route53 JSON Response: \n%s', pformat(response))
def function[update_dns_zone_record, parameter[env, zone_id]]: constant[Create a Route53 CNAME record in _env_ zone. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. Keyword Args: dns_name (str): FQDN of application's dns entry to add/update. dns_name_aws (str): FQDN of AWS resource dns_ttl (int): DNS time-to-live (ttl) ] variable[client] assign[=] call[call[name[boto3].Session, parameter[]].client, parameter[constant[route53]]] variable[response] assign[=] dictionary[[], []] variable[hosted_zone_info] assign[=] call[name[client].get_hosted_zone, parameter[]] variable[zone_name] assign[=] call[call[call[name[hosted_zone_info]][constant[HostedZone]]][constant[Name]].rstrip, parameter[constant[.]]] variable[dns_name] assign[=] call[name[kwargs].get, parameter[constant[dns_name]]] if <ast.BoolOp object at 0x7da18f58dc00> begin[:] variable[dns_name_aws] assign[=] call[name[kwargs].get, parameter[constant[dns_name_aws]]] variable[dns_json] assign[=] call[name[get_template], parameter[]] call[name[LOG].info, parameter[constant[Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)], name[dns_name], name[dns_name_aws], name[zone_id], name[zone_name]]] <ast.Try object at 0x7da20c795cc0> call[name[LOG].debug, parameter[constant[Route53 JSON Response: %s], call[name[pformat], parameter[name[response]]]]]
keyword[def] identifier[update_dns_zone_record] ( identifier[env] , identifier[zone_id] ,** identifier[kwargs] ): literal[string] identifier[client] = identifier[boto3] . identifier[Session] ( identifier[profile_name] = identifier[env] ). identifier[client] ( literal[string] ) identifier[response] ={} identifier[hosted_zone_info] = identifier[client] . identifier[get_hosted_zone] ( identifier[Id] = identifier[zone_id] ) identifier[zone_name] = identifier[hosted_zone_info] [ literal[string] ][ literal[string] ]. identifier[rstrip] ( literal[string] ) identifier[dns_name] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[if] identifier[dns_name] keyword[and] identifier[dns_name] . identifier[endswith] ( identifier[zone_name] ): identifier[dns_name_aws] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[dns_json] = identifier[get_template] ( identifier[template_file] = literal[string] ,** identifier[kwargs] ) identifier[LOG] . identifier[info] ( literal[string] , identifier[dns_name] , identifier[dns_name_aws] , identifier[zone_id] , identifier[zone_name] ) keyword[try] : identifier[response] = identifier[client] . identifier[change_resource_record_sets] ( identifier[HostedZoneId] = identifier[zone_id] , identifier[ChangeBatch] = identifier[json] . identifier[loads] ( identifier[dns_json] ),) identifier[LOG] . identifier[info] ( literal[string] , identifier[dns_name] , identifier[dns_name_aws] , identifier[zone_id] , identifier[zone_name] ) keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[error] : identifier[LOG] . identifier[info] ( literal[string] , identifier[dns_name] , identifier[dns_name_aws] , identifier[zone_id] , identifier[zone_name] ) identifier[LOG] . identifier[debug] ( identifier[error] ) keyword[else] : identifier[LOG] . identifier[info] ( literal[string] , identifier[dns_name] , identifier[zone_id] , identifier[zone_name] ) identifier[LOG] . 
identifier[debug] ( literal[string] , identifier[pformat] ( identifier[response] ))
def update_dns_zone_record(env, zone_id, **kwargs): """Create a Route53 CNAME record in _env_ zone. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. Keyword Args: dns_name (str): FQDN of application's dns entry to add/update. dns_name_aws (str): FQDN of AWS resource dns_ttl (int): DNS time-to-live (ttl) """ client = boto3.Session(profile_name=env).client('route53') response = {} hosted_zone_info = client.get_hosted_zone(Id=zone_id) zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.') dns_name = kwargs.get('dns_name') if dns_name and dns_name.endswith(zone_name): dns_name_aws = kwargs.get('dns_name_aws') # This is what will be added to DNS dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs) LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) try: response = client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json)) LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) # depends on [control=['try'], data=[]] except botocore.exceptions.ClientError as error: LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name) LOG.debug(error) # depends on [control=['except'], data=['error']] # depends on [control=['if'], data=[]] else: LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name) LOG.debug('Route53 JSON Response: \n%s', pformat(response))
def clean(self): """ Clean form fields prior to database entry. In this case, the major cleaning operation is substituting a None value for a blank value in the Catalog field. """ cleaned_data = super(EnterpriseCustomerAdminForm, self).clean() if 'catalog' in cleaned_data and not cleaned_data['catalog']: cleaned_data['catalog'] = None return cleaned_data
def function[clean, parameter[self]]: constant[ Clean form fields prior to database entry. In this case, the major cleaning operation is substituting a None value for a blank value in the Catalog field. ] variable[cleaned_data] assign[=] call[call[name[super], parameter[name[EnterpriseCustomerAdminForm], name[self]]].clean, parameter[]] if <ast.BoolOp object at 0x7da1b0125660> begin[:] call[name[cleaned_data]][constant[catalog]] assign[=] constant[None] return[name[cleaned_data]]
keyword[def] identifier[clean] ( identifier[self] ): literal[string] identifier[cleaned_data] = identifier[super] ( identifier[EnterpriseCustomerAdminForm] , identifier[self] ). identifier[clean] () keyword[if] literal[string] keyword[in] identifier[cleaned_data] keyword[and] keyword[not] identifier[cleaned_data] [ literal[string] ]: identifier[cleaned_data] [ literal[string] ]= keyword[None] keyword[return] identifier[cleaned_data]
def clean(self): """ Clean form fields prior to database entry. In this case, the major cleaning operation is substituting a None value for a blank value in the Catalog field. """ cleaned_data = super(EnterpriseCustomerAdminForm, self).clean() if 'catalog' in cleaned_data and (not cleaned_data['catalog']): cleaned_data['catalog'] = None # depends on [control=['if'], data=[]] return cleaned_data
def _parse_object(value): """Coerce value into a dict. :param str value: Value to parse. :returns: dict or None if the value is not a JSON object :raises: TypeError or ValueError if value appears to be an object but can't be parsed as JSON. """ value = value.lstrip() if not value or value[0] not in _brace_strings: return None return json.loads(value)
def function[_parse_object, parameter[value]]: constant[Coerce value into a dict. :param str value: Value to parse. :returns: dict or None if the value is not a JSON object :raises: TypeError or ValueError if value appears to be an object but can't be parsed as JSON. ] variable[value] assign[=] call[name[value].lstrip, parameter[]] if <ast.BoolOp object at 0x7da1b1803010> begin[:] return[constant[None]] return[call[name[json].loads, parameter[name[value]]]]
keyword[def] identifier[_parse_object] ( identifier[value] ): literal[string] identifier[value] = identifier[value] . identifier[lstrip] () keyword[if] keyword[not] identifier[value] keyword[or] identifier[value] [ literal[int] ] keyword[not] keyword[in] identifier[_brace_strings] : keyword[return] keyword[None] keyword[return] identifier[json] . identifier[loads] ( identifier[value] )
def _parse_object(value): """Coerce value into a dict. :param str value: Value to parse. :returns: dict or None if the value is not a JSON object :raises: TypeError or ValueError if value appears to be an object but can't be parsed as JSON. """ value = value.lstrip() if not value or value[0] not in _brace_strings: return None # depends on [control=['if'], data=[]] return json.loads(value)
def unmark_featured(self, request, queryset): """ Un-Mark selected featured posts. """ queryset.update(featured=False) self.message_user( request, _('Selected entries are no longer marked as featured.'))
def function[unmark_featured, parameter[self, request, queryset]]: constant[ Un-Mark selected featured posts. ] call[name[queryset].update, parameter[]] call[name[self].message_user, parameter[name[request], call[name[_], parameter[constant[Selected entries are no longer marked as featured.]]]]]
keyword[def] identifier[unmark_featured] ( identifier[self] , identifier[request] , identifier[queryset] ): literal[string] identifier[queryset] . identifier[update] ( identifier[featured] = keyword[False] ) identifier[self] . identifier[message_user] ( identifier[request] , identifier[_] ( literal[string] ))
def unmark_featured(self, request, queryset): """ Un-Mark selected featured posts. """ queryset.update(featured=False) self.message_user(request, _('Selected entries are no longer marked as featured.'))
def append_onto_file(self, file_name, msg): """ Appends msg onto the Given File """ with open(file_name, "a") as heart_file: heart_file.write(msg) heart_file.close()
def function[append_onto_file, parameter[self, file_name, msg]]: constant[ Appends msg onto the Given File ] with call[name[open], parameter[name[file_name], constant[a]]] begin[:] call[name[heart_file].write, parameter[name[msg]]] call[name[heart_file].close, parameter[]]
keyword[def] identifier[append_onto_file] ( identifier[self] , identifier[file_name] , identifier[msg] ): literal[string] keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[heart_file] : identifier[heart_file] . identifier[write] ( identifier[msg] ) identifier[heart_file] . identifier[close] ()
def append_onto_file(self, file_name, msg): """ Appends msg onto the Given File """ with open(file_name, 'a') as heart_file: heart_file.write(msg) heart_file.close() # depends on [control=['with'], data=['heart_file']]
def addReliableListener(self, listener, style=iaxiom.LOCAL): """ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state. """ existing = self.store.findUnique(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.listener == listener), default=None) if existing is not None: return existing for work in self.store.query(self.workUnitType, sort=self.workUnitType.storeID.descending, limit=1): forwardMark = work.storeID backwardMark = work.storeID + 1 break else: forwardMark = 0 backwardMark = 0 if self.scheduled is None: self.scheduled = extime.Time() iaxiom.IScheduler(self.store).schedule(self, self.scheduled) return _ReliableListener(store=self.store, processor=self, listener=listener, forwardMark=forwardMark, backwardMark=backwardMark, style=style)
def function[addReliableListener, parameter[self, listener, style]]: constant[ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state. ] variable[existing] assign[=] call[name[self].store.findUnique, parameter[name[_ReliableListener], call[name[attributes].AND, parameter[compare[name[_ReliableListener].processor equal[==] name[self]], compare[name[_ReliableListener].listener equal[==] name[listener]]]]]] if compare[name[existing] is_not constant[None]] begin[:] return[name[existing]] for taget[name[work]] in starred[call[name[self].store.query, parameter[name[self].workUnitType]]] begin[:] variable[forwardMark] assign[=] name[work].storeID variable[backwardMark] assign[=] binary_operation[name[work].storeID + constant[1]] break if compare[name[self].scheduled is constant[None]] begin[:] name[self].scheduled assign[=] call[name[extime].Time, parameter[]] call[call[name[iaxiom].IScheduler, parameter[name[self].store]].schedule, parameter[name[self], name[self].scheduled]] return[call[name[_ReliableListener], parameter[]]]
keyword[def] identifier[addReliableListener] ( identifier[self] , identifier[listener] , identifier[style] = identifier[iaxiom] . identifier[LOCAL] ): literal[string] identifier[existing] = identifier[self] . identifier[store] . identifier[findUnique] ( identifier[_ReliableListener] , identifier[attributes] . identifier[AND] ( identifier[_ReliableListener] . identifier[processor] == identifier[self] , identifier[_ReliableListener] . identifier[listener] == identifier[listener] ), identifier[default] = keyword[None] ) keyword[if] identifier[existing] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[existing] keyword[for] identifier[work] keyword[in] identifier[self] . identifier[store] . identifier[query] ( identifier[self] . identifier[workUnitType] , identifier[sort] = identifier[self] . identifier[workUnitType] . identifier[storeID] . identifier[descending] , identifier[limit] = literal[int] ): identifier[forwardMark] = identifier[work] . identifier[storeID] identifier[backwardMark] = identifier[work] . identifier[storeID] + literal[int] keyword[break] keyword[else] : identifier[forwardMark] = literal[int] identifier[backwardMark] = literal[int] keyword[if] identifier[self] . identifier[scheduled] keyword[is] keyword[None] : identifier[self] . identifier[scheduled] = identifier[extime] . identifier[Time] () identifier[iaxiom] . identifier[IScheduler] ( identifier[self] . identifier[store] ). identifier[schedule] ( identifier[self] , identifier[self] . identifier[scheduled] ) keyword[return] identifier[_ReliableListener] ( identifier[store] = identifier[self] . identifier[store] , identifier[processor] = identifier[self] , identifier[listener] = identifier[listener] , identifier[forwardMark] = identifier[forwardMark] , identifier[backwardMark] = identifier[backwardMark] , identifier[style] = identifier[style] )
def addReliableListener(self, listener, style=iaxiom.LOCAL): """ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state. """ existing = self.store.findUnique(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.listener == listener), default=None) if existing is not None: return existing # depends on [control=['if'], data=['existing']] for work in self.store.query(self.workUnitType, sort=self.workUnitType.storeID.descending, limit=1): forwardMark = work.storeID backwardMark = work.storeID + 1 break # depends on [control=['for'], data=['work']] else: forwardMark = 0 backwardMark = 0 if self.scheduled is None: self.scheduled = extime.Time() iaxiom.IScheduler(self.store).schedule(self, self.scheduled) # depends on [control=['if'], data=[]] return _ReliableListener(store=self.store, processor=self, listener=listener, forwardMark=forwardMark, backwardMark=backwardMark, style=style)
def normalize_version(version): """ Helper function to normalize version. Returns a comparable object. Args: version (str) version, e.g. "0.1.0" """ rv = [] for x in version.split("."): try: rv.append(int(x)) except ValueError: for y in re.split("([0-9]+)", x): if y == '': continue try: rv.append(int(y)) except ValueError: rv.append(y) return rv
def function[normalize_version, parameter[version]]: constant[ Helper function to normalize version. Returns a comparable object. Args: version (str) version, e.g. "0.1.0" ] variable[rv] assign[=] list[[]] for taget[name[x]] in starred[call[name[version].split, parameter[constant[.]]]] begin[:] <ast.Try object at 0x7da1b26af6d0> return[name[rv]]
keyword[def] identifier[normalize_version] ( identifier[version] ): literal[string] identifier[rv] =[] keyword[for] identifier[x] keyword[in] identifier[version] . identifier[split] ( literal[string] ): keyword[try] : identifier[rv] . identifier[append] ( identifier[int] ( identifier[x] )) keyword[except] identifier[ValueError] : keyword[for] identifier[y] keyword[in] identifier[re] . identifier[split] ( literal[string] , identifier[x] ): keyword[if] identifier[y] == literal[string] : keyword[continue] keyword[try] : identifier[rv] . identifier[append] ( identifier[int] ( identifier[y] )) keyword[except] identifier[ValueError] : identifier[rv] . identifier[append] ( identifier[y] ) keyword[return] identifier[rv]
def normalize_version(version): """ Helper function to normalize version. Returns a comparable object. Args: version (str) version, e.g. "0.1.0" """ rv = [] for x in version.split('.'): try: rv.append(int(x)) # depends on [control=['try'], data=[]] except ValueError: for y in re.split('([0-9]+)', x): if y == '': continue # depends on [control=['if'], data=[]] try: rv.append(int(y)) # depends on [control=['try'], data=[]] except ValueError: rv.append(y) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['y']] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['x']] return rv
def vx(self,*args,**kwargs):
    """
    NAME:

       vx

    PURPOSE:

       return x velocity at time t

    INPUT:

       t - (optional) time at which to get the velocity (can be Quantity)

       vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)

       use_physical= use to override Object-wide default for using a physical scale for output

    OUTPUT:

       vx(t)

    HISTORY:

       2010-11-30 - Written - Bovy (NYU)

    """
    # Delegate to the underlying orbit object; unwrap length-1 results so a
    # single-time query returns a scalar rather than a one-element sequence.
    result = self._orb.vx(*args, **kwargs)
    return result[0] if len(result) == 1 else result
def function[vx, parameter[self]]: constant[ NAME: vx PURPOSE: return x velocity at time t INPUT: t - (optional) time at which to get the velocity (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: vx(t) HISTORY: 2010-11-30 - Written - Bovy (NYU) ] variable[out] assign[=] call[name[self]._orb.vx, parameter[<ast.Starred object at 0x7da1b0e8c2e0>]] if compare[call[name[len], parameter[name[out]]] equal[==] constant[1]] begin[:] return[call[name[out]][constant[0]]]
keyword[def] identifier[vx] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[out] = identifier[self] . identifier[_orb] . identifier[vx] (* identifier[args] ,** identifier[kwargs] ) keyword[if] identifier[len] ( identifier[out] )== literal[int] : keyword[return] identifier[out] [ literal[int] ] keyword[else] : keyword[return] identifier[out]
def vx(self, *args, **kwargs): """ NAME: vx PURPOSE: return x velocity at time t INPUT: t - (optional) time at which to get the velocity (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: vx(t) HISTORY: 2010-11-30 - Written - Bovy (NYU) """ out = self._orb.vx(*args, **kwargs) if len(out) == 1: return out[0] # depends on [control=['if'], data=[]] else: return out
def plotER(self,*args,**kwargs):
    """
    NAME:

       plotER

    PURPOSE:

       plot ER(.) along the orbit

    INPUT:

       bovy_plot.bovy_plot inputs

    OUTPUT:

       figure to output device

    HISTORY:

       2014-06-16 - Written - Bovy (IAS)

    """
    # 'normed' selects the normalized radial energy; it is consumed here so
    # that only plot-compatible keywords are forwarded on.
    normed = kwargs.pop('normed', False)
    kwargs['d2'] = 'ERnorm' if normed else 'ER'
    return self.plot(*args, **kwargs)
def function[plotER, parameter[self]]: constant[ NAME: plotER PURPOSE: plot ER(.) along the orbit INPUT: bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2014-06-16 - Written - Bovy (IAS) ] if call[name[kwargs].pop, parameter[constant[normed], constant[False]]] begin[:] call[name[kwargs]][constant[d2]] assign[=] constant[ERnorm] return[call[name[self].plot, parameter[<ast.Starred object at 0x7da18fe93490>]]]
keyword[def] identifier[plotER] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ): identifier[kwargs] [ literal[string] ]= literal[string] keyword[else] : identifier[kwargs] [ literal[string] ]= literal[string] keyword[return] identifier[self] . identifier[plot] (* identifier[args] ,** identifier[kwargs] )
def plotER(self, *args, **kwargs): """ NAME: plotER PURPOSE: plot ER(.) along the orbit INPUT: bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2014-06-16 - Written - Bovy (IAS) """ if kwargs.pop('normed', False): kwargs['d2'] = 'ERnorm' # depends on [control=['if'], data=[]] else: kwargs['d2'] = 'ER' return self.plot(*args, **kwargs)
def hmean_int(a, a_min=5778, a_max=1149851):
    """ Harmonic mean of an array, returns the closest int
    """
    from scipy.stats import hmean
    # Clamp outliers into [a_min, a_max] before averaging, then round the
    # harmonic mean to the nearest integer.
    clamped = np.clip(a, a_min, a_max)
    return int(round(hmean(clamped)))
def function[hmean_int, parameter[a, a_min, a_max]]: constant[ Harmonic mean of an array, returns the closest int ] from relative_module[scipy.stats] import module[hmean] return[call[name[int], parameter[call[name[round], parameter[call[name[hmean], parameter[call[name[np].clip, parameter[name[a], name[a_min], name[a_max]]]]]]]]]]
keyword[def] identifier[hmean_int] ( identifier[a] , identifier[a_min] = literal[int] , identifier[a_max] = literal[int] ): literal[string] keyword[from] identifier[scipy] . identifier[stats] keyword[import] identifier[hmean] keyword[return] identifier[int] ( identifier[round] ( identifier[hmean] ( identifier[np] . identifier[clip] ( identifier[a] , identifier[a_min] , identifier[a_max] ))))
def hmean_int(a, a_min=5778, a_max=1149851): """ Harmonic mean of an array, returns the closest int """ from scipy.stats import hmean return int(round(hmean(np.clip(a, a_min, a_max))))
def mid(self, value):
    """
    Sets the MID of the message.

    :type value: Integer
    :param value: the MID
    :raise AttributeError: if value is not int or cannot be represented on 16 bits.
    """
    # A message ID is an unsigned 16-bit integer, i.e. 0..65535. The previous
    # check (``value > 65536``) wrongly accepted 65536 itself as well as any
    # negative value, neither of which fits in 16 bits.
    if not isinstance(value, int) or not 0 <= value <= 65535:
        raise AttributeError("MID must be an int representable on 16 bits (0..65535)")
    self._mid = value
def function[mid, parameter[self, value]]: constant[ Sets the MID of the message. :type value: Integer :param value: the MID :raise AttributeError: if value is not int or cannot be represented on 16 bits. ] if <ast.BoolOp object at 0x7da204344ee0> begin[:] <ast.Raise object at 0x7da204347e20> name[self]._mid assign[=] name[value]
keyword[def] identifier[mid] ( identifier[self] , identifier[value] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[int] ) keyword[or] identifier[value] > literal[int] : keyword[raise] identifier[AttributeError] identifier[self] . identifier[_mid] = identifier[value]
def mid(self, value): """ Sets the MID of the message. :type value: Integer :param value: the MID :raise AttributeError: if value is not int or cannot be represented on 16 bits. """ if not isinstance(value, int) or value > 65536: raise AttributeError # depends on [control=['if'], data=[]] self._mid = value
def smoothness(self, convert_to_muC_per_cm2=True, all_in_polar=True):
    """
    Get rms average difference between spline and same branch polarization
    data.

    :param convert_to_muC_per_cm2: forwarded to the data/spline helpers.
    :param all_in_polar: forwarded to the data/spline helpers.
    :return: list of three per-axis rms differences, or None if the spline
        fit fails.
    """
    tot = self.get_same_branch_polarization_data(
        convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar)
    L = tot.shape[0]
    try:
        sp = self.same_branch_splines(convert_to_muC_per_cm2=convert_to_muC_per_cm2,
                                      all_in_polar=all_in_polar)
    except Exception:
        # Narrowed from a bare ``except``: still best-effort (returns None on
        # any fit failure) but no longer swallows SystemExit/KeyboardInterrupt.
        print("Something went wrong.")
        return None
    # Evaluate each spline on the integer sample positions and compare it
    # against the corresponding column of the raw data.
    sp_latt = [sp[i](range(L)) for i in range(3)]
    diff = [sp_latt[i] - tot[:, i].ravel() for i in range(3)]
    rms = [np.sqrt(np.sum(np.square(diff[i])) / L) for i in range(3)]
    return rms
def function[smoothness, parameter[self, convert_to_muC_per_cm2, all_in_polar]]: constant[ Get rms average difference between spline and same branch polarization data. ] variable[tot] assign[=] call[name[self].get_same_branch_polarization_data, parameter[]] variable[L] assign[=] call[name[tot].shape][constant[0]] <ast.Try object at 0x7da207f020b0> variable[sp_latt] assign[=] <ast.ListComp object at 0x7da207f02a70> variable[diff] assign[=] <ast.ListComp object at 0x7da207f03fd0> variable[rms] assign[=] <ast.ListComp object at 0x7da204346fb0> return[name[rms]]
keyword[def] identifier[smoothness] ( identifier[self] , identifier[convert_to_muC_per_cm2] = keyword[True] , identifier[all_in_polar] = keyword[True] ): literal[string] identifier[tot] = identifier[self] . identifier[get_same_branch_polarization_data] ( identifier[convert_to_muC_per_cm2] = identifier[convert_to_muC_per_cm2] , identifier[all_in_polar] = identifier[all_in_polar] ) identifier[L] = identifier[tot] . identifier[shape] [ literal[int] ] keyword[try] : identifier[sp] = identifier[self] . identifier[same_branch_splines] ( identifier[convert_to_muC_per_cm2] = identifier[convert_to_muC_per_cm2] , identifier[all_in_polar] = identifier[all_in_polar] ) keyword[except] : identifier[print] ( literal[string] ) keyword[return] keyword[None] identifier[sp_latt] =[ identifier[sp] [ identifier[i] ]( identifier[range] ( identifier[L] )) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[diff] =[ identifier[sp_latt] [ identifier[i] ]- identifier[tot] [:, identifier[i] ]. identifier[ravel] () keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[rms] =[ identifier[np] . identifier[sqrt] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[square] ( identifier[diff] [ identifier[i] ]))/ identifier[L] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] keyword[return] identifier[rms]
def smoothness(self, convert_to_muC_per_cm2=True, all_in_polar=True): """ Get rms average difference between spline and same branch polarization data. """ tot = self.get_same_branch_polarization_data(convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar) L = tot.shape[0] try: sp = self.same_branch_splines(convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar) # depends on [control=['try'], data=[]] except: print('Something went wrong.') return None # depends on [control=['except'], data=[]] sp_latt = [sp[i](range(L)) for i in range(3)] diff = [sp_latt[i] - tot[:, i].ravel() for i in range(3)] rms = [np.sqrt(np.sum(np.square(diff[i])) / L) for i in range(3)] return rms
def smart_query_string(parser, token):
    """
    Outputs current GET query string with additions appended.
    Additions are provided in token pairs.
    """
    # Drop the tag name itself; what remains are the addition tokens.
    extra_tokens = token.split_contents()[1:]
    # Group the flat token list into (key, value) pairs; a trailing odd
    # token ends up as a single-element pair, exactly as before.
    pairs = [extra_tokens[i:i + 2] for i in range(0, len(extra_tokens), 2)]
    return SmartQueryStringNode(pairs)
def function[smart_query_string, parameter[parser, token]]: constant[ Outputs current GET query string with additions appended. Additions are provided in token pairs. ] variable[args] assign[=] call[name[token].split_contents, parameter[]] variable[additions] assign[=] call[name[args]][<ast.Slice object at 0x7da1b131bb80>] variable[addition_pairs] assign[=] list[[]] while name[additions] begin[:] call[name[addition_pairs].append, parameter[call[name[additions]][<ast.Slice object at 0x7da1b1335c90>]]] variable[additions] assign[=] call[name[additions]][<ast.Slice object at 0x7da1b1334520>] return[call[name[SmartQueryStringNode], parameter[name[addition_pairs]]]]
keyword[def] identifier[smart_query_string] ( identifier[parser] , identifier[token] ): literal[string] identifier[args] = identifier[token] . identifier[split_contents] () identifier[additions] = identifier[args] [ literal[int] :] identifier[addition_pairs] =[] keyword[while] identifier[additions] : identifier[addition_pairs] . identifier[append] ( identifier[additions] [ literal[int] : literal[int] ]) identifier[additions] = identifier[additions] [ literal[int] :] keyword[return] identifier[SmartQueryStringNode] ( identifier[addition_pairs] )
def smart_query_string(parser, token): """ Outputs current GET query string with additions appended. Additions are provided in token pairs. """ args = token.split_contents() additions = args[1:] addition_pairs = [] while additions: addition_pairs.append(additions[0:2]) additions = additions[2:] # depends on [control=['while'], data=[]] return SmartQueryStringNode(addition_pairs)
def folderitem(self, obj, item, index):
    """Applies new properties to the item (analysis) that is currently
    being rendered as a row in the list.

    :param obj: analysis to be rendered as a row in the list
    :param item: dict representation of the analysis, suitable for the list
    :param index: current position of the item within the list
    :type obj: ATContentType/DexterityContentType
    :type item: dict
    :type index: int
    :return: the dict representation of the item
    :rtype: dict
    """
    # Let the base class populate the generic row fields first.
    item = super(AnalysesView, self).folderitem(obj, item, index)
    # Resolve the full content object -- NOTE(review): obj is presumably a
    # catalog brain that api.get_object wakes up; confirm against callers.
    item_obj = api.get_object(obj)
    uid = item["uid"]
    # Slot is the row position where all analyses sharing the same parent
    # (eg. AnalysisRequest, SampleReference), will be displayed as a group
    slot = self.get_item_slot(uid)
    item["Pos"] = slot
    # The position string contains both the slot + the position of the
    # analysis within the slot: "position_sortkey" will be used to sort all
    # the analyses to be displayed in the list
    str_position = self.uids_strpositions[uid]
    item["pos_sortkey"] = str_position
    item["colspan"] = {"Pos": 1}
    item["Service"] = item_obj.Title()
    item["Category"] = item_obj.getCategoryTitle()
    item["DueDate"] = self.ulocalized_time(item_obj, long_format=0)
    item["class"]["Service"] = "service_title"
    # To prevent extra loops, we compute here the number of analyses to be
    # rendered within each slot. This information will be useful later for
    # applying rowspan to the first cell of each slot, that contains info
    # about the parent of all the analyses contained in that slot (e.g
    # Analysis Request ID, Sample Type, etc.)
    rowspans = self.items_rowspans.get(slot, 0) + 1
    remarks_enabled = self.is_analysis_remarks_enabled()
    if remarks_enabled:
        # Increase in one unit the rowspan, cause the comment field for
        # this analysis will be rendered in a new row, below the row that
        # displays the current item
        rowspans = rowspans + 1
    # We map this rowspan information in items_rowspan, that will be used
    # later during the rendereing of slot headers (first cell of each row)
    self.items_rowspans[slot] = rowspans
    return item
def function[folderitem, parameter[self, obj, item, index]]: constant[Applies new properties to the item (analysis) that is currently being rendered as a row in the list. :param obj: analysis to be rendered as a row in the list :param item: dict representation of the analysis, suitable for the list :param index: current position of the item within the list :type obj: ATContentType/DexterityContentType :type item: dict :type index: int :return: the dict representation of the item :rtype: dict ] variable[item] assign[=] call[call[name[super], parameter[name[AnalysesView], name[self]]].folderitem, parameter[name[obj], name[item], name[index]]] variable[item_obj] assign[=] call[name[api].get_object, parameter[name[obj]]] variable[uid] assign[=] call[name[item]][constant[uid]] variable[slot] assign[=] call[name[self].get_item_slot, parameter[name[uid]]] call[name[item]][constant[Pos]] assign[=] name[slot] variable[str_position] assign[=] call[name[self].uids_strpositions][name[uid]] call[name[item]][constant[pos_sortkey]] assign[=] name[str_position] call[name[item]][constant[colspan]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cfa00>], [<ast.Constant object at 0x7da18c4cc5e0>]] call[name[item]][constant[Service]] assign[=] call[name[item_obj].Title, parameter[]] call[name[item]][constant[Category]] assign[=] call[name[item_obj].getCategoryTitle, parameter[]] call[name[item]][constant[DueDate]] assign[=] call[name[self].ulocalized_time, parameter[name[item_obj]]] call[call[name[item]][constant[class]]][constant[Service]] assign[=] constant[service_title] variable[rowspans] assign[=] binary_operation[call[name[self].items_rowspans.get, parameter[name[slot], constant[0]]] + constant[1]] variable[remarks_enabled] assign[=] call[name[self].is_analysis_remarks_enabled, parameter[]] if name[remarks_enabled] begin[:] variable[rowspans] assign[=] binary_operation[name[rowspans] + constant[1]] call[name[self].items_rowspans][name[slot]] assign[=] name[rowspans] 
return[name[item]]
keyword[def] identifier[folderitem] ( identifier[self] , identifier[obj] , identifier[item] , identifier[index] ): literal[string] identifier[item] = identifier[super] ( identifier[AnalysesView] , identifier[self] ). identifier[folderitem] ( identifier[obj] , identifier[item] , identifier[index] ) identifier[item_obj] = identifier[api] . identifier[get_object] ( identifier[obj] ) identifier[uid] = identifier[item] [ literal[string] ] identifier[slot] = identifier[self] . identifier[get_item_slot] ( identifier[uid] ) identifier[item] [ literal[string] ]= identifier[slot] identifier[str_position] = identifier[self] . identifier[uids_strpositions] [ identifier[uid] ] identifier[item] [ literal[string] ]= identifier[str_position] identifier[item] [ literal[string] ]={ literal[string] : literal[int] } identifier[item] [ literal[string] ]= identifier[item_obj] . identifier[Title] () identifier[item] [ literal[string] ]= identifier[item_obj] . identifier[getCategoryTitle] () identifier[item] [ literal[string] ]= identifier[self] . identifier[ulocalized_time] ( identifier[item_obj] , identifier[long_format] = literal[int] ) identifier[item] [ literal[string] ][ literal[string] ]= literal[string] identifier[rowspans] = identifier[self] . identifier[items_rowspans] . identifier[get] ( identifier[slot] , literal[int] )+ literal[int] identifier[remarks_enabled] = identifier[self] . identifier[is_analysis_remarks_enabled] () keyword[if] identifier[remarks_enabled] : identifier[rowspans] = identifier[rowspans] + literal[int] identifier[self] . identifier[items_rowspans] [ identifier[slot] ]= identifier[rowspans] keyword[return] identifier[item]
def folderitem(self, obj, item, index): """Applies new properties to the item (analysis) that is currently being rendered as a row in the list. :param obj: analysis to be rendered as a row in the list :param item: dict representation of the analysis, suitable for the list :param index: current position of the item within the list :type obj: ATContentType/DexterityContentType :type item: dict :type index: int :return: the dict representation of the item :rtype: dict """ item = super(AnalysesView, self).folderitem(obj, item, index) item_obj = api.get_object(obj) uid = item['uid'] # Slot is the row position where all analyses sharing the same parent # (eg. AnalysisRequest, SampleReference), will be displayed as a group slot = self.get_item_slot(uid) item['Pos'] = slot # The position string contains both the slot + the position of the # analysis within the slot: "position_sortkey" will be used to sort all # the analyses to be displayed in the list str_position = self.uids_strpositions[uid] item['pos_sortkey'] = str_position item['colspan'] = {'Pos': 1} item['Service'] = item_obj.Title() item['Category'] = item_obj.getCategoryTitle() item['DueDate'] = self.ulocalized_time(item_obj, long_format=0) item['class']['Service'] = 'service_title' # To prevent extra loops, we compute here the number of analyses to be # rendered within each slot. This information will be useful later for # applying rowspan to the first cell of each slot, that contains info # about the parent of all the analyses contained in that slot (e.g # Analysis Request ID, Sample Type, etc.) 
rowspans = self.items_rowspans.get(slot, 0) + 1 remarks_enabled = self.is_analysis_remarks_enabled() if remarks_enabled: # Increase in one unit the rowspan, cause the comment field for # this analysis will be rendered in a new row, below the row that # displays the current item rowspans = rowspans + 1 # depends on [control=['if'], data=[]] # We map this rowspan information in items_rowspan, that will be used # later during the rendereing of slot headers (first cell of each row) self.items_rowspans[slot] = rowspans return item
def _get_monthly_data(self, p_p_id):
    """Get monthly data.

    :param p_p_id: portlet id forwarded to the profile endpoint.
    :return: the ``results`` entry of the JSON payload (or None if absent).
    :raise PyHydroQuebecError: if the HTTP request fails, the body is not
        valid JSON, or the server does not report ``success``.
    """
    # Liferay-style resource request for the consumption-period data.
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_resource_id": ("resourceObtenirDonnees"
                                  "PeriodesConsommation")}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get monthly data")
    try:
        # content_type is overridden because the server labels the body as
        # 'text/json' -- presumably the default strict check would reject it
        # otherwise (TODO confirm against the session implementation).
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecError("Could not get monthly data")

    if not json_output.get('success'):
        raise PyHydroQuebecError("Could not get monthly data")

    return json_output.get('results')
def function[_get_monthly_data, parameter[self, p_p_id]]: constant[Get monthly data.] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ff11b0>, <ast.Constant object at 0x7da1b0ff1060>, <ast.Constant object at 0x7da1b0ff1150>], [<ast.Name object at 0x7da1b0ff0fd0>, <ast.Constant object at 0x7da1b0ff1000>, <ast.Constant object at 0x7da1b0ff0e80>]] <ast.Try object at 0x7da1b0ff2320> <ast.Try object at 0x7da1b0ff2920> if <ast.UnaryOp object at 0x7da1b0ff1270> begin[:] <ast.Raise object at 0x7da1b0ff1240> return[call[name[json_output].get, parameter[constant[results]]]]
keyword[def] identifier[_get_monthly_data] ( identifier[self] , identifier[p_p_id] ): literal[string] identifier[params] ={ literal[string] : identifier[p_p_id] , literal[string] : literal[int] , literal[string] :( literal[string] literal[string] )} keyword[try] : identifier[raw_res] = keyword[yield] keyword[from] identifier[self] . identifier[_session] . identifier[get] ( identifier[PROFILE_URL] , identifier[params] = identifier[params] , identifier[timeout] = identifier[self] . identifier[_timeout] ) keyword[except] identifier[OSError] : keyword[raise] identifier[PyHydroQuebecError] ( literal[string] ) keyword[try] : identifier[json_output] = keyword[yield] keyword[from] identifier[raw_res] . identifier[json] ( identifier[content_type] = literal[string] ) keyword[except] ( identifier[OSError] , identifier[json] . identifier[decoder] . identifier[JSONDecodeError] ): keyword[raise] identifier[PyHydroQuebecError] ( literal[string] ) keyword[if] keyword[not] identifier[json_output] . identifier[get] ( literal[string] ): keyword[raise] identifier[PyHydroQuebecError] ( literal[string] ) keyword[return] identifier[json_output] . identifier[get] ( literal[string] )
def _get_monthly_data(self, p_p_id): """Get monthly data.""" params = {'p_p_id': p_p_id, 'p_p_lifecycle': 2, 'p_p_resource_id': 'resourceObtenirDonneesPeriodesConsommation'} try: raw_res = (yield from self._session.get(PROFILE_URL, params=params, timeout=self._timeout)) # depends on [control=['try'], data=[]] except OSError: raise PyHydroQuebecError('Can not get monthly data') # depends on [control=['except'], data=[]] try: json_output = (yield from raw_res.json(content_type='text/json')) # depends on [control=['try'], data=[]] except (OSError, json.decoder.JSONDecodeError): raise PyHydroQuebecError('Could not get monthly data') # depends on [control=['except'], data=[]] if not json_output.get('success'): raise PyHydroQuebecError('Could not get monthly data') # depends on [control=['if'], data=[]] return json_output.get('results')
def keyPressEvent(self, event):
    """Reimplement Qt method to allow cyclic behavior."""
    pressed = event.key()
    if pressed == Qt.Key_Down:
        # Advance the selection one row.
        self.select_row(1)
    elif pressed == Qt.Key_Up:
        # Move the selection one row back.
        self.select_row(-1)
def function[keyPressEvent, parameter[self, event]]: constant[Reimplement Qt method to allow cyclic behavior.] if compare[call[name[event].key, parameter[]] equal[==] name[Qt].Key_Down] begin[:] call[name[self].select_row, parameter[constant[1]]]
keyword[def] identifier[keyPressEvent] ( identifier[self] , identifier[event] ): literal[string] keyword[if] identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Down] : identifier[self] . identifier[select_row] ( literal[int] ) keyword[elif] identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Up] : identifier[self] . identifier[select_row] (- literal[int] )
def keyPressEvent(self, event): """Reimplement Qt method to allow cyclic behavior.""" if event.key() == Qt.Key_Down: self.select_row(1) # depends on [control=['if'], data=[]] elif event.key() == Qt.Key_Up: self.select_row(-1) # depends on [control=['if'], data=[]]
def SCAS(cpu, dest, src):
    """
    Scans String.

    Compares the byte, word, or double word specified with the memory operand
    with the value in the AL, AX, EAX, or RAX register, and sets the status flags
    according to the results. The memory operand address is read from either
    the ES:RDI, ES:EDI or the ES:DI registers (depending on the address-size
    attribute of the instruction, 32 or 16, respectively)::

            IF (byte comparison)
            THEN
                temp = AL - SRC;
                SetStatusFlags(temp);
                    THEN IF DF = 0
                        THEN (E)DI = (E)DI + 1;
                        ELSE (E)DI = (E)DI - 1;
                    FI;
            ELSE IF (word comparison)
                THEN
                    temp = AX - SRC;
                    SetStatusFlags(temp)
                    THEN IF DF = 0
                        THEN (E)DI = (E)DI + 2;
                        ELSE (E)DI = (E)DI - 2;
                    FI;
                ELSE (* doubleword comparison *)
                    temp = EAX - SRC;
                    SetStatusFlags(temp)
                    THEN IF DF = 0
                        THEN (E)DI = (E)DI + 4;
                        ELSE (E)DI = (E)DI - 4;
                    FI;
            FI;
        FI;

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    # Removed the unused ``dest_reg = dest.reg`` local and the stale debug
    # comment that accompanied the base-register lookup.
    mem_reg = src.mem.base
    size = dest.size
    arg0 = dest.read()
    arg1 = src.read()
    res = arg0 - arg1
    # CMP-style flag update: SCAS sets flags exactly as a subtraction would.
    cpu._calculate_CMP_flags(size, res, arg0, arg1)
    # DF=1 scans downward, DF=0 upward, stepping by the operand width in bytes.
    increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8)
    cpu.write_register(mem_reg, cpu.read_register(mem_reg) + increment)
def function[SCAS, parameter[cpu, dest, src]]: constant[ Scans String. Compares the byte, word, or double word specified with the memory operand with the value in the AL, AX, EAX, or RAX register, and sets the status flags according to the results. The memory operand address is read from either the ES:RDI, ES:EDI or the ES:DI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively):: IF (byte comparison) THEN temp = AL - SRC; SetStatusFlags(temp); THEN IF DF = 0 THEN (E)DI = (E)DI + 1; ELSE (E)DI = (E)DI - 1; FI; ELSE IF (word comparison) THEN temp = AX - SRC; SetStatusFlags(temp) THEN IF DF = 0 THEN (E)DI = (E)DI + 2; ELSE (E)DI = (E)DI - 2; FI; ELSE (* doubleword comparison *) temp = EAX - SRC; SetStatusFlags(temp) THEN IF DF = 0 THEN (E)DI = (E)DI + 4; ELSE (E)DI = (E)DI - 4; FI; FI; FI; :param cpu: current CPU. :param dest: destination operand. :param src: source operand. ] variable[dest_reg] assign[=] name[dest].reg variable[mem_reg] assign[=] name[src].mem.base variable[size] assign[=] name[dest].size variable[arg0] assign[=] call[name[dest].read, parameter[]] variable[arg1] assign[=] call[name[src].read, parameter[]] variable[res] assign[=] binary_operation[name[arg0] - name[arg1]] call[name[cpu]._calculate_CMP_flags, parameter[name[size], name[res], name[arg0], name[arg1]]] variable[increment] assign[=] call[name[Operators].ITEBV, parameter[name[cpu].address_bit_size, name[cpu].DF, binary_operation[<ast.UnaryOp object at 0x7da1b26af070> <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]], binary_operation[name[size] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]]]] call[name[cpu].write_register, parameter[name[mem_reg], binary_operation[call[name[cpu].read_register, parameter[name[mem_reg]]] + name[increment]]]]
keyword[def] identifier[SCAS] ( identifier[cpu] , identifier[dest] , identifier[src] ): literal[string] identifier[dest_reg] = identifier[dest] . identifier[reg] identifier[mem_reg] = identifier[src] . identifier[mem] . identifier[base] identifier[size] = identifier[dest] . identifier[size] identifier[arg0] = identifier[dest] . identifier[read] () identifier[arg1] = identifier[src] . identifier[read] () identifier[res] = identifier[arg0] - identifier[arg1] identifier[cpu] . identifier[_calculate_CMP_flags] ( identifier[size] , identifier[res] , identifier[arg0] , identifier[arg1] ) identifier[increment] = identifier[Operators] . identifier[ITEBV] ( identifier[cpu] . identifier[address_bit_size] , identifier[cpu] . identifier[DF] ,- identifier[size] // literal[int] , identifier[size] // literal[int] ) identifier[cpu] . identifier[write_register] ( identifier[mem_reg] , identifier[cpu] . identifier[read_register] ( identifier[mem_reg] )+ identifier[increment] )
def SCAS(cpu, dest, src): """ Scans String. Compares the byte, word, or double word specified with the memory operand with the value in the AL, AX, EAX, or RAX register, and sets the status flags according to the results. The memory operand address is read from either the ES:RDI, ES:EDI or the ES:DI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively):: IF (byte comparison) THEN temp = AL - SRC; SetStatusFlags(temp); THEN IF DF = 0 THEN (E)DI = (E)DI + 1; ELSE (E)DI = (E)DI - 1; FI; ELSE IF (word comparison) THEN temp = AX - SRC; SetStatusFlags(temp) THEN IF DF = 0 THEN (E)DI = (E)DI + 2; ELSE (E)DI = (E)DI - 2; FI; ELSE (* doubleword comparison *) temp = EAX - SRC; SetStatusFlags(temp) THEN IF DF = 0 THEN (E)DI = (E)DI + 4; ELSE (E)DI = (E)DI - 4; FI; FI; FI; :param cpu: current CPU. :param dest: destination operand. :param src: source operand. """ dest_reg = dest.reg mem_reg = src.mem.base # , src.type, src.read() size = dest.size arg0 = dest.read() arg1 = src.read() res = arg0 - arg1 cpu._calculate_CMP_flags(size, res, arg0, arg1) increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8) cpu.write_register(mem_reg, cpu.read_register(mem_reg) + increment)
def convert_pathway_mapping(self, other_pathway_mapping):
    """Remap another CoNetwork's pathway->vertex-id table onto this network.

    For each pathway in ``other_pathway_mapping``:
    (1) if ``self.pathways`` already knows the pathway, reuse the local
        vertex id;
    (2) otherwise register the pathway here, create its Vertex, and use the
        freshly assigned id.

    Parameters
    -----------
    other_pathway_mapping : dict(str -> int)
        the `pathways` field of another CoNetwork
        (a pathway -> vertex id map).

    Returns
    -----------
    dict(int -> int), the (other vertex id -> `self` vertex id) conversion map
    """
    id_map = {}
    for pathway, foreign_id in other_pathway_mapping.items():
        if pathway in self.pathways:
            id_map[foreign_id] = self.pathways[pathway]
        else:
            local_id = self.add_pathway(pathway)
            self.vertices[local_id] = Vertex(local_id)
            id_map[foreign_id] = local_id
    return id_map
def function[convert_pathway_mapping, parameter[self, other_pathway_mapping]]: constant[Used to convert the pathway-to-vertex id mapping in one CoNetwork to the one used in the current CoNetwork (`self`). The following tasks are carried out in the remapping: (1) If `self.pathways` contains the pathway to be merged, map the vertex id in `other_pathway_mapping` to the vertex id in `self.pathways`. (2) If not, create a vertex in `self` and then add the key-value pair to `self.pathways` accordingly. Parameters ----------- other_pathway_mapping : dict(str -> int) the `pathways` field in the CoNetwork class. This is a (pathway -> vertex id) map. Returns ----------- dict(int -> int), the (other vertex id -> `self` vertex id) conversion map ] variable[vertex_id_conversion] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da20c76fb20>, <ast.Name object at 0x7da20c76d6c0>]]] in starred[call[name[other_pathway_mapping].items, parameter[]]] begin[:] if compare[name[pathway] in name[self].pathways] begin[:] call[name[vertex_id_conversion]][name[vertex_id]] assign[=] call[name[self].pathways][name[pathway]] return[name[vertex_id_conversion]]
keyword[def] identifier[convert_pathway_mapping] ( identifier[self] , identifier[other_pathway_mapping] ): literal[string] identifier[vertex_id_conversion] ={} keyword[for] identifier[pathway] , identifier[vertex_id] keyword[in] identifier[other_pathway_mapping] . identifier[items] (): keyword[if] identifier[pathway] keyword[in] identifier[self] . identifier[pathways] : identifier[vertex_id_conversion] [ identifier[vertex_id] ]= identifier[self] . identifier[pathways] [ identifier[pathway] ] keyword[else] : identifier[self_vertex_id] = identifier[self] . identifier[add_pathway] ( identifier[pathway] ) identifier[self] . identifier[vertices] [ identifier[self_vertex_id] ]= identifier[Vertex] ( identifier[self_vertex_id] ) identifier[vertex_id_conversion] [ identifier[vertex_id] ]= identifier[self_vertex_id] keyword[return] identifier[vertex_id_conversion]
def convert_pathway_mapping(self, other_pathway_mapping): """Used to convert the pathway-to-vertex id mapping in one CoNetwork to the one used in the current CoNetwork (`self`). The following tasks are carried out in the remapping: (1) If `self.pathways` contains the pathway to be merged, map the vertex id in `other_pathway_mapping` to the vertex id in `self.pathways`. (2) If not, create a vertex in `self` and then add the key-value pair to `self.pathways` accordingly. Parameters ----------- other_pathway_mapping : dict(str -> int) the `pathways` field in the CoNetwork class. This is a (pathway -> vertex id) map. Returns ----------- dict(int -> int), the (other vertex id -> `self` vertex id) conversion map """ vertex_id_conversion = {} for (pathway, vertex_id) in other_pathway_mapping.items(): if pathway in self.pathways: vertex_id_conversion[vertex_id] = self.pathways[pathway] # depends on [control=['if'], data=['pathway']] else: self_vertex_id = self.add_pathway(pathway) self.vertices[self_vertex_id] = Vertex(self_vertex_id) vertex_id_conversion[vertex_id] = self_vertex_id # depends on [control=['for'], data=[]] return vertex_id_conversion
def dt_second(x): """Extracts the second out of a datetime samples. :returns: an expression containing the second extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.second Expression = dt_second(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 34 2 22 """ import pandas as pd return pd.Series(x).dt.second.values
def function[dt_second, parameter[x]]: constant[Extracts the second out of a datetime samples. :returns: an expression containing the second extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.second Expression = dt_second(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 34 2 22 ] import module[pandas] as alias[pd] return[call[name[pd].Series, parameter[name[x]]].dt.second.values]
keyword[def] identifier[dt_second] ( identifier[x] ): literal[string] keyword[import] identifier[pandas] keyword[as] identifier[pd] keyword[return] identifier[pd] . identifier[Series] ( identifier[x] ). identifier[dt] . identifier[second] . identifier[values]
def dt_second(x): """Extracts the second out of a datetime samples. :returns: an expression containing the second extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.second Expression = dt_second(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 34 2 22 """ import pandas as pd return pd.Series(x).dt.second.values
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'):
    """Parse a field object from yaml into a sqlalchemy expression.

    :param fld: Either a string naming the field, or a dict with keys
        ``value`` (required str), ``aggregation`` (optional str or None)
        and ``condition`` (optional dict or None).
    :param selectable: The selectable against which column names appearing
        in the field value are resolved (via ``find_column``).
    :param aggregated: When True, a field without an explicit aggregation
        gets ``default_aggregation``; when False it is left unaggregated.
    :param default_aggregation: Name of the aggregation applied when
        ``aggregated`` is True and no aggregation is given.
    :return: A sqlalchemy expression for the (optionally aggregated,
        optionally conditional) field.
    :raises BadIngredient: If the field definition is malformed.
    """
    # An aggregation is a callable that takes a single field expression
    # None will perform no aggregation
    aggregation_lookup = {
        'sum': func.sum,
        'min': func.min,
        'max': func.max,
        'avg': func.avg,
        'count': func.count,
        'count_distinct': lambda fld: func.count(distinct(fld)),
        'month': lambda fld: func.date_trunc('month', fld),
        'week': lambda fld: func.date_trunc('week', fld),
        'year': lambda fld: func.date_trunc('year', fld),
        'quarter': lambda fld: func.date_trunc('quarter', fld),
        'age': lambda fld: func.date_part('year', func.age(fld)),
        None: lambda fld: fld,
    }

    # Normalize fld so that it is a dict containing:
    # {
    #     'value': str,
    #     'aggregation': str|None,
    #     'condition': dict|None
    # }
    if isinstance(fld, basestring):
        fld = {
            'value': fld,
        }
    if not isinstance(fld, dict):
        raise BadIngredient('fields must be a string or a dict')
    if 'value' not in fld:
        raise BadIngredient('fields must contain a value')
    if not isinstance(fld['value'], basestring):
        raise BadIngredient('field value must be a string')

    # Ensure a condition
    if 'condition' in fld:
        if not isinstance(fld['condition'], dict) and \
                not fld['condition'] is None:
            raise BadIngredient('condition must be null or an object')
    else:
        fld['condition'] = None

    # Ensure an aggregation
    initial_aggregation = default_aggregation if aggregated else None
    if 'aggregation' in fld:
        if not isinstance(fld['aggregation'], basestring) and \
                not fld['aggregation'] is None:
            raise BadIngredient('aggregation must be null or an string')
        if fld['aggregation'] is None:
            fld['aggregation'] = initial_aggregation
    else:
        fld['aggregation'] = initial_aggregation

    value = fld.get('value', None)
    if value is None:
        raise BadIngredient('field value is not defined')

    # Tokenize the value into alternating column references and arithmetic
    # operator keywords.
    field_parts = []
    for word in tokenize(value):
        if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'):
            field_parts.append(word)
        else:
            field_parts.append(find_column(selectable, word))

    # BUG FIX: the original tested ``len(field_parts) is None`` which is
    # always False (len() never returns None), so an empty field never
    # raised the intended error.
    if not field_parts:
        raise BadIngredient('field is not defined.')
    # Fields should have an odd number of parts
    if len(field_parts) % 2 != 1:
        raise BadIngredient('field does not have the right number of parts')

    field = field_parts[0]
    if len(field_parts) > 1:
        # if we need to add and subtract from the field
        # join the field parts into pairs, for instance if field parts is
        # [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]
        # we will get two pairs here
        # [('MINUS', MyTable.second), ('PLUS', MyTable.third)]
        for operator, other_field in zip(field_parts[1::2],
                                         field_parts[2::2]):
            if operator == 'PLUS':
                field = field.__add__(other_field)
            elif operator == 'MINUS':
                field = field.__sub__(other_field)
            elif operator == 'DIVIDE':
                field = field.__div__(other_field)
            elif operator == 'MULTIPLY':
                field = field.__mul__(other_field)
            else:
                raise BadIngredient('Unknown operator {}'.format(operator))

    # Handle the aggregator
    aggr = fld.get('aggregation', 'sum')
    if aggr is not None:
        aggr = aggr.strip()
    if aggr not in aggregation_lookup:
        raise BadIngredient('unknown aggregation {}'.format(aggr))
    aggregator = aggregation_lookup[aggr]

    # An optional condition wraps the field in a CASE expression so only
    # matching rows contribute to the aggregation.
    condition = parse_condition(
        fld.get('condition', None),
        selectable,
        aggregated=False,
        default_aggregation=default_aggregation
    )
    if condition is not None:
        field = case([(condition, field)])

    return aggregator(field)
def function[parse_field, parameter[fld, selectable, aggregated, default_aggregation]]: constant[ Parse a field object from yaml into a sqlalchemy expression ] variable[aggregation_lookup] assign[=] dictionary[[<ast.Constant object at 0x7da1b26aee90>, <ast.Constant object at 0x7da1b26afa30>, <ast.Constant object at 0x7da1b26ad150>, <ast.Constant object at 0x7da1b26af160>, <ast.Constant object at 0x7da1b26ad0f0>, <ast.Constant object at 0x7da1b26af1f0>, <ast.Constant object at 0x7da1b26ae650>, <ast.Constant object at 0x7da1b26af010>, <ast.Constant object at 0x7da1b26ae380>, <ast.Constant object at 0x7da1b26aca90>, <ast.Constant object at 0x7da1b26ac190>, <ast.Constant object at 0x7da1b26acaf0>], [<ast.Attribute object at 0x7da1b26ae230>, <ast.Attribute object at 0x7da1b26aec20>, <ast.Attribute object at 0x7da1b26ade70>, <ast.Attribute object at 0x7da1b26ae350>, <ast.Attribute object at 0x7da1b26af3a0>, <ast.Lambda object at 0x7da1b26aef20>, <ast.Lambda object at 0x7da1b26ad1b0>, <ast.Lambda object at 0x7da1b26adfc0>, <ast.Lambda object at 0x7da1b26acb20>, <ast.Lambda object at 0x7da1b26ad600>, <ast.Lambda object at 0x7da1b26ad4e0>, <ast.Lambda object at 0x7da1b26af430>]] if call[name[isinstance], parameter[name[fld], name[basestring]]] begin[:] variable[fld] assign[=] dictionary[[<ast.Constant object at 0x7da1b26aca60>], [<ast.Name object at 0x7da1b26ad480>]] if <ast.UnaryOp object at 0x7da1b26ad210> begin[:] <ast.Raise object at 0x7da1b26ae3b0> if compare[constant[value] <ast.NotIn object at 0x7da2590d7190> name[fld]] begin[:] <ast.Raise object at 0x7da1b26ac310> if <ast.UnaryOp object at 0x7da1b26ae6b0> begin[:] <ast.Raise object at 0x7da1b26ace80> if compare[constant[condition] in name[fld]] begin[:] if <ast.BoolOp object at 0x7da1b26ac730> begin[:] <ast.Raise object at 0x7da1b26af070> variable[initial_aggregation] assign[=] <ast.IfExp object at 0x7da1b26afcd0> if compare[constant[aggregation] in name[fld]] begin[:] if <ast.BoolOp object at 0x7da1b26adff0> 
begin[:] <ast.Raise object at 0x7da1b1973280> if compare[call[name[fld]][constant[aggregation]] is constant[None]] begin[:] call[name[fld]][constant[aggregation]] assign[=] name[initial_aggregation] variable[value] assign[=] call[name[fld].get, parameter[constant[value], constant[None]]] if compare[name[value] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1970cd0> variable[field_parts] assign[=] list[[]] for taget[name[word]] in starred[call[name[tokenize], parameter[name[value]]]] begin[:] if compare[name[word] in tuple[[<ast.Constant object at 0x7da1b1972dd0>, <ast.Constant object at 0x7da1b1972200>, <ast.Constant object at 0x7da1b19724d0>, <ast.Constant object at 0x7da1b1971a20>]]] begin[:] call[name[field_parts].append, parameter[name[word]]] if compare[call[name[len], parameter[name[field_parts]]] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1971f90> if compare[binary_operation[call[name[len], parameter[name[field_parts]]] <ast.Mod object at 0x7da2590d6920> constant[2]] not_equal[!=] constant[1]] begin[:] <ast.Raise object at 0x7da1b19707f0> variable[field] assign[=] call[name[field_parts]][constant[0]] if compare[call[name[len], parameter[name[field_parts]]] greater[>] constant[1]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1970580>, <ast.Name object at 0x7da1b19720b0>]]] in starred[call[name[zip], parameter[call[name[field_parts]][<ast.Slice object at 0x7da1b19721a0>], call[name[field_parts]][<ast.Slice object at 0x7da1b19719f0>]]]] begin[:] if compare[name[operator] equal[==] constant[PLUS]] begin[:] variable[field] assign[=] call[name[field].__add__, parameter[name[other_field]]] variable[aggr] assign[=] call[name[fld].get, parameter[constant[aggregation], constant[sum]]] if compare[name[aggr] is_not constant[None]] begin[:] variable[aggr] assign[=] call[name[aggr].strip, parameter[]] if compare[name[aggr] <ast.NotIn object at 0x7da2590d7190> name[aggregation_lookup]] begin[:] <ast.Raise object at 0x7da1b1973910> 
variable[aggregator] assign[=] call[name[aggregation_lookup]][name[aggr]] variable[condition] assign[=] call[name[parse_condition], parameter[call[name[fld].get, parameter[constant[condition], constant[None]]], name[selectable]]] if compare[name[condition] is_not constant[None]] begin[:] variable[field] assign[=] call[name[case], parameter[list[[<ast.Tuple object at 0x7da1b1973af0>]]]] return[call[name[aggregator], parameter[name[field]]]]
keyword[def] identifier[parse_field] ( identifier[fld] , identifier[selectable] , identifier[aggregated] = keyword[True] , identifier[default_aggregation] = literal[string] ): literal[string] identifier[aggregation_lookup] ={ literal[string] : identifier[func] . identifier[sum] , literal[string] : identifier[func] . identifier[min] , literal[string] : identifier[func] . identifier[max] , literal[string] : identifier[func] . identifier[avg] , literal[string] : identifier[func] . identifier[count] , literal[string] : keyword[lambda] identifier[fld] : identifier[func] . identifier[count] ( identifier[distinct] ( identifier[fld] )), literal[string] : keyword[lambda] identifier[fld] : identifier[func] . identifier[date_trunc] ( literal[string] , identifier[fld] ), literal[string] : keyword[lambda] identifier[fld] : identifier[func] . identifier[date_trunc] ( literal[string] , identifier[fld] ), literal[string] : keyword[lambda] identifier[fld] : identifier[func] . identifier[date_trunc] ( literal[string] , identifier[fld] ), literal[string] : keyword[lambda] identifier[fld] : identifier[func] . identifier[date_trunc] ( literal[string] , identifier[fld] ), literal[string] : keyword[lambda] identifier[fld] : identifier[func] . identifier[date_part] ( literal[string] , identifier[func] . 
identifier[age] ( identifier[fld] )), keyword[None] : keyword[lambda] identifier[fld] : identifier[fld] , } keyword[if] identifier[isinstance] ( identifier[fld] , identifier[basestring] ): identifier[fld] ={ literal[string] : identifier[fld] , } keyword[if] keyword[not] identifier[isinstance] ( identifier[fld] , identifier[dict] ): keyword[raise] identifier[BadIngredient] ( literal[string] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[fld] : keyword[raise] identifier[BadIngredient] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[fld] [ literal[string] ], identifier[basestring] ): keyword[raise] identifier[BadIngredient] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[fld] : keyword[if] keyword[not] identifier[isinstance] ( identifier[fld] [ literal[string] ], identifier[dict] ) keyword[and] keyword[not] identifier[fld] [ literal[string] ] keyword[is] keyword[None] : keyword[raise] identifier[BadIngredient] ( literal[string] ) keyword[else] : identifier[fld] [ literal[string] ]= keyword[None] identifier[initial_aggregation] = identifier[default_aggregation] keyword[if] identifier[aggregated] keyword[else] keyword[None] keyword[if] literal[string] keyword[in] identifier[fld] : keyword[if] keyword[not] identifier[isinstance] ( identifier[fld] [ literal[string] ], identifier[basestring] ) keyword[and] keyword[not] identifier[fld] [ literal[string] ] keyword[is] keyword[None] : keyword[raise] identifier[BadIngredient] ( literal[string] ) keyword[if] identifier[fld] [ literal[string] ] keyword[is] keyword[None] : identifier[fld] [ literal[string] ]= identifier[initial_aggregation] keyword[else] : identifier[fld] [ literal[string] ]= identifier[initial_aggregation] identifier[value] = identifier[fld] . 
identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[value] keyword[is] keyword[None] : keyword[raise] identifier[BadIngredient] ( literal[string] ) identifier[field_parts] =[] keyword[for] identifier[word] keyword[in] identifier[tokenize] ( identifier[value] ): keyword[if] identifier[word] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ): identifier[field_parts] . identifier[append] ( identifier[word] ) keyword[else] : identifier[field_parts] . identifier[append] ( identifier[find_column] ( identifier[selectable] , identifier[word] )) keyword[if] identifier[len] ( identifier[field_parts] ) keyword[is] keyword[None] : keyword[raise] identifier[BadIngredient] ( literal[string] ) keyword[if] identifier[len] ( identifier[field_parts] )% literal[int] != literal[int] : keyword[raise] identifier[BadIngredient] ( literal[string] ) identifier[field] = identifier[field_parts] [ literal[int] ] keyword[if] identifier[len] ( identifier[field_parts] )> literal[int] : keyword[for] identifier[operator] , identifier[other_field] keyword[in] identifier[zip] ( identifier[field_parts] [ literal[int] :: literal[int] ], identifier[field_parts] [ literal[int] :: literal[int] ]): keyword[if] identifier[operator] == literal[string] : identifier[field] = identifier[field] . identifier[__add__] ( identifier[other_field] ) keyword[elif] identifier[operator] == literal[string] : identifier[field] = identifier[field] . identifier[__sub__] ( identifier[other_field] ) keyword[elif] identifier[operator] == literal[string] : identifier[field] = identifier[field] . identifier[__div__] ( identifier[other_field] ) keyword[elif] identifier[operator] == literal[string] : identifier[field] = identifier[field] . identifier[__mul__] ( identifier[other_field] ) keyword[else] : keyword[raise] identifier[BadIngredient] ( literal[string] . identifier[format] ( identifier[operator] )) identifier[aggr] = identifier[fld] . 
identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[aggr] keyword[is] keyword[not] keyword[None] : identifier[aggr] = identifier[aggr] . identifier[strip] () keyword[if] identifier[aggr] keyword[not] keyword[in] identifier[aggregation_lookup] : keyword[raise] identifier[BadIngredient] ( literal[string] . identifier[format] ( identifier[aggr] )) identifier[aggregator] = identifier[aggregation_lookup] [ identifier[aggr] ] identifier[condition] = identifier[parse_condition] ( identifier[fld] . identifier[get] ( literal[string] , keyword[None] ), identifier[selectable] , identifier[aggregated] = keyword[False] , identifier[default_aggregation] = identifier[default_aggregation] ) keyword[if] identifier[condition] keyword[is] keyword[not] keyword[None] : identifier[field] = identifier[case] ([( identifier[condition] , identifier[field] )]) keyword[return] identifier[aggregator] ( identifier[field] )
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'): """ Parse a field object from yaml into a sqlalchemy expression """ # An aggregation is a callable that takes a single field expression # None will perform no aggregation aggregation_lookup = {'sum': func.sum, 'min': func.min, 'max': func.max, 'avg': func.avg, 'count': func.count, 'count_distinct': lambda fld: func.count(distinct(fld)), 'month': lambda fld: func.date_trunc('month', fld), 'week': lambda fld: func.date_trunc('week', fld), 'year': lambda fld: func.date_trunc('year', fld), 'quarter': lambda fld: func.date_trunc('quarter', fld), 'age': lambda fld: func.date_part('year', func.age(fld)), None: lambda fld: fld} # Ensure that the dictionary contains: # { # 'value': str, # 'aggregation': str|None, # 'condition': dict|None # } if isinstance(fld, basestring): fld = {'value': fld} # depends on [control=['if'], data=[]] if not isinstance(fld, dict): raise BadIngredient('fields must be a string or a dict') # depends on [control=['if'], data=[]] if 'value' not in fld: raise BadIngredient('fields must contain a value') # depends on [control=['if'], data=[]] if not isinstance(fld['value'], basestring): raise BadIngredient('field value must be a string') # depends on [control=['if'], data=[]] # Ensure a condition if 'condition' in fld: if not isinstance(fld['condition'], dict) and (not fld['condition'] is None): raise BadIngredient('condition must be null or an object') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['fld']] else: fld['condition'] = None # Ensure an aggregation initial_aggregation = default_aggregation if aggregated else None if 'aggregation' in fld: if not isinstance(fld['aggregation'], basestring) and (not fld['aggregation'] is None): raise BadIngredient('aggregation must be null or an string') # depends on [control=['if'], data=[]] if fld['aggregation'] is None: fld['aggregation'] = initial_aggregation # depends on [control=['if'], data=[]] # 
depends on [control=['if'], data=['fld']] else: fld['aggregation'] = initial_aggregation value = fld.get('value', None) if value is None: raise BadIngredient('field value is not defined') # depends on [control=['if'], data=[]] field_parts = [] for word in tokenize(value): if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'): field_parts.append(word) # depends on [control=['if'], data=['word']] else: field_parts.append(find_column(selectable, word)) # depends on [control=['for'], data=['word']] if len(field_parts) is None: raise BadIngredient('field is not defined.') # depends on [control=['if'], data=[]] # Fields should have an odd number of parts if len(field_parts) % 2 != 1: raise BadIngredient('field does not have the right number of parts') # depends on [control=['if'], data=[]] field = field_parts[0] if len(field_parts) > 1: # if we need to add and subtract from the field # join the field parts into pairs, for instance if field parts is # [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third] # we will get two pairs here # [('MINUS', MyTable.second), ('PLUS', MyTable.third)] for (operator, other_field) in zip(field_parts[1::2], field_parts[2::2]): if operator == 'PLUS': field = field.__add__(other_field) # depends on [control=['if'], data=[]] elif operator == 'MINUS': field = field.__sub__(other_field) # depends on [control=['if'], data=[]] elif operator == 'DIVIDE': field = field.__div__(other_field) # depends on [control=['if'], data=[]] elif operator == 'MULTIPLY': field = field.__mul__(other_field) # depends on [control=['if'], data=[]] else: raise BadIngredient('Unknown operator {}'.format(operator)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # Handle the aggregator aggr = fld.get('aggregation', 'sum') if aggr is not None: aggr = aggr.strip() # depends on [control=['if'], data=['aggr']] if aggr not in aggregation_lookup: raise BadIngredient('unknown aggregation {}'.format(aggr)) # depends on [control=['if'], 
data=['aggr']] aggregator = aggregation_lookup[aggr] condition = parse_condition(fld.get('condition', None), selectable, aggregated=False, default_aggregation=default_aggregation) if condition is not None: field = case([(condition, field)]) # depends on [control=['if'], data=['condition']] return aggregator(field)
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
    """Return a dict of mutations in given genes and cell lines from CCLE.

    This is a specialized call to get_mutations tailored to CCLE cell lines.

    Parameters
    ----------
    gene_list : list[str]
        A list of HGNC gene symbols to get mutations in
    cell_lines : list[str]
        A list of CCLE cell line names to get mutations for.
    mutation_type : Optional[str]
        The type of mutation to filter to.
        mutation_type can be one of: missense, nonsense, frame_shift_ins,
        frame_shift_del, splice_site

    Returns
    -------
    mutations : dict
        The result from cBioPortal as a dict in the format
        {cell_line : {gene : [mutation1, mutation2, ...] }}

        Example:
        {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},
        'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
    """
    result = {}
    for line_name in cell_lines:
        # Every requested gene appears in the output, even with no hits.
        per_gene = {symbol: [] for symbol in gene_list}
        found = get_mutations(ccle_study, gene_list,
                              mutation_type=mutation_type,
                              case_id=line_name)
        pairs = zip(found['gene_symbol'], found['amino_acid_change'])
        for symbol, change in pairs:
            per_gene[symbol].append(str(change))
        result[line_name] = per_gene
    return result
def function[get_ccle_mutations, parameter[gene_list, cell_lines, mutation_type]]: constant[Return a dict of mutations in given genes and cell lines from CCLE. This is a specialized call to get_mutations tailored to CCLE cell lines. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site Returns ------- mutations : dict The result from cBioPortal as a dict in the format {cell_line : {gene : [mutation1, mutation2, ...] }} Example: {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']}, 'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}} ] variable[mutations] assign[=] <ast.DictComp object at 0x7da18bccbd60> for taget[name[cell_line]] in starred[name[cell_lines]] begin[:] variable[mutations_cl] assign[=] call[name[get_mutations], parameter[name[ccle_study], name[gene_list]]] for taget[tuple[[<ast.Name object at 0x7da1b0d1afe0>, <ast.Name object at 0x7da1b0d1ab00>]]] in starred[call[name[zip], parameter[call[name[mutations_cl]][constant[gene_symbol]], call[name[mutations_cl]][constant[amino_acid_change]]]]] begin[:] variable[aa_change] assign[=] call[name[str], parameter[name[aa_change]]] call[call[call[name[mutations]][name[cell_line]]][name[gene]].append, parameter[name[aa_change]]] return[name[mutations]]
keyword[def] identifier[get_ccle_mutations] ( identifier[gene_list] , identifier[cell_lines] , identifier[mutation_type] = keyword[None] ): literal[string] identifier[mutations] ={ identifier[cl] :{ identifier[g] :[] keyword[for] identifier[g] keyword[in] identifier[gene_list] } keyword[for] identifier[cl] keyword[in] identifier[cell_lines] } keyword[for] identifier[cell_line] keyword[in] identifier[cell_lines] : identifier[mutations_cl] = identifier[get_mutations] ( identifier[ccle_study] , identifier[gene_list] , identifier[mutation_type] = identifier[mutation_type] , identifier[case_id] = identifier[cell_line] ) keyword[for] identifier[gene] , identifier[aa_change] keyword[in] identifier[zip] ( identifier[mutations_cl] [ literal[string] ], identifier[mutations_cl] [ literal[string] ]): identifier[aa_change] = identifier[str] ( identifier[aa_change] ) identifier[mutations] [ identifier[cell_line] ][ identifier[gene] ]. identifier[append] ( identifier[aa_change] ) keyword[return] identifier[mutations]
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None): """Return a dict of mutations in given genes and cell lines from CCLE. This is a specialized call to get_mutations tailored to CCLE cell lines. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site Returns ------- mutations : dict The result from cBioPortal as a dict in the format {cell_line : {gene : [mutation1, mutation2, ...] }} Example: {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']}, 'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}} """ mutations = {cl: {g: [] for g in gene_list} for cl in cell_lines} for cell_line in cell_lines: mutations_cl = get_mutations(ccle_study, gene_list, mutation_type=mutation_type, case_id=cell_line) for (gene, aa_change) in zip(mutations_cl['gene_symbol'], mutations_cl['amino_acid_change']): aa_change = str(aa_change) mutations[cell_line][gene].append(aa_change) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['cell_line']] return mutations
# PLY/yacc grammar rule: the one-line docstring below is the grammar
# production itself and is read by the parser generator -- it must not be
# edited as ordinary documentation, and the ``p_`` function name is
# significant to PLY.
def p_common_scalar_magic_line(p):
    'common_scalar : LINE'
    # Build a MagicConstant AST node from the matched LINE token,
    # upper-casing the token text (presumably normalizing ``__line__`` to
    # ``__LINE__`` -- TODO confirm against the lexer) and recording the
    # source line number both as a constructor argument and as the node's
    # ``lineno``.
    p[0] = ast.MagicConstant(p[1].upper(), p.lineno(1), lineno=p.lineno(1))
def function[p_common_scalar_magic_line, parameter[p]]: constant[common_scalar : LINE] call[name[p]][constant[0]] assign[=] call[name[ast].MagicConstant, parameter[call[call[name[p]][constant[1]].upper, parameter[]], call[name[p].lineno, parameter[constant[1]]]]]
keyword[def] identifier[p_common_scalar_magic_line] ( identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[ast] . identifier[MagicConstant] ( identifier[p] [ literal[int] ]. identifier[upper] (), identifier[p] . identifier[lineno] ( literal[int] ), identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
def p_common_scalar_magic_line(p): """common_scalar : LINE""" p[0] = ast.MagicConstant(p[1].upper(), p.lineno(1), lineno=p.lineno(1))
def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary."""
    # Deserialize each stored initializer spec back into a live object;
    # the remaining entries fall back to empty/False defaults.
    revived = [tf.compat.v2.initializers.deserialize(spec)
               for spec in config.get('initializers', [])]
    return cls(initializers=revived,
               sizes=config.get('sizes', []),
               validate_args=config.get('validate_args', False))
def function[from_config, parameter[cls, config]]: constant[Instantiates an initializer from a configuration dictionary.] return[call[name[cls], parameter[]]]
keyword[def] identifier[from_config] ( identifier[cls] , identifier[config] ): literal[string] keyword[return] identifier[cls] (**{ literal[string] :[ identifier[tf] . identifier[compat] . identifier[v2] . identifier[initializers] . identifier[deserialize] ( identifier[init] ) keyword[for] identifier[init] keyword[in] identifier[config] . identifier[get] ( literal[string] ,[])], literal[string] : identifier[config] . identifier[get] ( literal[string] ,[]), literal[string] : identifier[config] . identifier[get] ( literal[string] , keyword[False] ), })
def from_config(cls, config): """Instantiates an initializer from a configuration dictionary.""" return cls(**{'initializers': [tf.compat.v2.initializers.deserialize(init) for init in config.get('initializers', [])], 'sizes': config.get('sizes', []), 'validate_args': config.get('validate_args', False)})
def get_geophysical_variables(ds):
    '''
    Returns a list of variable names for the variables detected as
    geophysical variables.

    :param netCDF4.Dataset nc: An open netCDF dataset
    '''
    # Keep only the variable names the is_geophysical predicate accepts.
    return [name for name in ds.variables if is_geophysical(ds, name)]
def function[get_geophysical_variables, parameter[ds]]: constant[ Returns a list of variable names for the variables detected as geophysical variables. :param netCDF4.Dataset nc: An open netCDF dataset ] variable[parameters] assign[=] list[[]] for taget[name[variable]] in starred[name[ds].variables] begin[:] if call[name[is_geophysical], parameter[name[ds], name[variable]]] begin[:] call[name[parameters].append, parameter[name[variable]]] return[name[parameters]]
keyword[def] identifier[get_geophysical_variables] ( identifier[ds] ): literal[string] identifier[parameters] =[] keyword[for] identifier[variable] keyword[in] identifier[ds] . identifier[variables] : keyword[if] identifier[is_geophysical] ( identifier[ds] , identifier[variable] ): identifier[parameters] . identifier[append] ( identifier[variable] ) keyword[return] identifier[parameters]
def get_geophysical_variables(ds): """ Returns a list of variable names for the variables detected as geophysical variables. :param netCDF4.Dataset nc: An open netCDF dataset """ parameters = [] for variable in ds.variables: if is_geophysical(ds, variable): parameters.append(variable) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['variable']] return parameters
def createRepoObjects():
    """Imports each 'plugin' in this package and creates a repo file from it.

    Scans the script's own directory for plugin modules, loads each one,
    and collects the repository object of every module whose ``enabled``
    flag is true.

    :return: dict mapping each enabled plugin's ``name`` to the object
        returned by its ``getRepository()``.
    """
    repositories = {}
    # FIX: the original wrapped the result in a redundant single-argument
    # os.path.join(), which returns its argument unchanged.
    repodir = getScriptLocation()
    for importer, name, _ispkg in pkgutil.iter_modules([repodir]):
        # NOTE(review): find_module/load_module are deprecated since
        # Python 3.4 in favor of importlib APIs; kept for behavioral parity.
        module = importer.find_module(name).load_module(name)
        if module.enabled:
            repositories[module.name] = module.getRepository()
    return repositories
def function[createRepoObjects, parameter[]]: constant[Imports each 'plugin' in this package and creates a repo file from it] variable[repositories] assign[=] dictionary[[], []] variable[repodir] assign[=] call[name[os].path.join, parameter[call[name[getScriptLocation], parameter[]]]] for taget[tuple[[<ast.Name object at 0x7da18bc70a60>, <ast.Name object at 0x7da18bc71720>, <ast.Name object at 0x7da18bc73d60>]]] in starred[call[name[pkgutil].iter_modules, parameter[list[[<ast.Name object at 0x7da18bc73700>]]]]] begin[:] variable[module] assign[=] call[call[name[importer].find_module, parameter[name[name]]].load_module, parameter[name[name]]] variable[repo_name] assign[=] name[module].name if name[module].enabled begin[:] call[name[repositories]][name[repo_name]] assign[=] call[name[module].getRepository, parameter[]] return[name[repositories]]
keyword[def] identifier[createRepoObjects] (): literal[string] identifier[repositories] ={} identifier[repodir] = identifier[os] . identifier[path] . identifier[join] ( identifier[getScriptLocation] ()) keyword[for] identifier[importer] , identifier[name] , identifier[ispkg] keyword[in] identifier[pkgutil] . identifier[iter_modules] ([ identifier[repodir] ]): identifier[module] = identifier[importer] . identifier[find_module] ( identifier[name] ). identifier[load_module] ( identifier[name] ) identifier[repo_name] = identifier[module] . identifier[name] keyword[if] identifier[module] . identifier[enabled] : identifier[repositories] [ identifier[repo_name] ]= identifier[module] . identifier[getRepository] () keyword[return] identifier[repositories]
def createRepoObjects(): """Imports each 'plugin' in this package and creates a repo file from it""" repositories = {} repodir = os.path.join(getScriptLocation()) for (importer, name, ispkg) in pkgutil.iter_modules([repodir]): module = importer.find_module(name).load_module(name) repo_name = module.name if module.enabled: repositories[repo_name] = module.getRepository() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return repositories
def CreateAttachmentAndUploadMedia(self, document_link, readable_stream, options=None):
    """Creates an attachment and upload media.

    :param str document_link:
        The link to the document.
    :param (file-like stream object) readable_stream:
    :param dict options:
        The request options for the request.

    :return:
        The created Attachment.
    :rtype:
        dict

    """
    request_options = {} if options is None else options
    # Resolve the parent document id, default headers and resource path
    # from the document link before issuing the create request.
    doc_id, headers, resource_path = self._GetItemIdWithPathForAttachmentMedia(
        document_link, request_options
    )
    return self.Create(
        readable_stream,
        resource_path,
        'attachments',
        doc_id,
        headers,
        request_options,
    )
def function[CreateAttachmentAndUploadMedia, parameter[self, document_link, readable_stream, options]]: constant[Creates an attachment and upload media. :param str document_link: The link to the document. :param (file-like stream object) readable_stream: :param dict options: The request options for the request. :return: The created Attachment. :rtype: dict ] if compare[name[options] is constant[None]] begin[:] variable[options] assign[=] dictionary[[], []] <ast.Tuple object at 0x7da1b18e7970> assign[=] call[name[self]._GetItemIdWithPathForAttachmentMedia, parameter[name[document_link], name[options]]] return[call[name[self].Create, parameter[name[readable_stream], name[path], constant[attachments], name[document_id], name[initial_headers], name[options]]]]
keyword[def] identifier[CreateAttachmentAndUploadMedia] ( identifier[self] , identifier[document_link] , identifier[readable_stream] , identifier[options] = keyword[None] ): literal[string] keyword[if] identifier[options] keyword[is] keyword[None] : identifier[options] ={} identifier[document_id] , identifier[initial_headers] , identifier[path] = identifier[self] . identifier[_GetItemIdWithPathForAttachmentMedia] ( identifier[document_link] , identifier[options] ) keyword[return] identifier[self] . identifier[Create] ( identifier[readable_stream] , identifier[path] , literal[string] , identifier[document_id] , identifier[initial_headers] , identifier[options] )
def CreateAttachmentAndUploadMedia(self, document_link, readable_stream, options=None): """Creates an attachment and upload media. :param str document_link: The link to the document. :param (file-like stream object) readable_stream: :param dict options: The request options for the request. :return: The created Attachment. :rtype: dict """ if options is None: options = {} # depends on [control=['if'], data=['options']] (document_id, initial_headers, path) = self._GetItemIdWithPathForAttachmentMedia(document_link, options) return self.Create(readable_stream, path, 'attachments', document_id, initial_headers, options)
def _unblast(name2vals, name_map): """Helper function to lift str -> bool maps used by aiger to the word level. Dual of the `_blast` function.""" def _collect(names): return tuple(name2vals[n] for n in names) return {bvname: _collect(names) for bvname, names in name_map}
def function[_unblast, parameter[name2vals, name_map]]: constant[Helper function to lift str -> bool maps used by aiger to the word level. Dual of the `_blast` function.] def function[_collect, parameter[names]]: return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18f09d330>]]] return[<ast.DictComp object at 0x7da18f09d990>]
keyword[def] identifier[_unblast] ( identifier[name2vals] , identifier[name_map] ): literal[string] keyword[def] identifier[_collect] ( identifier[names] ): keyword[return] identifier[tuple] ( identifier[name2vals] [ identifier[n] ] keyword[for] identifier[n] keyword[in] identifier[names] ) keyword[return] { identifier[bvname] : identifier[_collect] ( identifier[names] ) keyword[for] identifier[bvname] , identifier[names] keyword[in] identifier[name_map] }
def _unblast(name2vals, name_map): """Helper function to lift str -> bool maps used by aiger to the word level. Dual of the `_blast` function.""" def _collect(names): return tuple((name2vals[n] for n in names)) return {bvname: _collect(names) for (bvname, names) in name_map}
def _find_cfgs(path, cfgs=None): ''' Find all buildout configs in a subdirectory. only buildout.cfg and etc/buildout.cfg are valid in:: path directory where to start to search cfg a optional list to append to . ├── buildout.cfg ├── etc │   └── buildout.cfg ├── foo │   └── buildout.cfg └── var └── buildout.cfg ''' ignored = ['var', 'parts'] dirs = [] if not cfgs: cfgs = [] for i in os.listdir(path): fi = os.path.join(path, i) if fi.endswith('.cfg') and os.path.isfile(fi): cfgs.append(fi) if os.path.isdir(fi) and (i not in ignored): dirs.append(fi) for fpath in dirs: for p, ids, ifs in salt.utils.path.os_walk(fpath): for i in ifs: if i.endswith('.cfg'): cfgs.append(os.path.join(p, i)) return cfgs
def function[_find_cfgs, parameter[path, cfgs]]: constant[ Find all buildout configs in a subdirectory. only buildout.cfg and etc/buildout.cfg are valid in:: path directory where to start to search cfg a optional list to append to . ├── buildout.cfg ├── etc │   └── buildout.cfg ├── foo │   └── buildout.cfg └── var └── buildout.cfg ] variable[ignored] assign[=] list[[<ast.Constant object at 0x7da18bc70a60>, <ast.Constant object at 0x7da18bc71840>]] variable[dirs] assign[=] list[[]] if <ast.UnaryOp object at 0x7da18bc722c0> begin[:] variable[cfgs] assign[=] list[[]] for taget[name[i]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:] variable[fi] assign[=] call[name[os].path.join, parameter[name[path], name[i]]] if <ast.BoolOp object at 0x7da18bc72da0> begin[:] call[name[cfgs].append, parameter[name[fi]]] if <ast.BoolOp object at 0x7da18bc70910> begin[:] call[name[dirs].append, parameter[name[fi]]] for taget[name[fpath]] in starred[name[dirs]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bc72590>, <ast.Name object at 0x7da18bc70070>, <ast.Name object at 0x7da18bc721d0>]]] in starred[call[name[salt].utils.path.os_walk, parameter[name[fpath]]]] begin[:] for taget[name[i]] in starred[name[ifs]] begin[:] if call[name[i].endswith, parameter[constant[.cfg]]] begin[:] call[name[cfgs].append, parameter[call[name[os].path.join, parameter[name[p], name[i]]]]] return[name[cfgs]]
keyword[def] identifier[_find_cfgs] ( identifier[path] , identifier[cfgs] = keyword[None] ): literal[string] identifier[ignored] =[ literal[string] , literal[string] ] identifier[dirs] =[] keyword[if] keyword[not] identifier[cfgs] : identifier[cfgs] =[] keyword[for] identifier[i] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ): identifier[fi] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[i] ) keyword[if] identifier[fi] . identifier[endswith] ( literal[string] ) keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[fi] ): identifier[cfgs] . identifier[append] ( identifier[fi] ) keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[fi] ) keyword[and] ( identifier[i] keyword[not] keyword[in] identifier[ignored] ): identifier[dirs] . identifier[append] ( identifier[fi] ) keyword[for] identifier[fpath] keyword[in] identifier[dirs] : keyword[for] identifier[p] , identifier[ids] , identifier[ifs] keyword[in] identifier[salt] . identifier[utils] . identifier[path] . identifier[os_walk] ( identifier[fpath] ): keyword[for] identifier[i] keyword[in] identifier[ifs] : keyword[if] identifier[i] . identifier[endswith] ( literal[string] ): identifier[cfgs] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[p] , identifier[i] )) keyword[return] identifier[cfgs]
def _find_cfgs(path, cfgs=None): """ Find all buildout configs in a subdirectory. only buildout.cfg and etc/buildout.cfg are valid in:: path directory where to start to search cfg a optional list to append to . ├── buildout.cfg ├── etc │\xa0\xa0 └── buildout.cfg ├── foo │\xa0\xa0 └── buildout.cfg └── var └── buildout.cfg """ ignored = ['var', 'parts'] dirs = [] if not cfgs: cfgs = [] # depends on [control=['if'], data=[]] for i in os.listdir(path): fi = os.path.join(path, i) if fi.endswith('.cfg') and os.path.isfile(fi): cfgs.append(fi) # depends on [control=['if'], data=[]] if os.path.isdir(fi) and i not in ignored: dirs.append(fi) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] for fpath in dirs: for (p, ids, ifs) in salt.utils.path.os_walk(fpath): for i in ifs: if i.endswith('.cfg'): cfgs.append(os.path.join(p, i)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['fpath']] return cfgs
def convert_pooling(builder, layer, input_names, output_names, keras_layer):
    """
    Convert pooling layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    _check_data_format(keras_layer)

    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])

    # Pooling layer type -- both the windowed and the global Keras variants
    # map onto Core ML's two pooling kinds.
    if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or \
            isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D):
        layer_type_str = 'MAX'
    elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or \
            isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
        layer_type_str = 'AVERAGE'
    else:
        raise TypeError("Pooling type %s not supported" % keras_layer)

    # if it's global, set the global flag
    if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D):
        # 2D global pooling: kernel and stride are set to zero here;
        # presumably the backend ignores them when is_global is set --
        # TODO confirm against add_pooling's contract.
        global_pooling = True
        height, width = (0, 0)
        stride_height, stride_width = (0, 0)
        padding_type = 'VALID'
    elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
            isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
        # 1D global pooling: 1D global pooling seems problematic in the backend,
        # use this work-around: emit a regular pooling layer whose window
        # spans the full input width instead of setting is_global.
        global_pooling = False
        # NOTE(review): assumes input_shape is (batch, steps, channels) --
        # confirm this matches the data format checked above.
        _, width, channels = keras_layer.input_shape
        height = 1
        stride_height, stride_width = height, width
        padding_type = 'VALID'
    else:
        global_pooling = False

        # Set pool sizes and strides
        # 1D cases:
        if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
                isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
                isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
                isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
            # Keras allows pool_size / strides to be either an int or a tuple.
            pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int else keras_layer.pool_size[0]
            height, width = 1, pool_size
            if keras_layer.strides is not None:
                strides = keras_layer.strides if type(keras_layer.strides) is int else keras_layer.strides[0]
                stride_height, stride_width = 1, strides
            else:
                # When strides is unspecified it defaults to the pool size.
                stride_height, stride_width = 1, pool_size
        # 2D cases:
        else:
            height, width = keras_layer.pool_size
            if keras_layer.strides is None:
                stride_height, stride_width = height, width
            else:
                stride_height, stride_width = keras_layer.strides

        # Padding: translate the Keras border mode to the Core ML name.
        padding = keras_layer.padding
        if keras_layer.padding == 'valid':
            padding_type = 'VALID'
        elif keras_layer.padding == 'same':
            padding_type = 'SAME'
        else:
            raise TypeError("Border mode %s not supported" % padding)

    builder.add_pooling(name=layer,
                        height=height,
                        width=width,
                        stride_height=stride_height,
                        stride_width=stride_width,
                        layer_type=layer_type_str,
                        padding_type=padding_type,
                        input_name=input_name,
                        output_name=output_name,
                        exclude_pad_area=True,
                        is_global=global_pooling)
def function[convert_pooling, parameter[builder, layer, input_names, output_names, keras_layer]]: constant[ Convert pooling layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. ] call[name[_check_data_format], parameter[name[keras_layer]]] <ast.Tuple object at 0x7da1b2059180> assign[=] tuple[[<ast.Subscript object at 0x7da1b20581f0>, <ast.Subscript object at 0x7da1b205b190>]] if <ast.BoolOp object at 0x7da1b2059510> begin[:] variable[layer_type_str] assign[=] constant[MAX] if <ast.BoolOp object at 0x7da1b20590c0> begin[:] variable[global_pooling] assign[=] constant[True] <ast.Tuple object at 0x7da1b205ac80> assign[=] tuple[[<ast.Constant object at 0x7da1b2058430>, <ast.Constant object at 0x7da1b2058040>]] <ast.Tuple object at 0x7da1b2058490> assign[=] tuple[[<ast.Constant object at 0x7da1b205a200>, <ast.Constant object at 0x7da1b205aa40>]] variable[padding_type] assign[=] constant[VALID] call[name[builder].add_pooling, parameter[]]
keyword[def] identifier[convert_pooling] ( identifier[builder] , identifier[layer] , identifier[input_names] , identifier[output_names] , identifier[keras_layer] ): literal[string] identifier[_check_data_format] ( identifier[keras_layer] ) identifier[input_name] , identifier[output_name] =( identifier[input_names] [ literal[int] ], identifier[output_names] [ literal[int] ]) keyword[if] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[convolutional] . identifier[MaxPooling2D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[convolutional] . identifier[MaxPooling1D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalMaxPooling2D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalMaxPooling1D] ): identifier[layer_type_str] = literal[string] keyword[elif] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[convolutional] . identifier[AveragePooling2D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[convolutional] . identifier[AveragePooling1D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalAveragePooling2D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalAveragePooling1D] ): identifier[layer_type_str] = literal[string] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[keras_layer] ) keyword[if] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . 
identifier[GlobalMaxPooling2D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalAveragePooling2D] ): identifier[global_pooling] = keyword[True] identifier[height] , identifier[width] =( literal[int] , literal[int] ) identifier[stride_height] , identifier[stride_width] =( literal[int] , literal[int] ) identifier[padding_type] = literal[string] keyword[elif] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalMaxPooling1D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalAveragePooling1D] ): identifier[global_pooling] = keyword[False] identifier[_] , identifier[width] , identifier[channels] = identifier[keras_layer] . identifier[input_shape] identifier[height] = literal[int] identifier[stride_height] , identifier[stride_width] = identifier[height] , identifier[width] identifier[padding_type] = literal[string] keyword[else] : identifier[global_pooling] = keyword[False] keyword[if] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[convolutional] . identifier[MaxPooling1D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalMaxPooling1D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[convolutional] . identifier[AveragePooling1D] ) keyword[or] identifier[isinstance] ( identifier[keras_layer] , identifier[_keras] . identifier[layers] . identifier[pooling] . identifier[GlobalAveragePooling1D] ): identifier[pool_size] = identifier[keras_layer] . identifier[pool_size] keyword[if] identifier[type] ( identifier[keras_layer] . 
identifier[pool_size] ) keyword[is] identifier[int] keyword[else] identifier[keras_layer] . identifier[pool_size] [ literal[int] ] identifier[height] , identifier[width] = literal[int] , identifier[pool_size] keyword[if] identifier[keras_layer] . identifier[strides] keyword[is] keyword[not] keyword[None] : identifier[strides] = identifier[keras_layer] . identifier[strides] keyword[if] identifier[type] ( identifier[keras_layer] . identifier[strides] ) keyword[is] identifier[int] keyword[else] identifier[keras_layer] . identifier[strides] [ literal[int] ] identifier[stride_height] , identifier[stride_width] = literal[int] , identifier[strides] keyword[else] : identifier[stride_height] , identifier[stride_width] = literal[int] , identifier[pool_size] keyword[else] : identifier[height] , identifier[width] = identifier[keras_layer] . identifier[pool_size] keyword[if] identifier[keras_layer] . identifier[strides] keyword[is] keyword[None] : identifier[stride_height] , identifier[stride_width] = identifier[height] , identifier[width] keyword[else] : identifier[stride_height] , identifier[stride_width] = identifier[keras_layer] . identifier[strides] identifier[padding] = identifier[keras_layer] . identifier[padding] keyword[if] identifier[keras_layer] . identifier[padding] == literal[string] : identifier[padding_type] = literal[string] keyword[elif] identifier[keras_layer] . identifier[padding] == literal[string] : identifier[padding_type] = literal[string] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[padding] ) identifier[builder] . 
identifier[add_pooling] ( identifier[name] = identifier[layer] , identifier[height] = identifier[height] , identifier[width] = identifier[width] , identifier[stride_height] = identifier[stride_height] , identifier[stride_width] = identifier[stride_width] , identifier[layer_type] = identifier[layer_type_str] , identifier[padding_type] = identifier[padding_type] , identifier[input_name] = identifier[input_name] , identifier[output_name] = identifier[output_name] , identifier[exclude_pad_area] = keyword[True] , identifier[is_global] = identifier[global_pooling] )
def convert_pooling(builder, layer, input_names, output_names, keras_layer): """ Convert pooling layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ _check_data_format(keras_layer) # Get input and output names (input_name, output_name) = (input_names[0], output_names[0]) # Pooling layer type if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D): layer_type_str = 'MAX' # depends on [control=['if'], data=[]] elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D): layer_type_str = 'AVERAGE' # depends on [control=['if'], data=[]] else: raise TypeError('Pooling type %s not supported' % keras_layer) # if it's global, set the global flag if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D): # 2D global pooling global_pooling = True (height, width) = (0, 0) (stride_height, stride_width) = (0, 0) padding_type = 'VALID' # depends on [control=['if'], data=[]] elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D): # 1D global pooling: 1D global pooling seems problematic in the backend, # use this work-around global_pooling = False (_, width, channels) = keras_layer.input_shape height = 1 (stride_height, stride_width) = (height, width) padding_type = 'VALID' # depends on [control=['if'], data=[]] else: global_pooling = False # Set pool sizes and 
strides # 1D cases: if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D): pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int else keras_layer.pool_size[0] (height, width) = (1, pool_size) if keras_layer.strides is not None: strides = keras_layer.strides if type(keras_layer.strides) is int else keras_layer.strides[0] (stride_height, stride_width) = (1, strides) # depends on [control=['if'], data=[]] else: (stride_height, stride_width) = (1, pool_size) # depends on [control=['if'], data=[]] else: # 2D cases: (height, width) = keras_layer.pool_size if keras_layer.strides is None: (stride_height, stride_width) = (height, width) # depends on [control=['if'], data=[]] else: (stride_height, stride_width) = keras_layer.strides # Padding padding = keras_layer.padding if keras_layer.padding == 'valid': padding_type = 'VALID' # depends on [control=['if'], data=[]] elif keras_layer.padding == 'same': padding_type = 'SAME' # depends on [control=['if'], data=[]] else: raise TypeError('Border mode %s not supported' % padding) builder.add_pooling(name=layer, height=height, width=width, stride_height=stride_height, stride_width=stride_width, layer_type=layer_type_str, padding_type=padding_type, input_name=input_name, output_name=output_name, exclude_pad_area=True, is_global=global_pooling)
def inFootprint(config, pixels, nside=None):
    """
    Open each valid filename for the set of pixels and determine the
    set of subpixels with valid data.

    Parameters
    ----------
    config : anything accepted by the ``Config`` constructor
    pixels : scalar or array of healpix pixel ids at resolution ``nside``
    nside : healpix resolution of ``pixels``; defaults to the
        configuration's ``coords.nside_likelihood``.

    Returns
    -------
    inside : boolean array, True where the pixel has valid mask data
        in both bands.
    """
    config = Config(config)
    nside_catalog = config['coords']['nside_catalog']
    nside_likelihood = config['coords']['nside_likelihood']
    nside_pixel = config['coords']['nside_pixel']

    # Accept a scalar pixel id as well as an array of ids.
    if np.isscalar(pixels):
        pixels = np.array([pixels])
    if nside is None:
        nside = nside_likelihood

    filenames = config.getFilenames()
    catalog_pixels = filenames['pix'].compressed()

    inside = np.zeros(len(pixels), dtype=bool)
    if not nside_catalog:
        # Un-pixelized catalog: a single entry covers the whole sky.
        catalog_pix = [0]
    else:
        # Restrict to the catalog pixels that overlap the requested pixels.
        catalog_pix = superpixel(pixels, nside, nside_catalog)
        catalog_pix = np.intersect1d(catalog_pix, catalog_pixels)

    for fnames in filenames[catalog_pix]:
        # BUGFIX: previously logged ``filenames['mask_1']`` (the whole
        # column) instead of the file actually being loaded.
        logger.debug("Loading %s" % fnames['mask_1'])
        _nside, subpix_1, val_1 = ugali.utils.healpix.read_partial_map(fnames['mask_1'], 'MAGLIM', fullsky=False)
        logger.debug("Loading %s" % fnames['mask_2'])
        _nside, subpix_2, val_2 = ugali.utils.healpix.read_partial_map(fnames['mask_2'], 'MAGLIM', fullsky=False)
        # Valid data requires coverage in both masks.
        subpix = np.intersect1d(subpix_1, subpix_2)
        superpix = np.unique(superpixel(subpix, nside_pixel, nside))
        inside |= np.in1d(pixels, superpix)

    return inside
def function[inFootprint, parameter[config, pixels, nside]]: constant[ Open each valid filename for the set of pixels and determine the set of subpixels with valid data. ] variable[config] assign[=] call[name[Config], parameter[name[config]]] variable[nside_catalog] assign[=] call[call[name[config]][constant[coords]]][constant[nside_catalog]] variable[nside_likelihood] assign[=] call[call[name[config]][constant[coords]]][constant[nside_likelihood]] variable[nside_pixel] assign[=] call[call[name[config]][constant[coords]]][constant[nside_pixel]] if call[name[np].isscalar, parameter[name[pixels]]] begin[:] variable[pixels] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da18dc07be0>]]]] if compare[name[nside] is constant[None]] begin[:] variable[nside] assign[=] name[nside_likelihood] variable[filenames] assign[=] call[name[config].getFilenames, parameter[]] variable[catalog_pixels] assign[=] call[call[name[filenames]][constant[pix]].compressed, parameter[]] variable[inside] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[pixels]]]]] if <ast.UnaryOp object at 0x7da1b24e7730> begin[:] variable[catalog_pix] assign[=] list[[<ast.Constant object at 0x7da1b24e56f0>]] for taget[name[fnames]] in starred[call[name[filenames]][name[catalog_pix]]] begin[:] call[name[logger].debug, parameter[binary_operation[constant[Loading %s] <ast.Mod object at 0x7da2590d6920> call[name[filenames]][constant[mask_1]]]]] <ast.Tuple object at 0x7da20c6aa050> assign[=] call[name[ugali].utils.healpix.read_partial_map, parameter[call[name[fnames]][constant[mask_1]], constant[MAGLIM]]] call[name[logger].debug, parameter[binary_operation[constant[Loading %s] <ast.Mod object at 0x7da2590d6920> call[name[fnames]][constant[mask_2]]]]] <ast.Tuple object at 0x7da1b25d1b70> assign[=] call[name[ugali].utils.healpix.read_partial_map, parameter[call[name[fnames]][constant[mask_2]], constant[MAGLIM]]] variable[subpix] assign[=] call[name[np].intersect1d, 
parameter[name[subpix_1], name[subpix_2]]] variable[superpix] assign[=] call[name[np].unique, parameter[call[name[superpixel], parameter[name[subpix], name[nside_pixel], name[nside]]]]] <ast.AugAssign object at 0x7da18fe932b0> return[name[inside]]
keyword[def] identifier[inFootprint] ( identifier[config] , identifier[pixels] , identifier[nside] = keyword[None] ): literal[string] identifier[config] = identifier[Config] ( identifier[config] ) identifier[nside_catalog] = identifier[config] [ literal[string] ][ literal[string] ] identifier[nside_likelihood] = identifier[config] [ literal[string] ][ literal[string] ] identifier[nside_pixel] = identifier[config] [ literal[string] ][ literal[string] ] keyword[if] identifier[np] . identifier[isscalar] ( identifier[pixels] ): identifier[pixels] = identifier[np] . identifier[array] ([ identifier[pixels] ]) keyword[if] identifier[nside] keyword[is] keyword[None] : identifier[nside] = identifier[nside_likelihood] identifier[filenames] = identifier[config] . identifier[getFilenames] () identifier[catalog_pixels] = identifier[filenames] [ literal[string] ]. identifier[compressed] () identifier[inside] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[pixels] ), identifier[dtype] = identifier[bool] ) keyword[if] keyword[not] identifier[nside_catalog] : identifier[catalog_pix] =[ literal[int] ] keyword[else] : identifier[catalog_pix] = identifier[superpixel] ( identifier[pixels] , identifier[nside] , identifier[nside_catalog] ) identifier[catalog_pix] = identifier[np] . identifier[intersect1d] ( identifier[catalog_pix] , identifier[catalog_pixels] ) keyword[for] identifier[fnames] keyword[in] identifier[filenames] [ identifier[catalog_pix] ]: identifier[logger] . identifier[debug] ( literal[string] % identifier[filenames] [ literal[string] ]) identifier[_nside] , identifier[subpix_1] , identifier[val_1] = identifier[ugali] . identifier[utils] . identifier[healpix] . identifier[read_partial_map] ( identifier[fnames] [ literal[string] ], literal[string] , identifier[fullsky] = keyword[False] ) identifier[logger] . 
identifier[debug] ( literal[string] % identifier[fnames] [ literal[string] ]) identifier[_nside] , identifier[subpix_2] , identifier[val_2] = identifier[ugali] . identifier[utils] . identifier[healpix] . identifier[read_partial_map] ( identifier[fnames] [ literal[string] ], literal[string] , identifier[fullsky] = keyword[False] ) identifier[subpix] = identifier[np] . identifier[intersect1d] ( identifier[subpix_1] , identifier[subpix_2] ) identifier[superpix] = identifier[np] . identifier[unique] ( identifier[superpixel] ( identifier[subpix] , identifier[nside_pixel] , identifier[nside] )) identifier[inside] |= identifier[np] . identifier[in1d] ( identifier[pixels] , identifier[superpix] ) keyword[return] identifier[inside]
def inFootprint(config, pixels, nside=None): """ Open each valid filename for the set of pixels and determine the set of subpixels with valid data. """ config = Config(config) nside_catalog = config['coords']['nside_catalog'] nside_likelihood = config['coords']['nside_likelihood'] nside_pixel = config['coords']['nside_pixel'] if np.isscalar(pixels): pixels = np.array([pixels]) # depends on [control=['if'], data=[]] if nside is None: nside = nside_likelihood # depends on [control=['if'], data=['nside']] filenames = config.getFilenames() catalog_pixels = filenames['pix'].compressed() inside = np.zeros(len(pixels), dtype=bool) if not nside_catalog: catalog_pix = [0] # depends on [control=['if'], data=[]] else: catalog_pix = superpixel(pixels, nside, nside_catalog) catalog_pix = np.intersect1d(catalog_pix, catalog_pixels) for fnames in filenames[catalog_pix]: logger.debug('Loading %s' % filenames['mask_1']) #subpix_1,val_1 = ugali.utils.skymap.readSparseHealpixMap(fnames['mask_1'],'MAGLIM',construct_map=False) (_nside, subpix_1, val_1) = ugali.utils.healpix.read_partial_map(fnames['mask_1'], 'MAGLIM', fullsky=False) logger.debug('Loading %s' % fnames['mask_2']) #subpix_2,val_2 = ugali.utils.skymap.readSparseHealpixMap(fnames['mask_2'],'MAGLIM',construct_map=False) (_nside, subpix_2, val_2) = ugali.utils.healpix.read_partial_map(fnames['mask_2'], 'MAGLIM', fullsky=False) subpix = np.intersect1d(subpix_1, subpix_2) superpix = np.unique(superpixel(subpix, nside_pixel, nside)) inside |= np.in1d(pixels, superpix) # depends on [control=['for'], data=['fnames']] return inside
def print_matrix(self, one_vs_all=False, class_name=None):
    """
    Print confusion matrix.

    :param one_vs_all : One-Vs-All mode flag
    :type one_vs_all : bool
    :param class_name : target class name for One-Vs-All mode
    :type class_name : any valid type
    :return: None
    """
    matrix_classes, matrix_table = self.classes, self.table
    if one_vs_all:
        # Collapse the matrix into a binary one for the requested class.
        matrix_classes, matrix_table = one_vs_all_func(
            matrix_classes, matrix_table,
            self.TP, self.TN, self.FP, self.FN, class_name)
    print(table_print(matrix_classes, matrix_table))
def function[print_matrix, parameter[self, one_vs_all, class_name]]: constant[ Print confusion matrix. :param one_vs_all : One-Vs-All mode flag :type one_vs_all : bool :param class_name : target class name for One-Vs-All mode :type class_name : any valid type :return: None ] variable[classes] assign[=] name[self].classes variable[table] assign[=] name[self].table if name[one_vs_all] begin[:] <ast.List object at 0x7da1b160b430> assign[=] call[name[one_vs_all_func], parameter[name[classes], name[table], name[self].TP, name[self].TN, name[self].FP, name[self].FN, name[class_name]]] call[name[print], parameter[call[name[table_print], parameter[name[classes], name[table]]]]]
keyword[def] identifier[print_matrix] ( identifier[self] , identifier[one_vs_all] = keyword[False] , identifier[class_name] = keyword[None] ): literal[string] identifier[classes] = identifier[self] . identifier[classes] identifier[table] = identifier[self] . identifier[table] keyword[if] identifier[one_vs_all] : [ identifier[classes] , identifier[table] ]= identifier[one_vs_all_func] ( identifier[classes] , identifier[table] , identifier[self] . identifier[TP] , identifier[self] . identifier[TN] , identifier[self] . identifier[FP] , identifier[self] . identifier[FN] , identifier[class_name] ) identifier[print] ( identifier[table_print] ( identifier[classes] , identifier[table] ))
def print_matrix(self, one_vs_all=False, class_name=None): """ Print confusion matrix. :param one_vs_all : One-Vs-All mode flag :type one_vs_all : bool :param class_name : target class name for One-Vs-All mode :type class_name : any valid type :return: None """ classes = self.classes table = self.table if one_vs_all: [classes, table] = one_vs_all_func(classes, table, self.TP, self.TN, self.FP, self.FN, class_name) # depends on [control=['if'], data=[]] print(table_print(classes, table))
def get_link(cls, source, target, topology=None): """ Find link between source and target, (or vice versa, order is irrelevant). :param source: ip or mac addresses :param target: ip or mac addresses :param topology: optional topology relation :returns: Link object :raises: LinkNotFound """ a = source b = target # ensure parameters are coherent if not (valid_ipv4(a) and valid_ipv4(b)) and not (valid_ipv6(a) and valid_ipv6(b)) and not (valid_mac(a) and valid_mac(b)): raise ValueError('Expecting valid ipv4, ipv6 or mac address') # get interfaces a = cls._get_link_interface(a) b = cls._get_link_interface(b) # raise LinkDataNotFound if an interface is not found not_found = [] if a is None: not_found.append(source) if b is None: not_found.append(target) if not_found: msg = 'the following interfaces could not be found: {0}'.format(', '.join(not_found)) raise LinkDataNotFound(msg) # find link with interfaces # inverse order is also ok q = (Q(interface_a=a, interface_b=b) | Q(interface_a=b, interface_b=a)) # add topology to lookup if topology: q = q & Q(topology=topology) link = Link.objects.filter(q).first() if link is None: raise LinkNotFound('Link matching query does not exist', interface_a=a, interface_b=b, topology=topology) return link
def function[get_link, parameter[cls, source, target, topology]]: constant[ Find link between source and target, (or vice versa, order is irrelevant). :param source: ip or mac addresses :param target: ip or mac addresses :param topology: optional topology relation :returns: Link object :raises: LinkNotFound ] variable[a] assign[=] name[source] variable[b] assign[=] name[target] if <ast.BoolOp object at 0x7da1b26afa90> begin[:] <ast.Raise object at 0x7da1b26aeb00> variable[a] assign[=] call[name[cls]._get_link_interface, parameter[name[a]]] variable[b] assign[=] call[name[cls]._get_link_interface, parameter[name[b]]] variable[not_found] assign[=] list[[]] if compare[name[a] is constant[None]] begin[:] call[name[not_found].append, parameter[name[source]]] if compare[name[b] is constant[None]] begin[:] call[name[not_found].append, parameter[name[target]]] if name[not_found] begin[:] variable[msg] assign[=] call[constant[the following interfaces could not be found: {0}].format, parameter[call[constant[, ].join, parameter[name[not_found]]]]] <ast.Raise object at 0x7da1b26aec50> variable[q] assign[=] binary_operation[call[name[Q], parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]] if name[topology] begin[:] variable[q] assign[=] binary_operation[name[q] <ast.BitAnd object at 0x7da2590d6b60> call[name[Q], parameter[]]] variable[link] assign[=] call[call[name[Link].objects.filter, parameter[name[q]]].first, parameter[]] if compare[name[link] is constant[None]] begin[:] <ast.Raise object at 0x7da1b26aedd0> return[name[link]]
keyword[def] identifier[get_link] ( identifier[cls] , identifier[source] , identifier[target] , identifier[topology] = keyword[None] ): literal[string] identifier[a] = identifier[source] identifier[b] = identifier[target] keyword[if] keyword[not] ( identifier[valid_ipv4] ( identifier[a] ) keyword[and] identifier[valid_ipv4] ( identifier[b] )) keyword[and] keyword[not] ( identifier[valid_ipv6] ( identifier[a] ) keyword[and] identifier[valid_ipv6] ( identifier[b] )) keyword[and] keyword[not] ( identifier[valid_mac] ( identifier[a] ) keyword[and] identifier[valid_mac] ( identifier[b] )): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[a] = identifier[cls] . identifier[_get_link_interface] ( identifier[a] ) identifier[b] = identifier[cls] . identifier[_get_link_interface] ( identifier[b] ) identifier[not_found] =[] keyword[if] identifier[a] keyword[is] keyword[None] : identifier[not_found] . identifier[append] ( identifier[source] ) keyword[if] identifier[b] keyword[is] keyword[None] : identifier[not_found] . identifier[append] ( identifier[target] ) keyword[if] identifier[not_found] : identifier[msg] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[not_found] )) keyword[raise] identifier[LinkDataNotFound] ( identifier[msg] ) identifier[q] =( identifier[Q] ( identifier[interface_a] = identifier[a] , identifier[interface_b] = identifier[b] )| identifier[Q] ( identifier[interface_a] = identifier[b] , identifier[interface_b] = identifier[a] )) keyword[if] identifier[topology] : identifier[q] = identifier[q] & identifier[Q] ( identifier[topology] = identifier[topology] ) identifier[link] = identifier[Link] . identifier[objects] . identifier[filter] ( identifier[q] ). 
identifier[first] () keyword[if] identifier[link] keyword[is] keyword[None] : keyword[raise] identifier[LinkNotFound] ( literal[string] , identifier[interface_a] = identifier[a] , identifier[interface_b] = identifier[b] , identifier[topology] = identifier[topology] ) keyword[return] identifier[link]
def get_link(cls, source, target, topology=None): """ Find link between source and target, (or vice versa, order is irrelevant). :param source: ip or mac addresses :param target: ip or mac addresses :param topology: optional topology relation :returns: Link object :raises: LinkNotFound """ a = source b = target # ensure parameters are coherent if not (valid_ipv4(a) and valid_ipv4(b)) and (not (valid_ipv6(a) and valid_ipv6(b))) and (not (valid_mac(a) and valid_mac(b))): raise ValueError('Expecting valid ipv4, ipv6 or mac address') # depends on [control=['if'], data=[]] # get interfaces a = cls._get_link_interface(a) b = cls._get_link_interface(b) # raise LinkDataNotFound if an interface is not found not_found = [] if a is None: not_found.append(source) # depends on [control=['if'], data=[]] if b is None: not_found.append(target) # depends on [control=['if'], data=[]] if not_found: msg = 'the following interfaces could not be found: {0}'.format(', '.join(not_found)) raise LinkDataNotFound(msg) # depends on [control=['if'], data=[]] # find link with interfaces # inverse order is also ok q = Q(interface_a=a, interface_b=b) | Q(interface_a=b, interface_b=a) # add topology to lookup if topology: q = q & Q(topology=topology) # depends on [control=['if'], data=[]] link = Link.objects.filter(q).first() if link is None: raise LinkNotFound('Link matching query does not exist', interface_a=a, interface_b=b, topology=topology) # depends on [control=['if'], data=[]] return link
def enqueue_global(self, message: Message): """ Helper to enqueue a message in the global queue (e.g. Delivered) """ self.enqueue( queue_identifier=QueueIdentifier( recipient=self.receiver, channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE, ), message=message, )
def function[enqueue_global, parameter[self, message]]: constant[ Helper to enqueue a message in the global queue (e.g. Delivered) ] call[name[self].enqueue, parameter[]]
keyword[def] identifier[enqueue_global] ( identifier[self] , identifier[message] : identifier[Message] ): literal[string] identifier[self] . identifier[enqueue] ( identifier[queue_identifier] = identifier[QueueIdentifier] ( identifier[recipient] = identifier[self] . identifier[receiver] , identifier[channel_identifier] = identifier[CHANNEL_IDENTIFIER_GLOBAL_QUEUE] , ), identifier[message] = identifier[message] , )
def enqueue_global(self, message: Message): """ Helper to enqueue a message in the global queue (e.g. Delivered) """ self.enqueue(queue_identifier=QueueIdentifier(recipient=self.receiver, channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE), message=message)
def set_ytick_labels_for_all(self, row_column_list=None, labels=None): """Manually specify the x-axis tick labels. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param labels: list of tick labels. """ if row_column_list is None: for subplot in self.subplots: self.set_ytick_labels(subplot.row, subplot.column, labels) else: for row, column in row_column_list: self.set_ytick_labels(row, column, labels)
def function[set_ytick_labels_for_all, parameter[self, row_column_list, labels]]: constant[Manually specify the x-axis tick labels. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param labels: list of tick labels. ] if compare[name[row_column_list] is constant[None]] begin[:] for taget[name[subplot]] in starred[name[self].subplots] begin[:] call[name[self].set_ytick_labels, parameter[name[subplot].row, name[subplot].column, name[labels]]]
keyword[def] identifier[set_ytick_labels_for_all] ( identifier[self] , identifier[row_column_list] = keyword[None] , identifier[labels] = keyword[None] ): literal[string] keyword[if] identifier[row_column_list] keyword[is] keyword[None] : keyword[for] identifier[subplot] keyword[in] identifier[self] . identifier[subplots] : identifier[self] . identifier[set_ytick_labels] ( identifier[subplot] . identifier[row] , identifier[subplot] . identifier[column] , identifier[labels] ) keyword[else] : keyword[for] identifier[row] , identifier[column] keyword[in] identifier[row_column_list] : identifier[self] . identifier[set_ytick_labels] ( identifier[row] , identifier[column] , identifier[labels] )
def set_ytick_labels_for_all(self, row_column_list=None, labels=None): """Manually specify the x-axis tick labels. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param labels: list of tick labels. """ if row_column_list is None: for subplot in self.subplots: self.set_ytick_labels(subplot.row, subplot.column, labels) # depends on [control=['for'], data=['subplot']] # depends on [control=['if'], data=[]] else: for (row, column) in row_column_list: self.set_ytick_labels(row, column, labels) # depends on [control=['for'], data=[]]
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): """A better wrapper over request for deferred signing""" if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def function[fetch2, parameter[self, path, api, method, params, headers, body]]: constant[A better wrapper over request for deferred signing] if name[self].enableRateLimit begin[:] call[name[self].throttle, parameter[]] name[self].lastRestRequestTimestamp assign[=] call[name[self].milliseconds, parameter[]] variable[request] assign[=] call[name[self].sign, parameter[name[path], name[api], name[method], name[params], name[headers], name[body]]] return[call[name[self].fetch, parameter[call[name[request]][constant[url]], call[name[request]][constant[method]], call[name[request]][constant[headers]], call[name[request]][constant[body]]]]]
keyword[def] identifier[fetch2] ( identifier[self] , identifier[path] , identifier[api] = literal[string] , identifier[method] = literal[string] , identifier[params] ={}, identifier[headers] = keyword[None] , identifier[body] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[enableRateLimit] : identifier[self] . identifier[throttle] () identifier[self] . identifier[lastRestRequestTimestamp] = identifier[self] . identifier[milliseconds] () identifier[request] = identifier[self] . identifier[sign] ( identifier[path] , identifier[api] , identifier[method] , identifier[params] , identifier[headers] , identifier[body] ) keyword[return] identifier[self] . identifier[fetch] ( identifier[request] [ literal[string] ], identifier[request] [ literal[string] ], identifier[request] [ literal[string] ], identifier[request] [ literal[string] ])
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): """A better wrapper over request for deferred signing""" if self.enableRateLimit: self.throttle() # depends on [control=['if'], data=[]] self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def get_channel(self, name): """ Get a channel by name. To get the names, use get_channels. :param string name: Name of channel to get :returns dict conn: A channel attribute dictionary. """ name = quote(name, '') path = Client.urls['channels_by_name'] % name chan = self._call(path, 'GET') return chan
def function[get_channel, parameter[self, name]]: constant[ Get a channel by name. To get the names, use get_channels. :param string name: Name of channel to get :returns dict conn: A channel attribute dictionary. ] variable[name] assign[=] call[name[quote], parameter[name[name], constant[]]] variable[path] assign[=] binary_operation[call[name[Client].urls][constant[channels_by_name]] <ast.Mod object at 0x7da2590d6920> name[name]] variable[chan] assign[=] call[name[self]._call, parameter[name[path], constant[GET]]] return[name[chan]]
keyword[def] identifier[get_channel] ( identifier[self] , identifier[name] ): literal[string] identifier[name] = identifier[quote] ( identifier[name] , literal[string] ) identifier[path] = identifier[Client] . identifier[urls] [ literal[string] ]% identifier[name] identifier[chan] = identifier[self] . identifier[_call] ( identifier[path] , literal[string] ) keyword[return] identifier[chan]
def get_channel(self, name): """ Get a channel by name. To get the names, use get_channels. :param string name: Name of channel to get :returns dict conn: A channel attribute dictionary. """ name = quote(name, '') path = Client.urls['channels_by_name'] % name chan = self._call(path, 'GET') return chan
def run_command(self, config_file): """ :param str config_file: The name of config file. """ config = configparser.ConfigParser() config.read(config_file) rdbms = config.get('database', 'rdbms').lower() label_regex = config.get('constants', 'label_regex') constants = self.create_constants(rdbms) constants.main(config_file, label_regex)
def function[run_command, parameter[self, config_file]]: constant[ :param str config_file: The name of config file. ] variable[config] assign[=] call[name[configparser].ConfigParser, parameter[]] call[name[config].read, parameter[name[config_file]]] variable[rdbms] assign[=] call[call[name[config].get, parameter[constant[database], constant[rdbms]]].lower, parameter[]] variable[label_regex] assign[=] call[name[config].get, parameter[constant[constants], constant[label_regex]]] variable[constants] assign[=] call[name[self].create_constants, parameter[name[rdbms]]] call[name[constants].main, parameter[name[config_file], name[label_regex]]]
keyword[def] identifier[run_command] ( identifier[self] , identifier[config_file] ): literal[string] identifier[config] = identifier[configparser] . identifier[ConfigParser] () identifier[config] . identifier[read] ( identifier[config_file] ) identifier[rdbms] = identifier[config] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] () identifier[label_regex] = identifier[config] . identifier[get] ( literal[string] , literal[string] ) identifier[constants] = identifier[self] . identifier[create_constants] ( identifier[rdbms] ) identifier[constants] . identifier[main] ( identifier[config_file] , identifier[label_regex] )
def run_command(self, config_file): """ :param str config_file: The name of config file. """ config = configparser.ConfigParser() config.read(config_file) rdbms = config.get('database', 'rdbms').lower() label_regex = config.get('constants', 'label_regex') constants = self.create_constants(rdbms) constants.main(config_file, label_regex)
def remove_slug(path): """ Return the remainin part of the path >>> remove_slug('/test/some/function/') test/some """ if path.endswith('/'): path = path[:-1] if path.startswith('/'): path = path[1:] if "/" not in path or not path: return None parts = path.split("/")[:-1] return "/".join(parts)
def function[remove_slug, parameter[path]]: constant[ Return the remainin part of the path >>> remove_slug('/test/some/function/') test/some ] if call[name[path].endswith, parameter[constant[/]]] begin[:] variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da18f58fcd0>] if call[name[path].startswith, parameter[constant[/]]] begin[:] variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da18f58ed10>] if <ast.BoolOp object at 0x7da2041db8b0> begin[:] return[constant[None]] variable[parts] assign[=] call[call[name[path].split, parameter[constant[/]]]][<ast.Slice object at 0x7da2041db940>] return[call[constant[/].join, parameter[name[parts]]]]
keyword[def] identifier[remove_slug] ( identifier[path] ): literal[string] keyword[if] identifier[path] . identifier[endswith] ( literal[string] ): identifier[path] = identifier[path] [:- literal[int] ] keyword[if] identifier[path] . identifier[startswith] ( literal[string] ): identifier[path] = identifier[path] [ literal[int] :] keyword[if] literal[string] keyword[not] keyword[in] identifier[path] keyword[or] keyword[not] identifier[path] : keyword[return] keyword[None] identifier[parts] = identifier[path] . identifier[split] ( literal[string] )[:- literal[int] ] keyword[return] literal[string] . identifier[join] ( identifier[parts] )
def remove_slug(path): """ Return the remainin part of the path >>> remove_slug('/test/some/function/') test/some """ if path.endswith('/'): path = path[:-1] # depends on [control=['if'], data=[]] if path.startswith('/'): path = path[1:] # depends on [control=['if'], data=[]] if '/' not in path or not path: return None # depends on [control=['if'], data=[]] parts = path.split('/')[:-1] return '/'.join(parts)
def execution_errors(self): """ Return a list of commands that encountered execution errors, with the error. Each dictionary entry gives the command dictionary and the error dictionary :return: list of commands that gave errors, with their error information """ if self.split_actions: # throttling split this action, get errors from the split return [dict(e) for s in self.split_actions for e in s.errors] else: return [dict(e) for e in self.errors]
def function[execution_errors, parameter[self]]: constant[ Return a list of commands that encountered execution errors, with the error. Each dictionary entry gives the command dictionary and the error dictionary :return: list of commands that gave errors, with their error information ] if name[self].split_actions begin[:] return[<ast.ListComp object at 0x7da18eb56170>]
keyword[def] identifier[execution_errors] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[split_actions] : keyword[return] [ identifier[dict] ( identifier[e] ) keyword[for] identifier[s] keyword[in] identifier[self] . identifier[split_actions] keyword[for] identifier[e] keyword[in] identifier[s] . identifier[errors] ] keyword[else] : keyword[return] [ identifier[dict] ( identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[self] . identifier[errors] ]
def execution_errors(self): """ Return a list of commands that encountered execution errors, with the error. Each dictionary entry gives the command dictionary and the error dictionary :return: list of commands that gave errors, with their error information """ if self.split_actions: # throttling split this action, get errors from the split return [dict(e) for s in self.split_actions for e in s.errors] # depends on [control=['if'], data=[]] else: return [dict(e) for e in self.errors]
def write_all_sequences_file(self, outname, outdir=None): """Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs. Args: outname (str): Name of the output FASTA file without the extension outdir (str): Path to output directory for the file, default is the sequences directory """ if not outdir: outdir = self.sequence_dir if not outdir: raise ValueError('Output directory must be specified') outfile = op.join(outdir, outname + '.faa') SeqIO.write(self.sequences, outfile, "fasta") log.info('{}: wrote all protein sequences to file'.format(outfile)) return outfile
def function[write_all_sequences_file, parameter[self, outname, outdir]]: constant[Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs. Args: outname (str): Name of the output FASTA file without the extension outdir (str): Path to output directory for the file, default is the sequences directory ] if <ast.UnaryOp object at 0x7da2046237c0> begin[:] variable[outdir] assign[=] name[self].sequence_dir if <ast.UnaryOp object at 0x7da204621270> begin[:] <ast.Raise object at 0x7da204622e60> variable[outfile] assign[=] call[name[op].join, parameter[name[outdir], binary_operation[name[outname] + constant[.faa]]]] call[name[SeqIO].write, parameter[name[self].sequences, name[outfile], constant[fasta]]] call[name[log].info, parameter[call[constant[{}: wrote all protein sequences to file].format, parameter[name[outfile]]]]] return[name[outfile]]
keyword[def] identifier[write_all_sequences_file] ( identifier[self] , identifier[outname] , identifier[outdir] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[outdir] : identifier[outdir] = identifier[self] . identifier[sequence_dir] keyword[if] keyword[not] identifier[outdir] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[outfile] = identifier[op] . identifier[join] ( identifier[outdir] , identifier[outname] + literal[string] ) identifier[SeqIO] . identifier[write] ( identifier[self] . identifier[sequences] , identifier[outfile] , literal[string] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[outfile] )) keyword[return] identifier[outfile]
def write_all_sequences_file(self, outname, outdir=None): """Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs. Args: outname (str): Name of the output FASTA file without the extension outdir (str): Path to output directory for the file, default is the sequences directory """ if not outdir: outdir = self.sequence_dir if not outdir: raise ValueError('Output directory must be specified') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] outfile = op.join(outdir, outname + '.faa') SeqIO.write(self.sequences, outfile, 'fasta') log.info('{}: wrote all protein sequences to file'.format(outfile)) return outfile
def pop( self, *args, **kwargs): """ Removes and returns item at specified index (default= ``last``). Supports both ``list`` and ``dict`` semantics for ``pop()``. If passed no argument or an integer argument, it will use ``list`` semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use ``dict`` semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in ``dict.pop()``. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] """ if not args: args = [-1] for k,v in kwargs.items(): if k == 'default': args = (args[0], v) else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) if (isinstance(args[0], int) or len(args) == 1 or args[0] in self): index = args[0] ret = self[index] del self[index] return ret else: defaultvalue = args[1] return defaultvalue
def function[pop, parameter[self]]: constant[ Removes and returns item at specified index (default= ``last``). Supports both ``list`` and ``dict`` semantics for ``pop()``. If passed no argument or an integer argument, it will use ``list`` semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use ``dict`` semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in ``dict.pop()``. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] ] if <ast.UnaryOp object at 0x7da20c6c6350> begin[:] variable[args] assign[=] list[[<ast.UnaryOp object at 0x7da20c6c7d90>]] for taget[tuple[[<ast.Name object at 0x7da20c6c4070>, <ast.Name object at 0x7da20c6c6710>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] if compare[name[k] equal[==] constant[default]] begin[:] variable[args] assign[=] tuple[[<ast.Subscript object at 0x7da20c6c5030>, <ast.Name object at 0x7da20c6c46d0>]] if <ast.BoolOp object at 0x7da20c6c6860> begin[:] variable[index] assign[=] call[name[args]][constant[0]] variable[ret] assign[=] call[name[self]][name[index]] <ast.Delete object at 0x7da18bcc9780> return[name[ret]]
keyword[def] identifier[pop] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[args] : identifier[args] =[- literal[int] ] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] (): keyword[if] identifier[k] == literal[string] : identifier[args] =( identifier[args] [ literal[int] ], identifier[v] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[k] ) keyword[if] ( identifier[isinstance] ( identifier[args] [ literal[int] ], identifier[int] ) keyword[or] identifier[len] ( identifier[args] )== literal[int] keyword[or] identifier[args] [ literal[int] ] keyword[in] identifier[self] ): identifier[index] = identifier[args] [ literal[int] ] identifier[ret] = identifier[self] [ identifier[index] ] keyword[del] identifier[self] [ identifier[index] ] keyword[return] identifier[ret] keyword[else] : identifier[defaultvalue] = identifier[args] [ literal[int] ] keyword[return] identifier[defaultvalue]
def pop(self, *args, **kwargs): """ Removes and returns item at specified index (default= ``last``). Supports both ``list`` and ``dict`` semantics for ``pop()``. If passed no argument or an integer argument, it will use ``list`` semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use ``dict`` semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in ``dict.pop()``. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] """ if not args: args = [-1] # depends on [control=['if'], data=[]] for (k, v) in kwargs.items(): if k == 'default': args = (args[0], v) # depends on [control=['if'], data=[]] else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) # depends on [control=['for'], data=[]] if isinstance(args[0], int) or len(args) == 1 or args[0] in self: index = args[0] ret = self[index] del self[index] return ret # depends on [control=['if'], data=[]] else: defaultvalue = args[1] return defaultvalue
def verify_transaction(signed_hextx, op_return_value): """ Verify OP_RETURN field in transaction :param signed_hextx: :param op_return_value: :return: """ logging.info('verifying op_return value for transaction') op_return_hash = signed_hextx[-72:-8] result = (op_return_value == op_return_hash) if not result: error_message = 'There was a problem verifying the transaction' raise UnverifiedTransactionError(error_message) logging.info('verified OP_RETURN')
def function[verify_transaction, parameter[signed_hextx, op_return_value]]: constant[ Verify OP_RETURN field in transaction :param signed_hextx: :param op_return_value: :return: ] call[name[logging].info, parameter[constant[verifying op_return value for transaction]]] variable[op_return_hash] assign[=] call[name[signed_hextx]][<ast.Slice object at 0x7da20c6c6d10>] variable[result] assign[=] compare[name[op_return_value] equal[==] name[op_return_hash]] if <ast.UnaryOp object at 0x7da20c6c5720> begin[:] variable[error_message] assign[=] constant[There was a problem verifying the transaction] <ast.Raise object at 0x7da20c9907c0> call[name[logging].info, parameter[constant[verified OP_RETURN]]]
keyword[def] identifier[verify_transaction] ( identifier[signed_hextx] , identifier[op_return_value] ): literal[string] identifier[logging] . identifier[info] ( literal[string] ) identifier[op_return_hash] = identifier[signed_hextx] [- literal[int] :- literal[int] ] identifier[result] =( identifier[op_return_value] == identifier[op_return_hash] ) keyword[if] keyword[not] identifier[result] : identifier[error_message] = literal[string] keyword[raise] identifier[UnverifiedTransactionError] ( identifier[error_message] ) identifier[logging] . identifier[info] ( literal[string] )
def verify_transaction(signed_hextx, op_return_value): """ Verify OP_RETURN field in transaction :param signed_hextx: :param op_return_value: :return: """ logging.info('verifying op_return value for transaction') op_return_hash = signed_hextx[-72:-8] result = op_return_value == op_return_hash if not result: error_message = 'There was a problem verifying the transaction' raise UnverifiedTransactionError(error_message) # depends on [control=['if'], data=[]] logging.info('verified OP_RETURN')
def lcopt_bw2_setup(ecospold_path, overwrite=False, db_name=None): # pragma: no cover """ Utility function to set up brightway2 to work correctly with lcopt. It requires the path to the ecospold files containing the Ecoinvent 3.3 cutoff database. If you don't have these files, log into `ecoinvent.org <http://www.ecoinvent.org/login-databases.html>`_ and go to the Files tab Download the file called ``ecoinvent 3.3_cutoff_ecoSpold02.7z`` Extract the file somewhere sensible on your machine, you might need to download `7-zip <http://www.7-zip.org/download.html>`_ to extract the files. Make a note of the path of the folder that contains the .ecospold files, its probably ``<path/extracted/to>/datasets/`` Use this path (as a string) as the first parameter in this function To overwrite an existing version, set overwrite=True """ default_ei_name = "Ecoinvent3_3_cutoff" if db_name is None: db_name = DEFAULT_PROJECT_STEM + default_ei_name if db_name in bw2.projects: if overwrite: bw2.projects.delete_project(name=db_name, delete_dir=True) else: print('Looks like bw2 is already set up - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_setup in a python shell using overwrite = True') return False bw2.projects.set_current(db_name) bw2.bw2setup() ei = bw2.SingleOutputEcospold2Importer(fix_mac_path_escapes(ecospold_path), default_ei_name) ei.apply_strategies() ei.statistics() ei.write_database() return True
def function[lcopt_bw2_setup, parameter[ecospold_path, overwrite, db_name]]: constant[ Utility function to set up brightway2 to work correctly with lcopt. It requires the path to the ecospold files containing the Ecoinvent 3.3 cutoff database. If you don't have these files, log into `ecoinvent.org <http://www.ecoinvent.org/login-databases.html>`_ and go to the Files tab Download the file called ``ecoinvent 3.3_cutoff_ecoSpold02.7z`` Extract the file somewhere sensible on your machine, you might need to download `7-zip <http://www.7-zip.org/download.html>`_ to extract the files. Make a note of the path of the folder that contains the .ecospold files, its probably ``<path/extracted/to>/datasets/`` Use this path (as a string) as the first parameter in this function To overwrite an existing version, set overwrite=True ] variable[default_ei_name] assign[=] constant[Ecoinvent3_3_cutoff] if compare[name[db_name] is constant[None]] begin[:] variable[db_name] assign[=] binary_operation[name[DEFAULT_PROJECT_STEM] + name[default_ei_name]] if compare[name[db_name] in name[bw2].projects] begin[:] if name[overwrite] begin[:] call[name[bw2].projects.delete_project, parameter[]] call[name[bw2].projects.set_current, parameter[name[db_name]]] call[name[bw2].bw2setup, parameter[]] variable[ei] assign[=] call[name[bw2].SingleOutputEcospold2Importer, parameter[call[name[fix_mac_path_escapes], parameter[name[ecospold_path]]], name[default_ei_name]]] call[name[ei].apply_strategies, parameter[]] call[name[ei].statistics, parameter[]] call[name[ei].write_database, parameter[]] return[constant[True]]
keyword[def] identifier[lcopt_bw2_setup] ( identifier[ecospold_path] , identifier[overwrite] = keyword[False] , identifier[db_name] = keyword[None] ): literal[string] identifier[default_ei_name] = literal[string] keyword[if] identifier[db_name] keyword[is] keyword[None] : identifier[db_name] = identifier[DEFAULT_PROJECT_STEM] + identifier[default_ei_name] keyword[if] identifier[db_name] keyword[in] identifier[bw2] . identifier[projects] : keyword[if] identifier[overwrite] : identifier[bw2] . identifier[projects] . identifier[delete_project] ( identifier[name] = identifier[db_name] , identifier[delete_dir] = keyword[True] ) keyword[else] : identifier[print] ( literal[string] ) keyword[return] keyword[False] identifier[bw2] . identifier[projects] . identifier[set_current] ( identifier[db_name] ) identifier[bw2] . identifier[bw2setup] () identifier[ei] = identifier[bw2] . identifier[SingleOutputEcospold2Importer] ( identifier[fix_mac_path_escapes] ( identifier[ecospold_path] ), identifier[default_ei_name] ) identifier[ei] . identifier[apply_strategies] () identifier[ei] . identifier[statistics] () identifier[ei] . identifier[write_database] () keyword[return] keyword[True]
def lcopt_bw2_setup(ecospold_path, overwrite=False, db_name=None): # pragma: no cover "\n Utility function to set up brightway2 to work correctly with lcopt.\n\n It requires the path to the ecospold files containing the Ecoinvent 3.3 cutoff database.\n\n If you don't have these files, log into `ecoinvent.org <http://www.ecoinvent.org/login-databases.html>`_ and go to the Files tab\n\n Download the file called ``ecoinvent 3.3_cutoff_ecoSpold02.7z``\n\n Extract the file somewhere sensible on your machine, you might need to download `7-zip <http://www.7-zip.org/download.html>`_ to extract the files.\n\n Make a note of the path of the folder that contains the .ecospold files, its probably ``<path/extracted/to>/datasets/``\n\n Use this path (as a string) as the first parameter in this function\n\n To overwrite an existing version, set overwrite=True\n " default_ei_name = 'Ecoinvent3_3_cutoff' if db_name is None: db_name = DEFAULT_PROJECT_STEM + default_ei_name # depends on [control=['if'], data=['db_name']] if db_name in bw2.projects: if overwrite: bw2.projects.delete_project(name=db_name, delete_dir=True) # depends on [control=['if'], data=[]] else: print('Looks like bw2 is already set up - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_setup in a python shell using overwrite = True') return False # depends on [control=['if'], data=['db_name']] bw2.projects.set_current(db_name) bw2.bw2setup() ei = bw2.SingleOutputEcospold2Importer(fix_mac_path_escapes(ecospold_path), default_ei_name) ei.apply_strategies() ei.statistics() ei.write_database() return True
def complexity_fd_petrosian(signal): """ Computes the Petrosian Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann. Parameters ---------- signal : list or array List or array of values. Returns ---------- fd_petrosian : float The Petrosian FD as float value. Example ---------- >>> import neurokit as nk >>> >>> signal = np.sin(np.log(np.random.sample(666))) >>> fd_petrosian = nk.complexity_fd_petrosian(signal, 1, 2) Notes ---------- *Details* - **Petrosian Fractal Dimension**: Provide a fast computation of the FD of a signal by translating the series into a binary sequence. *Authors* - Quentin Geissmann (https://github.com/qgeissmann) *Dependencies* - numpy *See Also* - pyrem package: https://github.com/gilestrolab/pyrem """ diff = np.diff(signal) # x[i] * x[i-1] for i in t0 -> tmax prod = diff[1:-1] * diff[0:-2] # Number of sign changes in derivative of the signal N_delta = np.sum(prod < 0) n = len(signal) fd_petrosian = np.log(n)/(np.log(n)+np.log(n/(n+0.4*N_delta))) return(fd_petrosian)
def function[complexity_fd_petrosian, parameter[signal]]: constant[ Computes the Petrosian Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann. Parameters ---------- signal : list or array List or array of values. Returns ---------- fd_petrosian : float The Petrosian FD as float value. Example ---------- >>> import neurokit as nk >>> >>> signal = np.sin(np.log(np.random.sample(666))) >>> fd_petrosian = nk.complexity_fd_petrosian(signal, 1, 2) Notes ---------- *Details* - **Petrosian Fractal Dimension**: Provide a fast computation of the FD of a signal by translating the series into a binary sequence. *Authors* - Quentin Geissmann (https://github.com/qgeissmann) *Dependencies* - numpy *See Also* - pyrem package: https://github.com/gilestrolab/pyrem ] variable[diff] assign[=] call[name[np].diff, parameter[name[signal]]] variable[prod] assign[=] binary_operation[call[name[diff]][<ast.Slice object at 0x7da20cabc370>] * call[name[diff]][<ast.Slice object at 0x7da1b08e5f00>]] variable[N_delta] assign[=] call[name[np].sum, parameter[compare[name[prod] less[<] constant[0]]]] variable[n] assign[=] call[name[len], parameter[name[signal]]] variable[fd_petrosian] assign[=] binary_operation[call[name[np].log, parameter[name[n]]] / binary_operation[call[name[np].log, parameter[name[n]]] + call[name[np].log, parameter[binary_operation[name[n] / binary_operation[name[n] + binary_operation[constant[0.4] * name[N_delta]]]]]]]] return[name[fd_petrosian]]
keyword[def] identifier[complexity_fd_petrosian] ( identifier[signal] ): literal[string] identifier[diff] = identifier[np] . identifier[diff] ( identifier[signal] ) identifier[prod] = identifier[diff] [ literal[int] :- literal[int] ]* identifier[diff] [ literal[int] :- literal[int] ] identifier[N_delta] = identifier[np] . identifier[sum] ( identifier[prod] < literal[int] ) identifier[n] = identifier[len] ( identifier[signal] ) identifier[fd_petrosian] = identifier[np] . identifier[log] ( identifier[n] )/( identifier[np] . identifier[log] ( identifier[n] )+ identifier[np] . identifier[log] ( identifier[n] /( identifier[n] + literal[int] * identifier[N_delta] ))) keyword[return] ( identifier[fd_petrosian] )
def complexity_fd_petrosian(signal): """ Computes the Petrosian Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann. Parameters ---------- signal : list or array List or array of values. Returns ---------- fd_petrosian : float The Petrosian FD as float value. Example ---------- >>> import neurokit as nk >>> >>> signal = np.sin(np.log(np.random.sample(666))) >>> fd_petrosian = nk.complexity_fd_petrosian(signal, 1, 2) Notes ---------- *Details* - **Petrosian Fractal Dimension**: Provide a fast computation of the FD of a signal by translating the series into a binary sequence. *Authors* - Quentin Geissmann (https://github.com/qgeissmann) *Dependencies* - numpy *See Also* - pyrem package: https://github.com/gilestrolab/pyrem """ diff = np.diff(signal) # x[i] * x[i-1] for i in t0 -> tmax prod = diff[1:-1] * diff[0:-2] # Number of sign changes in derivative of the signal N_delta = np.sum(prod < 0) n = len(signal) fd_petrosian = np.log(n) / (np.log(n) + np.log(n / (n + 0.4 * N_delta))) return fd_petrosian
def split_args(args): """ Split a list of argument strings into a dictionary where each key is an argument name. An argument looks like ``crop``, ``crop="some option"`` or ``crop=my_var``. Arguments which provide no value get a value of ``True``. """ args_dict = {} for arg in args: split_arg = arg.split('=', 1) if len(split_arg) > 1: value = split_arg[1] else: value = True args_dict[split_arg[0]] = value return args_dict
def function[split_args, parameter[args]]: constant[ Split a list of argument strings into a dictionary where each key is an argument name. An argument looks like ``crop``, ``crop="some option"`` or ``crop=my_var``. Arguments which provide no value get a value of ``True``. ] variable[args_dict] assign[=] dictionary[[], []] for taget[name[arg]] in starred[name[args]] begin[:] variable[split_arg] assign[=] call[name[arg].split, parameter[constant[=], constant[1]]] if compare[call[name[len], parameter[name[split_arg]]] greater[>] constant[1]] begin[:] variable[value] assign[=] call[name[split_arg]][constant[1]] call[name[args_dict]][call[name[split_arg]][constant[0]]] assign[=] name[value] return[name[args_dict]]
keyword[def] identifier[split_args] ( identifier[args] ): literal[string] identifier[args_dict] ={} keyword[for] identifier[arg] keyword[in] identifier[args] : identifier[split_arg] = identifier[arg] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[len] ( identifier[split_arg] )> literal[int] : identifier[value] = identifier[split_arg] [ literal[int] ] keyword[else] : identifier[value] = keyword[True] identifier[args_dict] [ identifier[split_arg] [ literal[int] ]]= identifier[value] keyword[return] identifier[args_dict]
def split_args(args): """ Split a list of argument strings into a dictionary where each key is an argument name. An argument looks like ``crop``, ``crop="some option"`` or ``crop=my_var``. Arguments which provide no value get a value of ``True``. """ args_dict = {} for arg in args: split_arg = arg.split('=', 1) if len(split_arg) > 1: value = split_arg[1] # depends on [control=['if'], data=[]] else: value = True args_dict[split_arg[0]] = value # depends on [control=['for'], data=['arg']] return args_dict
def first_where(pred, iterable, default=None): """Returns the first element in an iterable that meets the given predicate. :param default: is the default value to use if the predicate matches none of the elements. """ return next(six.moves.filter(pred, iterable), default)
def function[first_where, parameter[pred, iterable, default]]: constant[Returns the first element in an iterable that meets the given predicate. :param default: is the default value to use if the predicate matches none of the elements. ] return[call[name[next], parameter[call[name[six].moves.filter, parameter[name[pred], name[iterable]]], name[default]]]]
keyword[def] identifier[first_where] ( identifier[pred] , identifier[iterable] , identifier[default] = keyword[None] ): literal[string] keyword[return] identifier[next] ( identifier[six] . identifier[moves] . identifier[filter] ( identifier[pred] , identifier[iterable] ), identifier[default] )
def first_where(pred, iterable, default=None): """Returns the first element in an iterable that meets the given predicate. :param default: is the default value to use if the predicate matches none of the elements. """ return next(six.moves.filter(pred, iterable), default)
def _get_feed_cache(self): """If a recent cache exists, return it, else return None""" feed_cache = None if os.path.exists(self._feed_cache_file): maxage = datetime.now() - timedelta(minutes=self._cachetime) file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime) if file_ts > maxage: try: with open(self._feed_cache_file, 'rb') as cache: feed_cache = cache.read() finally: pass return feed_cache
def function[_get_feed_cache, parameter[self]]: constant[If a recent cache exists, return it, else return None] variable[feed_cache] assign[=] constant[None] if call[name[os].path.exists, parameter[name[self]._feed_cache_file]] begin[:] variable[maxage] assign[=] binary_operation[call[name[datetime].now, parameter[]] - call[name[timedelta], parameter[]]] variable[file_ts] assign[=] call[name[datetime].fromtimestamp, parameter[call[name[os].stat, parameter[name[self]._feed_cache_file]].st_mtime]] if compare[name[file_ts] greater[>] name[maxage]] begin[:] <ast.Try object at 0x7da207f9aec0> return[name[feed_cache]]
keyword[def] identifier[_get_feed_cache] ( identifier[self] ): literal[string] identifier[feed_cache] = keyword[None] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[_feed_cache_file] ): identifier[maxage] = identifier[datetime] . identifier[now] ()- identifier[timedelta] ( identifier[minutes] = identifier[self] . identifier[_cachetime] ) identifier[file_ts] = identifier[datetime] . identifier[fromtimestamp] ( identifier[os] . identifier[stat] ( identifier[self] . identifier[_feed_cache_file] ). identifier[st_mtime] ) keyword[if] identifier[file_ts] > identifier[maxage] : keyword[try] : keyword[with] identifier[open] ( identifier[self] . identifier[_feed_cache_file] , literal[string] ) keyword[as] identifier[cache] : identifier[feed_cache] = identifier[cache] . identifier[read] () keyword[finally] : keyword[pass] keyword[return] identifier[feed_cache]
def _get_feed_cache(self): """If a recent cache exists, return it, else return None""" feed_cache = None if os.path.exists(self._feed_cache_file): maxage = datetime.now() - timedelta(minutes=self._cachetime) file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime) if file_ts > maxage: try: with open(self._feed_cache_file, 'rb') as cache: feed_cache = cache.read() # depends on [control=['with'], data=['cache']] # depends on [control=['try'], data=[]] finally: pass # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return feed_cache
def kernels_list(self, **kwargs): # noqa: E501 """List kernels # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernels_list(async_req=True) >>> result = thread.get() :param async_req bool :param int page: Page number :param int page_size: Page size :param str search: Search terms :param str group: Display only your kernels :param str user: Display kernels by a particular group :param str language: Display kernels in a specific language :param str kernel_type: Display kernels of a specific type :param str output_type: Display kernels with a specific output type :param str sort_by: Sort the results. 'relevance' only works if there is a search query :param str dataset: Display kernels using the specified dataset :param str competition: Display kernels using the specified competition :param str parent_kernel: Display kernels that have forked the specified kernel :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.kernels_list_with_http_info(**kwargs) # noqa: E501 else: (data) = self.kernels_list_with_http_info(**kwargs) # noqa: E501 return data
def function[kernels_list, parameter[self]]: constant[List kernels # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernels_list(async_req=True) >>> result = thread.get() :param async_req bool :param int page: Page number :param int page_size: Page size :param str search: Search terms :param str group: Display only your kernels :param str user: Display kernels by a particular group :param str language: Display kernels in a specific language :param str kernel_type: Display kernels of a specific type :param str output_type: Display kernels with a specific output type :param str sort_by: Sort the results. 'relevance' only works if there is a search query :param str dataset: Display kernels using the specified dataset :param str competition: Display kernels using the specified competition :param str parent_kernel: Display kernels that have forked the specified kernel :return: Result If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].kernels_list_with_http_info, parameter[]]]
keyword[def] identifier[kernels_list] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[kernels_list_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[kernels_list_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def kernels_list(self, **kwargs): # noqa: E501 "List kernels # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.kernels_list(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int page: Page number\n :param int page_size: Page size\n :param str search: Search terms\n :param str group: Display only your kernels\n :param str user: Display kernels by a particular group\n :param str language: Display kernels in a specific language\n :param str kernel_type: Display kernels of a specific type\n :param str output_type: Display kernels with a specific output type\n :param str sort_by: Sort the results. 'relevance' only works if there is a search query\n :param str dataset: Display kernels using the specified dataset\n :param str competition: Display kernels using the specified competition\n :param str parent_kernel: Display kernels that have forked the specified kernel\n :return: Result\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.kernels_list_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.kernels_list_with_http_info(**kwargs) # noqa: E501 return data
def get(self, ids, **kwargs): """ Method to get environments vip by their ids :param ids: List containing identifiers of environments vip :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing environments vip """ uri = build_uri_with_ids('api/v3/environment-vip/%s/', ids) return super(ApiEnvironmentVip, self).get( self.prepare_url(uri, kwargs))
def function[get, parameter[self, ids]]: constant[ Method to get environments vip by their ids :param ids: List containing identifiers of environments vip :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing environments vip ] variable[uri] assign[=] call[name[build_uri_with_ids], parameter[constant[api/v3/environment-vip/%s/], name[ids]]] return[call[call[name[super], parameter[name[ApiEnvironmentVip], name[self]]].get, parameter[call[name[self].prepare_url, parameter[name[uri], name[kwargs]]]]]]
keyword[def] identifier[get] ( identifier[self] , identifier[ids] ,** identifier[kwargs] ): literal[string] identifier[uri] = identifier[build_uri_with_ids] ( literal[string] , identifier[ids] ) keyword[return] identifier[super] ( identifier[ApiEnvironmentVip] , identifier[self] ). identifier[get] ( identifier[self] . identifier[prepare_url] ( identifier[uri] , identifier[kwargs] ))
def get(self, ids, **kwargs): """ Method to get environments vip by their ids :param ids: List containing identifiers of environments vip :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing environments vip """ uri = build_uri_with_ids('api/v3/environment-vip/%s/', ids) return super(ApiEnvironmentVip, self).get(self.prepare_url(uri, kwargs))
def grant_client(self, client_id, publish=False, subscribe=False, publish_protocol=None, publish_topics=None, subscribe_topics=None, scope_prefix='predix-event-hub', **kwargs): """ Grant the given client id all the scopes and authorities needed to work with the eventhub service. """ scopes = ['openid'] authorities = ['uaa.resource'] zone_id = self.get_zone_id() # always must be part of base user scope scopes.append('%s.zones.%s.user' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.user' % (scope_prefix, zone_id)) if publish_topics is not None or subscribe_topics is not None: raise Exception("multiple topics are not currently available in preidx-py") if publish_topics is None: publish_topics = ['topic'] if subscribe_topics is None: subscribe_topics = ['topic'] if publish: # we are granting just the default topic if publish_protocol is None: scopes.append('%s.zones.%s.grpc.publish' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.grpc.publish' % (scope_prefix, zone_id)) scopes.append('%s.zones.%s.wss.publish' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.wss.publish' % (scope_prefix, zone_id)) else: scopes.append('%s.zones.%s.%s.publish' % (scope_prefix, zone_id, publish_protocol)) authorities.append('%s.zones.%s.%s.publish' % (scope_prefix, zone_id, publish_protocol)) # we are requesting multiple topics for topic in publish_topics: if publish_protocol is None: scopes.append('%s.zones.%s.%s.grpc.publish' % (scope_prefix, zone_id, topic)) scopes.append('%s.zones.%s.%s.wss.publish' % (scope_prefix, zone_id, topic)) scopes.append('%s.zones.%s.%s.user' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.grpc.publish' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.wss.publish' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.user' % (scope_prefix, zone_id, topic)) else: scopes.append('%s.zones.%s.%s.%s.publish' % (scope_prefix, zone_id, topic, publish_protocol)) 
authorities.append('%s.zones.%s.%s.%s.publish' % (scope_prefix, zone_id, topic, publish_protocol)) if subscribe: # we are granting just the default topic scopes.append('%s.zones.%s.grpc.subscribe' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.grpc.subscribe' % (scope_prefix, zone_id)) # we are requesting multiple topics for topic in subscribe_topics: scopes.append('%s.zones.%s.%s.grpc.subscribe' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.grpc.subscribe' % (scope_prefix, zone_id, topic)) self.service.uaa.uaac.update_client_grants(client_id, scope=scopes, authorities=authorities) return self.service.uaa.uaac.get_client(client_id)
def function[grant_client, parameter[self, client_id, publish, subscribe, publish_protocol, publish_topics, subscribe_topics, scope_prefix]]: constant[ Grant the given client id all the scopes and authorities needed to work with the eventhub service. ] variable[scopes] assign[=] list[[<ast.Constant object at 0x7da20e9637f0>]] variable[authorities] assign[=] list[[<ast.Constant object at 0x7da20e961e10>]] variable[zone_id] assign[=] call[name[self].get_zone_id, parameter[]] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.user] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e963700>, <ast.Name object at 0x7da20e961d20>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.user] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e962ce0>, <ast.Name object at 0x7da20e961060>]]]]] if <ast.BoolOp object at 0x7da20e963e20> begin[:] <ast.Raise object at 0x7da20e960fd0> if compare[name[publish_topics] is constant[None]] begin[:] variable[publish_topics] assign[=] list[[<ast.Constant object at 0x7da20e960ca0>]] if compare[name[subscribe_topics] is constant[None]] begin[:] variable[subscribe_topics] assign[=] list[[<ast.Constant object at 0x7da20e962680>]] if name[publish] begin[:] if compare[name[publish_protocol] is constant[None]] begin[:] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.grpc.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e963ac0>, <ast.Name object at 0x7da20e963ee0>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.grpc.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e963160>, <ast.Name object at 0x7da20e9623e0>]]]]] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.wss.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e960af0>, <ast.Name object at 0x7da20e9610f0>]]]]] 
call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.wss.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e962f80>, <ast.Name object at 0x7da20e961c30>]]]]] for taget[name[topic]] in starred[name[publish_topics]] begin[:] if compare[name[publish_protocol] is constant[None]] begin[:] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.%s.grpc.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812f50>, <ast.Name object at 0x7da18f8102b0>, <ast.Name object at 0x7da18f8114b0>]]]]] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.%s.wss.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810b80>, <ast.Name object at 0x7da18f8121d0>, <ast.Name object at 0x7da18f811ba0>]]]]] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.%s.user] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811450>, <ast.Name object at 0x7da18f811120>, <ast.Name object at 0x7da18f813b80>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.%s.grpc.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812bf0>, <ast.Name object at 0x7da18f810190>, <ast.Name object at 0x7da18f8139a0>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.%s.wss.publish] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f813dc0>, <ast.Name object at 0x7da18f8105e0>, <ast.Name object at 0x7da18f813a60>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.%s.user] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810fa0>, <ast.Name object at 0x7da18f811b40>, <ast.Name object at 0x7da18f8135b0>]]]]] if name[subscribe] begin[:] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.grpc.subscribe] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name 
object at 0x7da18f812d10>, <ast.Name object at 0x7da18f8101c0>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.grpc.subscribe] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812c80>, <ast.Name object at 0x7da18f812650>]]]]] for taget[name[topic]] in starred[name[subscribe_topics]] begin[:] call[name[scopes].append, parameter[binary_operation[constant[%s.zones.%s.%s.grpc.subscribe] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811cf0>, <ast.Name object at 0x7da18f810c40>, <ast.Name object at 0x7da18f810d90>]]]]] call[name[authorities].append, parameter[binary_operation[constant[%s.zones.%s.%s.grpc.subscribe] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811840>, <ast.Name object at 0x7da18f812230>, <ast.Name object at 0x7da18f8130d0>]]]]] call[name[self].service.uaa.uaac.update_client_grants, parameter[name[client_id]]] return[call[name[self].service.uaa.uaac.get_client, parameter[name[client_id]]]]
keyword[def] identifier[grant_client] ( identifier[self] , identifier[client_id] , identifier[publish] = keyword[False] , identifier[subscribe] = keyword[False] , identifier[publish_protocol] = keyword[None] , identifier[publish_topics] = keyword[None] , identifier[subscribe_topics] = keyword[None] , identifier[scope_prefix] = literal[string] ,** identifier[kwargs] ): literal[string] identifier[scopes] =[ literal[string] ] identifier[authorities] =[ literal[string] ] identifier[zone_id] = identifier[self] . identifier[get_zone_id] () identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) keyword[if] identifier[publish_topics] keyword[is] keyword[not] keyword[None] keyword[or] identifier[subscribe_topics] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[publish_topics] keyword[is] keyword[None] : identifier[publish_topics] =[ literal[string] ] keyword[if] identifier[subscribe_topics] keyword[is] keyword[None] : identifier[subscribe_topics] =[ literal[string] ] keyword[if] identifier[publish] : keyword[if] identifier[publish_protocol] keyword[is] keyword[None] : identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) keyword[else] : identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[publish_protocol] )) identifier[authorities] . 
identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[publish_protocol] )) keyword[for] identifier[topic] keyword[in] identifier[publish_topics] : keyword[if] identifier[publish_protocol] keyword[is] keyword[None] : identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) keyword[else] : identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] , identifier[publish_protocol] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] , identifier[publish_protocol] )) keyword[if] identifier[subscribe] : identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) identifier[authorities] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] )) keyword[for] identifier[topic] keyword[in] identifier[subscribe_topics] : identifier[scopes] . identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[authorities] . 
identifier[append] ( literal[string] %( identifier[scope_prefix] , identifier[zone_id] , identifier[topic] )) identifier[self] . identifier[service] . identifier[uaa] . identifier[uaac] . identifier[update_client_grants] ( identifier[client_id] , identifier[scope] = identifier[scopes] , identifier[authorities] = identifier[authorities] ) keyword[return] identifier[self] . identifier[service] . identifier[uaa] . identifier[uaac] . identifier[get_client] ( identifier[client_id] )
def grant_client(self, client_id, publish=False, subscribe=False, publish_protocol=None, publish_topics=None, subscribe_topics=None, scope_prefix='predix-event-hub', **kwargs): """ Grant the given client id all the scopes and authorities needed to work with the eventhub service. """ scopes = ['openid'] authorities = ['uaa.resource'] zone_id = self.get_zone_id() # always must be part of base user scope scopes.append('%s.zones.%s.user' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.user' % (scope_prefix, zone_id)) if publish_topics is not None or subscribe_topics is not None: raise Exception('multiple topics are not currently available in preidx-py') # depends on [control=['if'], data=[]] if publish_topics is None: publish_topics = ['topic'] # depends on [control=['if'], data=['publish_topics']] if subscribe_topics is None: subscribe_topics = ['topic'] # depends on [control=['if'], data=['subscribe_topics']] if publish: # we are granting just the default topic if publish_protocol is None: scopes.append('%s.zones.%s.grpc.publish' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.grpc.publish' % (scope_prefix, zone_id)) scopes.append('%s.zones.%s.wss.publish' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.wss.publish' % (scope_prefix, zone_id)) # depends on [control=['if'], data=[]] else: scopes.append('%s.zones.%s.%s.publish' % (scope_prefix, zone_id, publish_protocol)) authorities.append('%s.zones.%s.%s.publish' % (scope_prefix, zone_id, publish_protocol)) # we are requesting multiple topics for topic in publish_topics: if publish_protocol is None: scopes.append('%s.zones.%s.%s.grpc.publish' % (scope_prefix, zone_id, topic)) scopes.append('%s.zones.%s.%s.wss.publish' % (scope_prefix, zone_id, topic)) scopes.append('%s.zones.%s.%s.user' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.grpc.publish' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.wss.publish' % (scope_prefix, zone_id, topic)) 
authorities.append('%s.zones.%s.%s.user' % (scope_prefix, zone_id, topic)) # depends on [control=['if'], data=[]] else: scopes.append('%s.zones.%s.%s.%s.publish' % (scope_prefix, zone_id, topic, publish_protocol)) authorities.append('%s.zones.%s.%s.%s.publish' % (scope_prefix, zone_id, topic, publish_protocol)) # depends on [control=['for'], data=['topic']] # depends on [control=['if'], data=[]] if subscribe: # we are granting just the default topic scopes.append('%s.zones.%s.grpc.subscribe' % (scope_prefix, zone_id)) authorities.append('%s.zones.%s.grpc.subscribe' % (scope_prefix, zone_id)) # we are requesting multiple topics for topic in subscribe_topics: scopes.append('%s.zones.%s.%s.grpc.subscribe' % (scope_prefix, zone_id, topic)) authorities.append('%s.zones.%s.%s.grpc.subscribe' % (scope_prefix, zone_id, topic)) # depends on [control=['for'], data=['topic']] # depends on [control=['if'], data=[]] self.service.uaa.uaac.update_client_grants(client_id, scope=scopes, authorities=authorities) return self.service.uaa.uaac.get_client(client_id)
def _diff(self, dir1, dir2): """ Private function which only does directory diff """ self._dcmp = self._compare(dir1, dir2) if self._dcmp.left_only: self.log('Only in %s' % dir1) for x in sorted(self._dcmp.left_only): self.log('>> %s' % x) self.log('') if self._dcmp.right_only: self.log('Only in %s' % dir2) for x in sorted(self._dcmp.right_only): self.log('<< %s' % x) self.log('') if self._dcmp.common: self.log('Common to %s and %s' % (self._dir1, self._dir2)) for x in sorted(self._dcmp.common): self.log('-- %s' % x) else: self.log('No common files or sub-directories!')
def function[_diff, parameter[self, dir1, dir2]]: constant[ Private function which only does directory diff ] name[self]._dcmp assign[=] call[name[self]._compare, parameter[name[dir1], name[dir2]]] if name[self]._dcmp.left_only begin[:] call[name[self].log, parameter[binary_operation[constant[Only in %s] <ast.Mod object at 0x7da2590d6920> name[dir1]]]] for taget[name[x]] in starred[call[name[sorted], parameter[name[self]._dcmp.left_only]]] begin[:] call[name[self].log, parameter[binary_operation[constant[>> %s] <ast.Mod object at 0x7da2590d6920> name[x]]]] call[name[self].log, parameter[constant[]]] if name[self]._dcmp.right_only begin[:] call[name[self].log, parameter[binary_operation[constant[Only in %s] <ast.Mod object at 0x7da2590d6920> name[dir2]]]] for taget[name[x]] in starred[call[name[sorted], parameter[name[self]._dcmp.right_only]]] begin[:] call[name[self].log, parameter[binary_operation[constant[<< %s] <ast.Mod object at 0x7da2590d6920> name[x]]]] call[name[self].log, parameter[constant[]]] if name[self]._dcmp.common begin[:] call[name[self].log, parameter[binary_operation[constant[Common to %s and %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b253ce80>, <ast.Attribute object at 0x7da1b253ea70>]]]]] for taget[name[x]] in starred[call[name[sorted], parameter[name[self]._dcmp.common]]] begin[:] call[name[self].log, parameter[binary_operation[constant[-- %s] <ast.Mod object at 0x7da2590d6920> name[x]]]]
keyword[def] identifier[_diff] ( identifier[self] , identifier[dir1] , identifier[dir2] ): literal[string] identifier[self] . identifier[_dcmp] = identifier[self] . identifier[_compare] ( identifier[dir1] , identifier[dir2] ) keyword[if] identifier[self] . identifier[_dcmp] . identifier[left_only] : identifier[self] . identifier[log] ( literal[string] % identifier[dir1] ) keyword[for] identifier[x] keyword[in] identifier[sorted] ( identifier[self] . identifier[_dcmp] . identifier[left_only] ): identifier[self] . identifier[log] ( literal[string] % identifier[x] ) identifier[self] . identifier[log] ( literal[string] ) keyword[if] identifier[self] . identifier[_dcmp] . identifier[right_only] : identifier[self] . identifier[log] ( literal[string] % identifier[dir2] ) keyword[for] identifier[x] keyword[in] identifier[sorted] ( identifier[self] . identifier[_dcmp] . identifier[right_only] ): identifier[self] . identifier[log] ( literal[string] % identifier[x] ) identifier[self] . identifier[log] ( literal[string] ) keyword[if] identifier[self] . identifier[_dcmp] . identifier[common] : identifier[self] . identifier[log] ( literal[string] %( identifier[self] . identifier[_dir1] , identifier[self] . identifier[_dir2] )) keyword[for] identifier[x] keyword[in] identifier[sorted] ( identifier[self] . identifier[_dcmp] . identifier[common] ): identifier[self] . identifier[log] ( literal[string] % identifier[x] ) keyword[else] : identifier[self] . identifier[log] ( literal[string] )
def _diff(self, dir1, dir2): """ Private function which only does directory diff """ self._dcmp = self._compare(dir1, dir2) if self._dcmp.left_only: self.log('Only in %s' % dir1) for x in sorted(self._dcmp.left_only): self.log('>> %s' % x) # depends on [control=['for'], data=['x']] self.log('') # depends on [control=['if'], data=[]] if self._dcmp.right_only: self.log('Only in %s' % dir2) for x in sorted(self._dcmp.right_only): self.log('<< %s' % x) # depends on [control=['for'], data=['x']] self.log('') # depends on [control=['if'], data=[]] if self._dcmp.common: self.log('Common to %s and %s' % (self._dir1, self._dir2)) for x in sorted(self._dcmp.common): self.log('-- %s' % x) # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=[]] else: self.log('No common files or sub-directories!')
def gtom(adj, nr_steps): ''' The m-th step generalized topological overlap measure (GTOM) quantifies the extent to which a pair of nodes have similar m-th step neighbors. Mth-step neighbors are nodes that are reachable by a path of at most length m. This function computes the the M x M generalized topological overlap measure (GTOM) matrix for number of steps, numSteps. Parameters ---------- adj : NxN np.ndarray connection matrix nr_steps : int number of steps Returns ------- gt : NxN np.ndarray GTOM matrix Notes ----- When numSteps is equal to 1, GTOM is identical to the topological overlap measure (TOM) from reference [2]. In that case the 'gt' matrix records, for each pair of nodes, the fraction of neighbors the two nodes share in common, where "neighbors" are one step removed. As 'numSteps' is increased, neighbors that are furter out are considered. Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be converted from a similarity to a distance matrix by taking 1-gt. ''' bm = binarize(adj, copy=True) bm_aux = bm.copy() nr_nodes = len(adj) if nr_steps > nr_nodes: print("Warning: nr_steps exceeded nr_nodes. Setting nr_steps=nr_nodes") if nr_steps == 0: return bm else: for steps in range(2, nr_steps): for i in range(nr_nodes): # neighbors of node i ng_col, = np.where(bm_aux[i, :] == 1) # neighbors of neighbors of node i nng_row, nng_col = np.where(bm_aux[ng_col, :] == 1) new_ng = np.setdiff1d(nng_col, (i,)) # neighbors of neighbors of i become considered neighbors of i bm_aux[i, new_ng] = 1 bm_aux[new_ng, i] = 1 # numerator of GTOM formula numerator_mat = np.dot(bm_aux, bm_aux) + bm + np.eye(nr_nodes) # vector of node degrees bms = np.sum(bm_aux, axis=0) bms_r = np.tile(bms, (nr_nodes, 1)) denominator_mat = -bm + np.where(bms_r > bms_r.T, bms_r, bms_r.T) + 1 return numerator_mat / denominator_mat
def function[gtom, parameter[adj, nr_steps]]: constant[ The m-th step generalized topological overlap measure (GTOM) quantifies the extent to which a pair of nodes have similar m-th step neighbors. Mth-step neighbors are nodes that are reachable by a path of at most length m. This function computes the the M x M generalized topological overlap measure (GTOM) matrix for number of steps, numSteps. Parameters ---------- adj : NxN np.ndarray connection matrix nr_steps : int number of steps Returns ------- gt : NxN np.ndarray GTOM matrix Notes ----- When numSteps is equal to 1, GTOM is identical to the topological overlap measure (TOM) from reference [2]. In that case the 'gt' matrix records, for each pair of nodes, the fraction of neighbors the two nodes share in common, where "neighbors" are one step removed. As 'numSteps' is increased, neighbors that are furter out are considered. Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be converted from a similarity to a distance matrix by taking 1-gt. ] variable[bm] assign[=] call[name[binarize], parameter[name[adj]]] variable[bm_aux] assign[=] call[name[bm].copy, parameter[]] variable[nr_nodes] assign[=] call[name[len], parameter[name[adj]]] if compare[name[nr_steps] greater[>] name[nr_nodes]] begin[:] call[name[print], parameter[constant[Warning: nr_steps exceeded nr_nodes. Setting nr_steps=nr_nodes]]] if compare[name[nr_steps] equal[==] constant[0]] begin[:] return[name[bm]]
keyword[def] identifier[gtom] ( identifier[adj] , identifier[nr_steps] ): literal[string] identifier[bm] = identifier[binarize] ( identifier[adj] , identifier[copy] = keyword[True] ) identifier[bm_aux] = identifier[bm] . identifier[copy] () identifier[nr_nodes] = identifier[len] ( identifier[adj] ) keyword[if] identifier[nr_steps] > identifier[nr_nodes] : identifier[print] ( literal[string] ) keyword[if] identifier[nr_steps] == literal[int] : keyword[return] identifier[bm] keyword[else] : keyword[for] identifier[steps] keyword[in] identifier[range] ( literal[int] , identifier[nr_steps] ): keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nr_nodes] ): identifier[ng_col] ,= identifier[np] . identifier[where] ( identifier[bm_aux] [ identifier[i] ,:]== literal[int] ) identifier[nng_row] , identifier[nng_col] = identifier[np] . identifier[where] ( identifier[bm_aux] [ identifier[ng_col] ,:]== literal[int] ) identifier[new_ng] = identifier[np] . identifier[setdiff1d] ( identifier[nng_col] ,( identifier[i] ,)) identifier[bm_aux] [ identifier[i] , identifier[new_ng] ]= literal[int] identifier[bm_aux] [ identifier[new_ng] , identifier[i] ]= literal[int] identifier[numerator_mat] = identifier[np] . identifier[dot] ( identifier[bm_aux] , identifier[bm_aux] )+ identifier[bm] + identifier[np] . identifier[eye] ( identifier[nr_nodes] ) identifier[bms] = identifier[np] . identifier[sum] ( identifier[bm_aux] , identifier[axis] = literal[int] ) identifier[bms_r] = identifier[np] . identifier[tile] ( identifier[bms] ,( identifier[nr_nodes] , literal[int] )) identifier[denominator_mat] =- identifier[bm] + identifier[np] . identifier[where] ( identifier[bms_r] > identifier[bms_r] . identifier[T] , identifier[bms_r] , identifier[bms_r] . identifier[T] )+ literal[int] keyword[return] identifier[numerator_mat] / identifier[denominator_mat]
def gtom(adj, nr_steps): """ The m-th step generalized topological overlap measure (GTOM) quantifies the extent to which a pair of nodes have similar m-th step neighbors. Mth-step neighbors are nodes that are reachable by a path of at most length m. This function computes the the M x M generalized topological overlap measure (GTOM) matrix for number of steps, numSteps. Parameters ---------- adj : NxN np.ndarray connection matrix nr_steps : int number of steps Returns ------- gt : NxN np.ndarray GTOM matrix Notes ----- When numSteps is equal to 1, GTOM is identical to the topological overlap measure (TOM) from reference [2]. In that case the 'gt' matrix records, for each pair of nodes, the fraction of neighbors the two nodes share in common, where "neighbors" are one step removed. As 'numSteps' is increased, neighbors that are furter out are considered. Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be converted from a similarity to a distance matrix by taking 1-gt. """ bm = binarize(adj, copy=True) bm_aux = bm.copy() nr_nodes = len(adj) if nr_steps > nr_nodes: print('Warning: nr_steps exceeded nr_nodes. 
Setting nr_steps=nr_nodes') # depends on [control=['if'], data=[]] if nr_steps == 0: return bm # depends on [control=['if'], data=[]] else: for steps in range(2, nr_steps): for i in range(nr_nodes): # neighbors of node i (ng_col,) = np.where(bm_aux[i, :] == 1) # neighbors of neighbors of node i (nng_row, nng_col) = np.where(bm_aux[ng_col, :] == 1) new_ng = np.setdiff1d(nng_col, (i,)) # neighbors of neighbors of i become considered neighbors of i bm_aux[i, new_ng] = 1 bm_aux[new_ng, i] = 1 # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] # numerator of GTOM formula numerator_mat = np.dot(bm_aux, bm_aux) + bm + np.eye(nr_nodes) # vector of node degrees bms = np.sum(bm_aux, axis=0) bms_r = np.tile(bms, (nr_nodes, 1)) denominator_mat = -bm + np.where(bms_r > bms_r.T, bms_r, bms_r.T) + 1 return numerator_mat / denominator_mat
def FibreCouple(pupils,modeDiameter): """ Return the complex amplitudes coupled into a set of fibers """ gridSize=pupils.shape[-1] pupilsVector=np.reshape(pupils,(-1,gridSize**2)) mode=np.reshape(FibreMode(gridSize,modeDiameter),(gridSize**2,)) return np.inner(pupilsVector,mode)
def function[FibreCouple, parameter[pupils, modeDiameter]]: constant[ Return the complex amplitudes coupled into a set of fibers ] variable[gridSize] assign[=] call[name[pupils].shape][<ast.UnaryOp object at 0x7da18f812680>] variable[pupilsVector] assign[=] call[name[np].reshape, parameter[name[pupils], tuple[[<ast.UnaryOp object at 0x7da18f810b50>, <ast.BinOp object at 0x7da18f813670>]]]] variable[mode] assign[=] call[name[np].reshape, parameter[call[name[FibreMode], parameter[name[gridSize], name[modeDiameter]]], tuple[[<ast.BinOp object at 0x7da18f8119f0>]]]] return[call[name[np].inner, parameter[name[pupilsVector], name[mode]]]]
keyword[def] identifier[FibreCouple] ( identifier[pupils] , identifier[modeDiameter] ): literal[string] identifier[gridSize] = identifier[pupils] . identifier[shape] [- literal[int] ] identifier[pupilsVector] = identifier[np] . identifier[reshape] ( identifier[pupils] ,(- literal[int] , identifier[gridSize] ** literal[int] )) identifier[mode] = identifier[np] . identifier[reshape] ( identifier[FibreMode] ( identifier[gridSize] , identifier[modeDiameter] ),( identifier[gridSize] ** literal[int] ,)) keyword[return] identifier[np] . identifier[inner] ( identifier[pupilsVector] , identifier[mode] )
def FibreCouple(pupils, modeDiameter): """ Return the complex amplitudes coupled into a set of fibers """ gridSize = pupils.shape[-1] pupilsVector = np.reshape(pupils, (-1, gridSize ** 2)) mode = np.reshape(FibreMode(gridSize, modeDiameter), (gridSize ** 2,)) return np.inner(pupilsVector, mode)
def log(self, logfile=None): """Log the ASCII traceback into a file object.""" if logfile is None: logfile = sys.stderr tb = self.plaintext.rstrip() + u"\n" logfile.write(to_native(tb, "utf-8", "replace"))
def function[log, parameter[self, logfile]]: constant[Log the ASCII traceback into a file object.] if compare[name[logfile] is constant[None]] begin[:] variable[logfile] assign[=] name[sys].stderr variable[tb] assign[=] binary_operation[call[name[self].plaintext.rstrip, parameter[]] + constant[ ]] call[name[logfile].write, parameter[call[name[to_native], parameter[name[tb], constant[utf-8], constant[replace]]]]]
keyword[def] identifier[log] ( identifier[self] , identifier[logfile] = keyword[None] ): literal[string] keyword[if] identifier[logfile] keyword[is] keyword[None] : identifier[logfile] = identifier[sys] . identifier[stderr] identifier[tb] = identifier[self] . identifier[plaintext] . identifier[rstrip] ()+ literal[string] identifier[logfile] . identifier[write] ( identifier[to_native] ( identifier[tb] , literal[string] , literal[string] ))
def log(self, logfile=None): """Log the ASCII traceback into a file object.""" if logfile is None: logfile = sys.stderr # depends on [control=['if'], data=['logfile']] tb = self.plaintext.rstrip() + u'\n' logfile.write(to_native(tb, 'utf-8', 'replace'))
def index_template(self, tpl): """ Indexes a template by `name` into the `name_to_template` dictionary. :param tpl: The template to index :type tpl: alignak.objects.item.Item :return: None """ objcls = self.inner_class.my_type name = getattr(tpl, 'name', '') if not name: mesg = "a %s template has been defined without name, from: %s" % \ (objcls, tpl.imported_from) tpl.add_error(mesg) elif name in self.name_to_template: tpl = self.manage_conflict(tpl, name) self.name_to_template[name] = tpl logger.debug("Indexed a %s template: %s, uses: %s", tpl.my_type, name, getattr(tpl, 'use', 'Nothing')) return tpl
def function[index_template, parameter[self, tpl]]: constant[ Indexes a template by `name` into the `name_to_template` dictionary. :param tpl: The template to index :type tpl: alignak.objects.item.Item :return: None ] variable[objcls] assign[=] name[self].inner_class.my_type variable[name] assign[=] call[name[getattr], parameter[name[tpl], constant[name], constant[]]] if <ast.UnaryOp object at 0x7da18f58f0d0> begin[:] variable[mesg] assign[=] binary_operation[constant[a %s template has been defined without name, from: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2344910>, <ast.Attribute object at 0x7da1b2347fd0>]]] call[name[tpl].add_error, parameter[name[mesg]]] call[name[self].name_to_template][name[name]] assign[=] name[tpl] call[name[logger].debug, parameter[constant[Indexed a %s template: %s, uses: %s], name[tpl].my_type, name[name], call[name[getattr], parameter[name[tpl], constant[use], constant[Nothing]]]]] return[name[tpl]]
keyword[def] identifier[index_template] ( identifier[self] , identifier[tpl] ): literal[string] identifier[objcls] = identifier[self] . identifier[inner_class] . identifier[my_type] identifier[name] = identifier[getattr] ( identifier[tpl] , literal[string] , literal[string] ) keyword[if] keyword[not] identifier[name] : identifier[mesg] = literal[string] %( identifier[objcls] , identifier[tpl] . identifier[imported_from] ) identifier[tpl] . identifier[add_error] ( identifier[mesg] ) keyword[elif] identifier[name] keyword[in] identifier[self] . identifier[name_to_template] : identifier[tpl] = identifier[self] . identifier[manage_conflict] ( identifier[tpl] , identifier[name] ) identifier[self] . identifier[name_to_template] [ identifier[name] ]= identifier[tpl] identifier[logger] . identifier[debug] ( literal[string] , identifier[tpl] . identifier[my_type] , identifier[name] , identifier[getattr] ( identifier[tpl] , literal[string] , literal[string] )) keyword[return] identifier[tpl]
def index_template(self, tpl): """ Indexes a template by `name` into the `name_to_template` dictionary. :param tpl: The template to index :type tpl: alignak.objects.item.Item :return: None """ objcls = self.inner_class.my_type name = getattr(tpl, 'name', '') if not name: mesg = 'a %s template has been defined without name, from: %s' % (objcls, tpl.imported_from) tpl.add_error(mesg) # depends on [control=['if'], data=[]] elif name in self.name_to_template: tpl = self.manage_conflict(tpl, name) # depends on [control=['if'], data=['name']] self.name_to_template[name] = tpl logger.debug('Indexed a %s template: %s, uses: %s', tpl.my_type, name, getattr(tpl, 'use', 'Nothing')) return tpl
def list_partitions(self, table, retry=DEFAULT_RETRY): """List the partitions in a table. Arguments: table (Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): The table or reference from which to get partition info retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: List[str]: A list of the partition ids present in the partitioned table """ table = _table_arg_to_table_ref(table, default_project=self.project) meta_table = self.get_table( TableReference( self.dataset(table.dataset_id, project=table.project), "%s$__PARTITIONS_SUMMARY__" % table.table_id, ) ) subset = [col for col in meta_table.schema if col.name == "partition_id"] return [ row[0] for row in self.list_rows(meta_table, selected_fields=subset, retry=retry) ]
def function[list_partitions, parameter[self, table, retry]]: constant[List the partitions in a table. Arguments: table (Union[ :class:`~google.cloud.bigquery.table.Table`, :class:`~google.cloud.bigquery.table.TableReference`, str, ]): The table or reference from which to get partition info retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: List[str]: A list of the partition ids present in the partitioned table ] variable[table] assign[=] call[name[_table_arg_to_table_ref], parameter[name[table]]] variable[meta_table] assign[=] call[name[self].get_table, parameter[call[name[TableReference], parameter[call[name[self].dataset, parameter[name[table].dataset_id]], binary_operation[constant[%s$__PARTITIONS_SUMMARY__] <ast.Mod object at 0x7da2590d6920> name[table].table_id]]]]] variable[subset] assign[=] <ast.ListComp object at 0x7da207f029e0> return[<ast.ListComp object at 0x7da207f025f0>]
keyword[def] identifier[list_partitions] ( identifier[self] , identifier[table] , identifier[retry] = identifier[DEFAULT_RETRY] ): literal[string] identifier[table] = identifier[_table_arg_to_table_ref] ( identifier[table] , identifier[default_project] = identifier[self] . identifier[project] ) identifier[meta_table] = identifier[self] . identifier[get_table] ( identifier[TableReference] ( identifier[self] . identifier[dataset] ( identifier[table] . identifier[dataset_id] , identifier[project] = identifier[table] . identifier[project] ), literal[string] % identifier[table] . identifier[table_id] , ) ) identifier[subset] =[ identifier[col] keyword[for] identifier[col] keyword[in] identifier[meta_table] . identifier[schema] keyword[if] identifier[col] . identifier[name] == literal[string] ] keyword[return] [ identifier[row] [ literal[int] ] keyword[for] identifier[row] keyword[in] identifier[self] . identifier[list_rows] ( identifier[meta_table] , identifier[selected_fields] = identifier[subset] , identifier[retry] = identifier[retry] ) ]
def list_partitions(self, table, retry=DEFAULT_RETRY): """List the partitions in a table. Arguments: table (Union[ :class:`~google.cloud.bigquery.table.Table`, :class:`~google.cloud.bigquery.table.TableReference`, str, ]): The table or reference from which to get partition info retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: List[str]: A list of the partition ids present in the partitioned table """ table = _table_arg_to_table_ref(table, default_project=self.project) meta_table = self.get_table(TableReference(self.dataset(table.dataset_id, project=table.project), '%s$__PARTITIONS_SUMMARY__' % table.table_id)) subset = [col for col in meta_table.schema if col.name == 'partition_id'] return [row[0] for row in self.list_rows(meta_table, selected_fields=subset, retry=retry)]
def remove_event_type(self, name): """Remove event type based on name.""" if name not in self.event_types: lg.info('Event type ' + name + ' was not found.') events = self.rater.find('events') # list is necessary so that it does not remove in place for e in list(events): if e.get('type') == name: events.remove(e) self.save()
def function[remove_event_type, parameter[self, name]]: constant[Remove event type based on name.] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].event_types] begin[:] call[name[lg].info, parameter[binary_operation[binary_operation[constant[Event type ] + name[name]] + constant[ was not found.]]]] variable[events] assign[=] call[name[self].rater.find, parameter[constant[events]]] for taget[name[e]] in starred[call[name[list], parameter[name[events]]]] begin[:] if compare[call[name[e].get, parameter[constant[type]]] equal[==] name[name]] begin[:] call[name[events].remove, parameter[name[e]]] call[name[self].save, parameter[]]
keyword[def] identifier[remove_event_type] ( identifier[self] , identifier[name] ): literal[string] keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[event_types] : identifier[lg] . identifier[info] ( literal[string] + identifier[name] + literal[string] ) identifier[events] = identifier[self] . identifier[rater] . identifier[find] ( literal[string] ) keyword[for] identifier[e] keyword[in] identifier[list] ( identifier[events] ): keyword[if] identifier[e] . identifier[get] ( literal[string] )== identifier[name] : identifier[events] . identifier[remove] ( identifier[e] ) identifier[self] . identifier[save] ()
def remove_event_type(self, name): """Remove event type based on name.""" if name not in self.event_types: lg.info('Event type ' + name + ' was not found.') # depends on [control=['if'], data=['name']] events = self.rater.find('events') # list is necessary so that it does not remove in place for e in list(events): if e.get('type') == name: events.remove(e) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] self.save()
def postinit(self, type=None, name=None, body=None): """Do some setup after initialisation. :param type: The types that the block handles. :type type: Tuple or NodeNG or None :param name: The name that the caught exception is assigned to. :type name: AssignName or None :param body:The contents of the block. :type body: list(NodeNG) or None """ self.type = type self.name = name self.body = body
def function[postinit, parameter[self, type, name, body]]: constant[Do some setup after initialisation. :param type: The types that the block handles. :type type: Tuple or NodeNG or None :param name: The name that the caught exception is assigned to. :type name: AssignName or None :param body:The contents of the block. :type body: list(NodeNG) or None ] name[self].type assign[=] name[type] name[self].name assign[=] name[name] name[self].body assign[=] name[body]
keyword[def] identifier[postinit] ( identifier[self] , identifier[type] = keyword[None] , identifier[name] = keyword[None] , identifier[body] = keyword[None] ): literal[string] identifier[self] . identifier[type] = identifier[type] identifier[self] . identifier[name] = identifier[name] identifier[self] . identifier[body] = identifier[body]
def postinit(self, type=None, name=None, body=None): """Do some setup after initialisation. :param type: The types that the block handles. :type type: Tuple or NodeNG or None :param name: The name that the caught exception is assigned to. :type name: AssignName or None :param body:The contents of the block. :type body: list(NodeNG) or None """ self.type = type self.name = name self.body = body
def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs): '''Decorate all urlpatterns by specified decorator''' if isinstance(urlpatterns, (list, tuple)): for pattern in urlpatterns: if getattr(pattern, 'callback', None): pattern._callback = decorator( pattern.callback, *args, **kwargs) if getattr(pattern, 'url_patterns', []): _decorate_urlconf( pattern.url_patterns, decorator, *args, **kwargs) else: if getattr(urlpatterns, 'callback', None): urlpatterns._callback = decorator( urlpatterns.callback, *args, **kwargs)
def function[_decorate_urlconf, parameter[urlpatterns, decorator]]: constant[Decorate all urlpatterns by specified decorator] if call[name[isinstance], parameter[name[urlpatterns], tuple[[<ast.Name object at 0x7da1b0d03c40>, <ast.Name object at 0x7da1b0d007c0>]]]] begin[:] for taget[name[pattern]] in starred[name[urlpatterns]] begin[:] if call[name[getattr], parameter[name[pattern], constant[callback], constant[None]]] begin[:] name[pattern]._callback assign[=] call[name[decorator], parameter[name[pattern].callback, <ast.Starred object at 0x7da1b0d01720>]] if call[name[getattr], parameter[name[pattern], constant[url_patterns], list[[]]]] begin[:] call[name[_decorate_urlconf], parameter[name[pattern].url_patterns, name[decorator], <ast.Starred object at 0x7da1b0f5add0>]]
keyword[def] identifier[_decorate_urlconf] ( identifier[urlpatterns] , identifier[decorator] = identifier[require_auth] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[isinstance] ( identifier[urlpatterns] ,( identifier[list] , identifier[tuple] )): keyword[for] identifier[pattern] keyword[in] identifier[urlpatterns] : keyword[if] identifier[getattr] ( identifier[pattern] , literal[string] , keyword[None] ): identifier[pattern] . identifier[_callback] = identifier[decorator] ( identifier[pattern] . identifier[callback] ,* identifier[args] ,** identifier[kwargs] ) keyword[if] identifier[getattr] ( identifier[pattern] , literal[string] ,[]): identifier[_decorate_urlconf] ( identifier[pattern] . identifier[url_patterns] , identifier[decorator] ,* identifier[args] ,** identifier[kwargs] ) keyword[else] : keyword[if] identifier[getattr] ( identifier[urlpatterns] , literal[string] , keyword[None] ): identifier[urlpatterns] . identifier[_callback] = identifier[decorator] ( identifier[urlpatterns] . identifier[callback] ,* identifier[args] ,** identifier[kwargs] )
def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs): """Decorate all urlpatterns by specified decorator""" if isinstance(urlpatterns, (list, tuple)): for pattern in urlpatterns: if getattr(pattern, 'callback', None): pattern._callback = decorator(pattern.callback, *args, **kwargs) # depends on [control=['if'], data=[]] if getattr(pattern, 'url_patterns', []): _decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']] # depends on [control=['if'], data=[]] elif getattr(urlpatterns, 'callback', None): urlpatterns._callback = decorator(urlpatterns.callback, *args, **kwargs) # depends on [control=['if'], data=[]]
def md5hash(self): """Return the MD5 hash string of the file content""" digest = hashlib.md5(self.content).digest() return b64_string(digest)
def function[md5hash, parameter[self]]: constant[Return the MD5 hash string of the file content] variable[digest] assign[=] call[call[name[hashlib].md5, parameter[name[self].content]].digest, parameter[]] return[call[name[b64_string], parameter[name[digest]]]]
keyword[def] identifier[md5hash] ( identifier[self] ): literal[string] identifier[digest] = identifier[hashlib] . identifier[md5] ( identifier[self] . identifier[content] ). identifier[digest] () keyword[return] identifier[b64_string] ( identifier[digest] )
def md5hash(self): """Return the MD5 hash string of the file content""" digest = hashlib.md5(self.content).digest() return b64_string(digest)
def on(self, event, handler=None): """Register an event handler. :param event: The event name. Can be ``'connect'``, ``'message'`` or ``'disconnect'``. :param handler: The function that should be invoked to handle the event. When this parameter is not given, the method acts as a decorator for the handler function. Example usage:: # as a decorator: @eio.on('connect') def connect_handler(): print('Connection request') # as a method: def message_handler(msg): print('Received message: ', msg) eio.send('response') eio.on('message', message_handler) """ if event not in self.event_names: raise ValueError('Invalid event') def set_handler(handler): self.handlers[event] = handler return handler if handler is None: return set_handler set_handler(handler)
def function[on, parameter[self, event, handler]]: constant[Register an event handler. :param event: The event name. Can be ``'connect'``, ``'message'`` or ``'disconnect'``. :param handler: The function that should be invoked to handle the event. When this parameter is not given, the method acts as a decorator for the handler function. Example usage:: # as a decorator: @eio.on('connect') def connect_handler(): print('Connection request') # as a method: def message_handler(msg): print('Received message: ', msg) eio.send('response') eio.on('message', message_handler) ] if compare[name[event] <ast.NotIn object at 0x7da2590d7190> name[self].event_names] begin[:] <ast.Raise object at 0x7da1b0983a90> def function[set_handler, parameter[handler]]: call[name[self].handlers][name[event]] assign[=] name[handler] return[name[handler]] if compare[name[handler] is constant[None]] begin[:] return[name[set_handler]] call[name[set_handler], parameter[name[handler]]]
keyword[def] identifier[on] ( identifier[self] , identifier[event] , identifier[handler] = keyword[None] ): literal[string] keyword[if] identifier[event] keyword[not] keyword[in] identifier[self] . identifier[event_names] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[def] identifier[set_handler] ( identifier[handler] ): identifier[self] . identifier[handlers] [ identifier[event] ]= identifier[handler] keyword[return] identifier[handler] keyword[if] identifier[handler] keyword[is] keyword[None] : keyword[return] identifier[set_handler] identifier[set_handler] ( identifier[handler] )
def on(self, event, handler=None): """Register an event handler. :param event: The event name. Can be ``'connect'``, ``'message'`` or ``'disconnect'``. :param handler: The function that should be invoked to handle the event. When this parameter is not given, the method acts as a decorator for the handler function. Example usage:: # as a decorator: @eio.on('connect') def connect_handler(): print('Connection request') # as a method: def message_handler(msg): print('Received message: ', msg) eio.send('response') eio.on('message', message_handler) """ if event not in self.event_names: raise ValueError('Invalid event') # depends on [control=['if'], data=[]] def set_handler(handler): self.handlers[event] = handler return handler if handler is None: return set_handler # depends on [control=['if'], data=[]] set_handler(handler)
def make_level_set(level): '''make level set will convert one level into a set''' new_level = dict() for key,value in level.items(): if isinstance(value,list): new_level[key] = set(value) else: new_level[key] = value return new_level
def function[make_level_set, parameter[level]]: constant[make level set will convert one level into a set] variable[new_level] assign[=] call[name[dict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b11e57e0>, <ast.Name object at 0x7da1b11e5720>]]] in starred[call[name[level].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[value], name[list]]] begin[:] call[name[new_level]][name[key]] assign[=] call[name[set], parameter[name[value]]] return[name[new_level]]
keyword[def] identifier[make_level_set] ( identifier[level] ): literal[string] identifier[new_level] = identifier[dict] () keyword[for] identifier[key] , identifier[value] keyword[in] identifier[level] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ): identifier[new_level] [ identifier[key] ]= identifier[set] ( identifier[value] ) keyword[else] : identifier[new_level] [ identifier[key] ]= identifier[value] keyword[return] identifier[new_level]
def make_level_set(level): """make level set will convert one level into a set""" new_level = dict() for (key, value) in level.items(): if isinstance(value, list): new_level[key] = set(value) # depends on [control=['if'], data=[]] else: new_level[key] = value # depends on [control=['for'], data=[]] return new_level
def _single_orbit_find_actions(orbit, N_max, toy_potential=None, force_harmonic_oscillator=False): """ Find approximate actions and angles for samples of a phase-space orbit, `w`, at times `t`. Uses toy potentials with known, analytic action-angle transformations to approximate the true coordinates as a Fourier sum. This code is adapted from Jason Sanders' `genfunc <https://github.com/jlsanders/genfunc>`_ .. todo:: Wrong shape for w -- should be (6,n) as usual... Parameters ---------- orbit : `~gala.dynamics.Orbit` N_max : int Maximum integer Fourier mode vector length, |n|. toy_potential : Potential (optional) Fix the toy potential class. force_harmonic_oscillator : bool (optional) Force using the harmonic oscillator potential as the toy potential. """ if orbit.norbits > 1: raise ValueError("must be a single orbit") if toy_potential is None: toy_potential = fit_toy_potential( orbit, force_harmonic_oscillator=force_harmonic_oscillator) else: logger.debug("Using *fixed* toy potential: {}" .format(toy_potential.parameters)) if isinstance(toy_potential, IsochronePotential): orbit_align = orbit.align_circulation_with_z() w = orbit_align.w() dxyz = (1, 2, 2) circ = np.sign(w[0, 0]*w[4, 0]-w[1, 0]*w[3, 0]) sign = np.array([1., circ, 1.]) orbit = orbit_align elif isinstance(toy_potential, HarmonicOscillatorPotential): dxyz = (2, 2, 2) sign = 1. 
w = orbit.w() else: raise ValueError("Invalid toy potential.") t = orbit.t.value # Now find toy actions and angles aaf = toy_potential.action_angle(orbit) if aaf[0].ndim > 2: aa = np.vstack((aaf[0].value[..., 0], aaf[1].value[..., 0])) else: aa = np.vstack((aaf[0].value, aaf[1].value)) if np.any(np.isnan(aa)): ix = ~np.any(np.isnan(aa), axis=0) aa = aa[:, ix] t = t[ix] warnings.warn("NaN value in toy actions or angles!") if sum(ix) > 1: raise ValueError("Too many NaN value in toy actions or angles!") t1 = time.time() A, b, nvecs = _action_prepare(aa, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2]) actions = np.array(solve(A,b)) logger.debug("Action solution found for N_max={}, size {} symmetric" " matrix in {} seconds" .format(N_max, len(actions), time.time()-t1)) t1 = time.time() A, b, nvecs = _angle_prepare(aa, t, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2], sign=sign) angles = np.array(solve(A, b)) logger.debug("Angle solution found for N_max={}, size {} symmetric" " matrix in {} seconds" .format(N_max, len(angles), time.time()-t1)) # Just some checks if len(angles) > len(aa): warnings.warn("More unknowns than equations!") J = actions[:3] # * sign theta = angles[:3] freqs = angles[3:6] # * sign return dict(actions=J*aaf[0].unit, angles=theta*aaf[1].unit, freqs=freqs*aaf[2].unit, Sn=actions[3:], dSn_dJ=angles[6:], nvecs=nvecs)
def function[_single_orbit_find_actions, parameter[orbit, N_max, toy_potential, force_harmonic_oscillator]]: constant[ Find approximate actions and angles for samples of a phase-space orbit, `w`, at times `t`. Uses toy potentials with known, analytic action-angle transformations to approximate the true coordinates as a Fourier sum. This code is adapted from Jason Sanders' `genfunc <https://github.com/jlsanders/genfunc>`_ .. todo:: Wrong shape for w -- should be (6,n) as usual... Parameters ---------- orbit : `~gala.dynamics.Orbit` N_max : int Maximum integer Fourier mode vector length, |n|. toy_potential : Potential (optional) Fix the toy potential class. force_harmonic_oscillator : bool (optional) Force using the harmonic oscillator potential as the toy potential. ] if compare[name[orbit].norbits greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b0e77ca0> if compare[name[toy_potential] is constant[None]] begin[:] variable[toy_potential] assign[=] call[name[fit_toy_potential], parameter[name[orbit]]] if call[name[isinstance], parameter[name[toy_potential], name[IsochronePotential]]] begin[:] variable[orbit_align] assign[=] call[name[orbit].align_circulation_with_z, parameter[]] variable[w] assign[=] call[name[orbit_align].w, parameter[]] variable[dxyz] assign[=] tuple[[<ast.Constant object at 0x7da1b0e76560>, <ast.Constant object at 0x7da1b0e76530>, <ast.Constant object at 0x7da1b0e76500>]] variable[circ] assign[=] call[name[np].sign, parameter[binary_operation[binary_operation[call[name[w]][tuple[[<ast.Constant object at 0x7da1b0e762f0>, <ast.Constant object at 0x7da1b0e762c0>]]] * call[name[w]][tuple[[<ast.Constant object at 0x7da1b0e76200>, <ast.Constant object at 0x7da1b0e761d0>]]]] - binary_operation[call[name[w]][tuple[[<ast.Constant object at 0x7da1b0e760e0>, <ast.Constant object at 0x7da1b0e760b0>]]] * call[name[w]][tuple[[<ast.Constant object at 0x7da1b0e75ff0>, <ast.Constant object at 0x7da1b0e75fc0>]]]]]]] variable[sign] assign[=] 
call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0e75e70>, <ast.Name object at 0x7da1b0e75e40>, <ast.Constant object at 0x7da1b0e75e10>]]]] variable[orbit] assign[=] name[orbit_align] variable[t] assign[=] name[orbit].t.value variable[aaf] assign[=] call[name[toy_potential].action_angle, parameter[name[orbit]]] if compare[call[name[aaf]][constant[0]].ndim greater[>] constant[2]] begin[:] variable[aa] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Subscript object at 0x7da1b0e75480>, <ast.Subscript object at 0x7da1b0e75300>]]]] if call[name[np].any, parameter[call[name[np].isnan, parameter[name[aa]]]]] begin[:] variable[ix] assign[=] <ast.UnaryOp object at 0x7da1b0e74cd0> variable[aa] assign[=] call[name[aa]][tuple[[<ast.Slice object at 0x7da1b0e74a00>, <ast.Name object at 0x7da1b0e749d0>]]] variable[t] assign[=] call[name[t]][name[ix]] call[name[warnings].warn, parameter[constant[NaN value in toy actions or angles!]]] if compare[call[name[sum], parameter[name[ix]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b0e74670> variable[t1] assign[=] call[name[time].time, parameter[]] <ast.Tuple object at 0x7da1b0e74490> assign[=] call[name[_action_prepare], parameter[name[aa], name[N_max]]] variable[actions] assign[=] call[name[np].array, parameter[call[name[solve], parameter[name[A], name[b]]]]] call[name[logger].debug, parameter[call[constant[Action solution found for N_max={}, size {} symmetric matrix in {} seconds].format, parameter[name[N_max], call[name[len], parameter[name[actions]]], binary_operation[call[name[time].time, parameter[]] - name[t1]]]]]] variable[t1] assign[=] call[name[time].time, parameter[]] <ast.Tuple object at 0x7da1b0e39f90> assign[=] call[name[_angle_prepare], parameter[name[aa], name[t], name[N_max]]] variable[angles] assign[=] call[name[np].array, parameter[call[name[solve], parameter[name[A], name[b]]]]] call[name[logger].debug, parameter[call[constant[Angle solution found for N_max={}, size {} 
symmetric matrix in {} seconds].format, parameter[name[N_max], call[name[len], parameter[name[angles]]], binary_operation[call[name[time].time, parameter[]] - name[t1]]]]]] if compare[call[name[len], parameter[name[angles]]] greater[>] call[name[len], parameter[name[aa]]]] begin[:] call[name[warnings].warn, parameter[constant[More unknowns than equations!]]] variable[J] assign[=] call[name[actions]][<ast.Slice object at 0x7da1b0e3a4a0>] variable[theta] assign[=] call[name[angles]][<ast.Slice object at 0x7da1b0e3b1f0>] variable[freqs] assign[=] call[name[angles]][<ast.Slice object at 0x7da1b0e38280>] return[call[name[dict], parameter[]]]
keyword[def] identifier[_single_orbit_find_actions] ( identifier[orbit] , identifier[N_max] , identifier[toy_potential] = keyword[None] , identifier[force_harmonic_oscillator] = keyword[False] ): literal[string] keyword[if] identifier[orbit] . identifier[norbits] > literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[toy_potential] keyword[is] keyword[None] : identifier[toy_potential] = identifier[fit_toy_potential] ( identifier[orbit] , identifier[force_harmonic_oscillator] = identifier[force_harmonic_oscillator] ) keyword[else] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[toy_potential] . identifier[parameters] )) keyword[if] identifier[isinstance] ( identifier[toy_potential] , identifier[IsochronePotential] ): identifier[orbit_align] = identifier[orbit] . identifier[align_circulation_with_z] () identifier[w] = identifier[orbit_align] . identifier[w] () identifier[dxyz] =( literal[int] , literal[int] , literal[int] ) identifier[circ] = identifier[np] . identifier[sign] ( identifier[w] [ literal[int] , literal[int] ]* identifier[w] [ literal[int] , literal[int] ]- identifier[w] [ literal[int] , literal[int] ]* identifier[w] [ literal[int] , literal[int] ]) identifier[sign] = identifier[np] . identifier[array] ([ literal[int] , identifier[circ] , literal[int] ]) identifier[orbit] = identifier[orbit_align] keyword[elif] identifier[isinstance] ( identifier[toy_potential] , identifier[HarmonicOscillatorPotential] ): identifier[dxyz] =( literal[int] , literal[int] , literal[int] ) identifier[sign] = literal[int] identifier[w] = identifier[orbit] . identifier[w] () keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[t] = identifier[orbit] . identifier[t] . identifier[value] identifier[aaf] = identifier[toy_potential] . identifier[action_angle] ( identifier[orbit] ) keyword[if] identifier[aaf] [ literal[int] ]. 
identifier[ndim] > literal[int] : identifier[aa] = identifier[np] . identifier[vstack] (( identifier[aaf] [ literal[int] ]. identifier[value] [..., literal[int] ], identifier[aaf] [ literal[int] ]. identifier[value] [..., literal[int] ])) keyword[else] : identifier[aa] = identifier[np] . identifier[vstack] (( identifier[aaf] [ literal[int] ]. identifier[value] , identifier[aaf] [ literal[int] ]. identifier[value] )) keyword[if] identifier[np] . identifier[any] ( identifier[np] . identifier[isnan] ( identifier[aa] )): identifier[ix] =~ identifier[np] . identifier[any] ( identifier[np] . identifier[isnan] ( identifier[aa] ), identifier[axis] = literal[int] ) identifier[aa] = identifier[aa] [:, identifier[ix] ] identifier[t] = identifier[t] [ identifier[ix] ] identifier[warnings] . identifier[warn] ( literal[string] ) keyword[if] identifier[sum] ( identifier[ix] )> literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[t1] = identifier[time] . identifier[time] () identifier[A] , identifier[b] , identifier[nvecs] = identifier[_action_prepare] ( identifier[aa] , identifier[N_max] , identifier[dx] = identifier[dxyz] [ literal[int] ], identifier[dy] = identifier[dxyz] [ literal[int] ], identifier[dz] = identifier[dxyz] [ literal[int] ]) identifier[actions] = identifier[np] . identifier[array] ( identifier[solve] ( identifier[A] , identifier[b] )) identifier[logger] . identifier[debug] ( literal[string] literal[string] . identifier[format] ( identifier[N_max] , identifier[len] ( identifier[actions] ), identifier[time] . identifier[time] ()- identifier[t1] )) identifier[t1] = identifier[time] . 
identifier[time] () identifier[A] , identifier[b] , identifier[nvecs] = identifier[_angle_prepare] ( identifier[aa] , identifier[t] , identifier[N_max] , identifier[dx] = identifier[dxyz] [ literal[int] ], identifier[dy] = identifier[dxyz] [ literal[int] ], identifier[dz] = identifier[dxyz] [ literal[int] ], identifier[sign] = identifier[sign] ) identifier[angles] = identifier[np] . identifier[array] ( identifier[solve] ( identifier[A] , identifier[b] )) identifier[logger] . identifier[debug] ( literal[string] literal[string] . identifier[format] ( identifier[N_max] , identifier[len] ( identifier[angles] ), identifier[time] . identifier[time] ()- identifier[t1] )) keyword[if] identifier[len] ( identifier[angles] )> identifier[len] ( identifier[aa] ): identifier[warnings] . identifier[warn] ( literal[string] ) identifier[J] = identifier[actions] [: literal[int] ] identifier[theta] = identifier[angles] [: literal[int] ] identifier[freqs] = identifier[angles] [ literal[int] : literal[int] ] keyword[return] identifier[dict] ( identifier[actions] = identifier[J] * identifier[aaf] [ literal[int] ]. identifier[unit] , identifier[angles] = identifier[theta] * identifier[aaf] [ literal[int] ]. identifier[unit] , identifier[freqs] = identifier[freqs] * identifier[aaf] [ literal[int] ]. identifier[unit] , identifier[Sn] = identifier[actions] [ literal[int] :], identifier[dSn_dJ] = identifier[angles] [ literal[int] :], identifier[nvecs] = identifier[nvecs] )
def _single_orbit_find_actions(orbit, N_max, toy_potential=None, force_harmonic_oscillator=False): """ Find approximate actions and angles for samples of a phase-space orbit, `w`, at times `t`. Uses toy potentials with known, analytic action-angle transformations to approximate the true coordinates as a Fourier sum. This code is adapted from Jason Sanders' `genfunc <https://github.com/jlsanders/genfunc>`_ .. todo:: Wrong shape for w -- should be (6,n) as usual... Parameters ---------- orbit : `~gala.dynamics.Orbit` N_max : int Maximum integer Fourier mode vector length, |n|. toy_potential : Potential (optional) Fix the toy potential class. force_harmonic_oscillator : bool (optional) Force using the harmonic oscillator potential as the toy potential. """ if orbit.norbits > 1: raise ValueError('must be a single orbit') # depends on [control=['if'], data=[]] if toy_potential is None: toy_potential = fit_toy_potential(orbit, force_harmonic_oscillator=force_harmonic_oscillator) # depends on [control=['if'], data=['toy_potential']] else: logger.debug('Using *fixed* toy potential: {}'.format(toy_potential.parameters)) if isinstance(toy_potential, IsochronePotential): orbit_align = orbit.align_circulation_with_z() w = orbit_align.w() dxyz = (1, 2, 2) circ = np.sign(w[0, 0] * w[4, 0] - w[1, 0] * w[3, 0]) sign = np.array([1.0, circ, 1.0]) orbit = orbit_align # depends on [control=['if'], data=[]] elif isinstance(toy_potential, HarmonicOscillatorPotential): dxyz = (2, 2, 2) sign = 1.0 w = orbit.w() # depends on [control=['if'], data=[]] else: raise ValueError('Invalid toy potential.') t = orbit.t.value # Now find toy actions and angles aaf = toy_potential.action_angle(orbit) if aaf[0].ndim > 2: aa = np.vstack((aaf[0].value[..., 0], aaf[1].value[..., 0])) # depends on [control=['if'], data=[]] else: aa = np.vstack((aaf[0].value, aaf[1].value)) if np.any(np.isnan(aa)): ix = ~np.any(np.isnan(aa), axis=0) aa = aa[:, ix] t = t[ix] warnings.warn('NaN value in toy actions or 
angles!') if sum(ix) > 1: raise ValueError('Too many NaN value in toy actions or angles!') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] t1 = time.time() (A, b, nvecs) = _action_prepare(aa, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2]) actions = np.array(solve(A, b)) logger.debug('Action solution found for N_max={}, size {} symmetric matrix in {} seconds'.format(N_max, len(actions), time.time() - t1)) t1 = time.time() (A, b, nvecs) = _angle_prepare(aa, t, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2], sign=sign) angles = np.array(solve(A, b)) logger.debug('Angle solution found for N_max={}, size {} symmetric matrix in {} seconds'.format(N_max, len(angles), time.time() - t1)) # Just some checks if len(angles) > len(aa): warnings.warn('More unknowns than equations!') # depends on [control=['if'], data=[]] J = actions[:3] # * sign theta = angles[:3] freqs = angles[3:6] # * sign return dict(actions=J * aaf[0].unit, angles=theta * aaf[1].unit, freqs=freqs * aaf[2].unit, Sn=actions[3:], dSn_dJ=angles[6:], nvecs=nvecs)
def baffle_S(self): """Return the spacing between baffles. :returns: Spacing between baffles :rtype: int """ return ((self.BAFFLE_K / ((2 * self.expansion_H * (self.vel_grad_avg ** 2) * pc.viscosity_kinematic(self.temp))).to_base_units()) ** (1/3) * self.Q / self.channel_W).to(u.cm)
def function[baffle_S, parameter[self]]: constant[Return the spacing between baffles. :returns: Spacing between baffles :rtype: int ] return[call[binary_operation[binary_operation[binary_operation[binary_operation[name[self].BAFFLE_K / call[binary_operation[binary_operation[binary_operation[constant[2] * name[self].expansion_H] * binary_operation[name[self].vel_grad_avg ** constant[2]]] * call[name[pc].viscosity_kinematic, parameter[name[self].temp]]].to_base_units, parameter[]]] ** binary_operation[constant[1] / constant[3]]] * name[self].Q] / name[self].channel_W].to, parameter[name[u].cm]]]
keyword[def] identifier[baffle_S] ( identifier[self] ): literal[string] keyword[return] (( identifier[self] . identifier[BAFFLE_K] / (( literal[int] * identifier[self] . identifier[expansion_H] *( identifier[self] . identifier[vel_grad_avg] ** literal[int] )* identifier[pc] . identifier[viscosity_kinematic] ( identifier[self] . identifier[temp] ))). identifier[to_base_units] ())**( literal[int] / literal[int] )* identifier[self] . identifier[Q] / identifier[self] . identifier[channel_W] ). identifier[to] ( identifier[u] . identifier[cm] )
def baffle_S(self): """Return the spacing between baffles. :returns: Spacing between baffles :rtype: int """ return ((self.BAFFLE_K / (2 * self.expansion_H * self.vel_grad_avg ** 2 * pc.viscosity_kinematic(self.temp)).to_base_units()) ** (1 / 3) * self.Q / self.channel_W).to(u.cm)
def rowsBeforeRow(self, rowObject, count): """ Wrapper around L{rowsBeforeItem} which accepts the web ID for a item instead of the item itself. @param rowObject: a dictionary mapping strings to column values, sent from the client. One of those column values must be C{__id__} to uniquely identify a row. @param count: an integer, the number of rows to return. """ webID = rowObject['__id__'] return self.rowsBeforeItem( self.webTranslator.fromWebID(webID), count)
def function[rowsBeforeRow, parameter[self, rowObject, count]]: constant[ Wrapper around L{rowsBeforeItem} which accepts the web ID for a item instead of the item itself. @param rowObject: a dictionary mapping strings to column values, sent from the client. One of those column values must be C{__id__} to uniquely identify a row. @param count: an integer, the number of rows to return. ] variable[webID] assign[=] call[name[rowObject]][constant[__id__]] return[call[name[self].rowsBeforeItem, parameter[call[name[self].webTranslator.fromWebID, parameter[name[webID]]], name[count]]]]
keyword[def] identifier[rowsBeforeRow] ( identifier[self] , identifier[rowObject] , identifier[count] ): literal[string] identifier[webID] = identifier[rowObject] [ literal[string] ] keyword[return] identifier[self] . identifier[rowsBeforeItem] ( identifier[self] . identifier[webTranslator] . identifier[fromWebID] ( identifier[webID] ), identifier[count] )
def rowsBeforeRow(self, rowObject, count): """ Wrapper around L{rowsBeforeItem} which accepts the web ID for a item instead of the item itself. @param rowObject: a dictionary mapping strings to column values, sent from the client. One of those column values must be C{__id__} to uniquely identify a row. @param count: an integer, the number of rows to return. """ webID = rowObject['__id__'] return self.rowsBeforeItem(self.webTranslator.fromWebID(webID), count)
def decorator_with_args(func, return_original=False, target_pos=0): """Enable a function to work with a decorator with arguments Args: func (callable): The input function. return_original (bool): Whether the resultant decorator returns the decorating target unchanged. If True, will return the target unchanged. Otherwise, return the returned value from *func*. Default to False. This is useful for converting a non-decorator function to a decorator. See examples below. Return: callable: a decorator with arguments. Examples: >>> @decorator_with_args ... def register_plugin(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... return plugin # note register_plugin is an ordinary decorator >>> @register_plugin(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> @decorator_with_args(return_original=True) ... def register_plugin_xx(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... # Note register_plugin_xxx does not return plugin, so it cannot ... # be used as a decorator directly before applying ... # decorator_with_args. >>> @register_plugin_xx(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> plugin1() >>> @decorator_with_args(return_original=True) ... def register_plugin_xxx(plugin, arg1=1): pass >>> # use result decorator as a function >>> register_plugin_xxx(plugin=plugin1, arg1=10) <function plugin1...> >>> @decorator_with_args(return_original=True, target_pos=1) ... def register_plugin_xxxx(arg1, plugin, arg2=10): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) >>> @register_plugin_xxxx(100) ... 
def plugin2(): pass Registering plugin2 with arg1=100 """ if sys.version_info[0] >= 3: target_name = inspect.getfullargspec(func).args[target_pos] else: target_name = inspect.getargspec(func).args[target_pos] @functools.wraps(func) def wrapper(*args, **kwargs): if len(args) > target_pos: res = func(*args, **kwargs) return args[target_pos] if return_original else res elif len(args) <= 0 and target_name in kwargs: res = func(*args, **kwargs) return kwargs[target_name] if return_original else res else: return wrap_with_args(*args, **kwargs) def wrap_with_args(*args, **kwargs): def wrapped_with_args(target): kwargs2 = dict() kwargs2[target_name] = target kwargs2.update(kwargs) res = func(*args, **kwargs2) return target if return_original else res return wrapped_with_args return wrapper
def function[decorator_with_args, parameter[func, return_original, target_pos]]: constant[Enable a function to work with a decorator with arguments Args: func (callable): The input function. return_original (bool): Whether the resultant decorator returns the decorating target unchanged. If True, will return the target unchanged. Otherwise, return the returned value from *func*. Default to False. This is useful for converting a non-decorator function to a decorator. See examples below. Return: callable: a decorator with arguments. Examples: >>> @decorator_with_args ... def register_plugin(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... return plugin # note register_plugin is an ordinary decorator >>> @register_plugin(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> @decorator_with_args(return_original=True) ... def register_plugin_xx(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... # Note register_plugin_xxx does not return plugin, so it cannot ... # be used as a decorator directly before applying ... # decorator_with_args. >>> @register_plugin_xx(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> plugin1() >>> @decorator_with_args(return_original=True) ... def register_plugin_xxx(plugin, arg1=1): pass >>> # use result decorator as a function >>> register_plugin_xxx(plugin=plugin1, arg1=10) <function plugin1...> >>> @decorator_with_args(return_original=True, target_pos=1) ... def register_plugin_xxxx(arg1, plugin, arg2=10): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) >>> @register_plugin_xxxx(100) ... 
def plugin2(): pass Registering plugin2 with arg1=100 ] if compare[call[name[sys].version_info][constant[0]] greater_or_equal[>=] constant[3]] begin[:] variable[target_name] assign[=] call[call[name[inspect].getfullargspec, parameter[name[func]]].args][name[target_pos]] def function[wrapper, parameter[]]: if compare[call[name[len], parameter[name[args]]] greater[>] name[target_pos]] begin[:] variable[res] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b146f640>]] return[<ast.IfExp object at 0x7da1b146f970>] def function[wrap_with_args, parameter[]]: def function[wrapped_with_args, parameter[target]]: variable[kwargs2] assign[=] call[name[dict], parameter[]] call[name[kwargs2]][name[target_name]] assign[=] name[target] call[name[kwargs2].update, parameter[name[kwargs]]] variable[res] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da20c6a86d0>]] return[<ast.IfExp object at 0x7da20c6a8190>] return[name[wrapped_with_args]] return[name[wrapper]]
keyword[def] identifier[decorator_with_args] ( identifier[func] , identifier[return_original] = keyword[False] , identifier[target_pos] = literal[int] ): literal[string] keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]>= literal[int] : identifier[target_name] = identifier[inspect] . identifier[getfullargspec] ( identifier[func] ). identifier[args] [ identifier[target_pos] ] keyword[else] : identifier[target_name] = identifier[inspect] . identifier[getargspec] ( identifier[func] ). identifier[args] [ identifier[target_pos] ] @ identifier[functools] . identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ): keyword[if] identifier[len] ( identifier[args] )> identifier[target_pos] : identifier[res] = identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[args] [ identifier[target_pos] ] keyword[if] identifier[return_original] keyword[else] identifier[res] keyword[elif] identifier[len] ( identifier[args] )<= literal[int] keyword[and] identifier[target_name] keyword[in] identifier[kwargs] : identifier[res] = identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[kwargs] [ identifier[target_name] ] keyword[if] identifier[return_original] keyword[else] identifier[res] keyword[else] : keyword[return] identifier[wrap_with_args] (* identifier[args] ,** identifier[kwargs] ) keyword[def] identifier[wrap_with_args] (* identifier[args] ,** identifier[kwargs] ): keyword[def] identifier[wrapped_with_args] ( identifier[target] ): identifier[kwargs2] = identifier[dict] () identifier[kwargs2] [ identifier[target_name] ]= identifier[target] identifier[kwargs2] . 
identifier[update] ( identifier[kwargs] ) identifier[res] = identifier[func] (* identifier[args] ,** identifier[kwargs2] ) keyword[return] identifier[target] keyword[if] identifier[return_original] keyword[else] identifier[res] keyword[return] identifier[wrapped_with_args] keyword[return] identifier[wrapper]
def decorator_with_args(func, return_original=False, target_pos=0): """Enable a function to work with a decorator with arguments Args: func (callable): The input function. return_original (bool): Whether the resultant decorator returns the decorating target unchanged. If True, will return the target unchanged. Otherwise, return the returned value from *func*. Default to False. This is useful for converting a non-decorator function to a decorator. See examples below. Return: callable: a decorator with arguments. Examples: >>> @decorator_with_args ... def register_plugin(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... return plugin # note register_plugin is an ordinary decorator >>> @register_plugin(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> @decorator_with_args(return_original=True) ... def register_plugin_xx(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... # Note register_plugin_xxx does not return plugin, so it cannot ... # be used as a decorator directly before applying ... # decorator_with_args. >>> @register_plugin_xx(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> plugin1() >>> @decorator_with_args(return_original=True) ... def register_plugin_xxx(plugin, arg1=1): pass >>> # use result decorator as a function >>> register_plugin_xxx(plugin=plugin1, arg1=10) <function plugin1...> >>> @decorator_with_args(return_original=True, target_pos=1) ... def register_plugin_xxxx(arg1, plugin, arg2=10): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) >>> @register_plugin_xxxx(100) ... 
def plugin2(): pass Registering plugin2 with arg1=100 """ if sys.version_info[0] >= 3: target_name = inspect.getfullargspec(func).args[target_pos] # depends on [control=['if'], data=[]] else: target_name = inspect.getargspec(func).args[target_pos] @functools.wraps(func) def wrapper(*args, **kwargs): if len(args) > target_pos: res = func(*args, **kwargs) return args[target_pos] if return_original else res # depends on [control=['if'], data=['target_pos']] elif len(args) <= 0 and target_name in kwargs: res = func(*args, **kwargs) return kwargs[target_name] if return_original else res # depends on [control=['if'], data=[]] else: return wrap_with_args(*args, **kwargs) def wrap_with_args(*args, **kwargs): def wrapped_with_args(target): kwargs2 = dict() kwargs2[target_name] = target kwargs2.update(kwargs) res = func(*args, **kwargs2) return target if return_original else res return wrapped_with_args return wrapper
def delete_event(self, id, **data): """ DELETE /events/:id/ Deletes an event if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders. Returns a boolean indicating success or failure of the delete. """ return self.delete("/events/{0}/".format(id), data=data)
def function[delete_event, parameter[self, id]]: constant[ DELETE /events/:id/ Deletes an event if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders. Returns a boolean indicating success or failure of the delete. ] return[call[name[self].delete, parameter[call[constant[/events/{0}/].format, parameter[name[id]]]]]]
keyword[def] identifier[delete_event] ( identifier[self] , identifier[id] ,** identifier[data] ): literal[string] keyword[return] identifier[self] . identifier[delete] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] )
def delete_event(self, id, **data): """ DELETE /events/:id/ Deletes an event if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders. Returns a boolean indicating success or failure of the delete. """ return self.delete('/events/{0}/'.format(id), data=data)
def execute(self, text, **params): """Execute a SQL statement.""" class UserDefinedType(sqlalchemy.TypeDecorator): """Add support for expandable values, a la https://github.com/sqlalchemy/sqlalchemy/issues/3953.""" # Required class-level attribute # https://docs.sqlalchemy.org/en/latest/core/custom_types.html#sqlalchemy.types.TypeDecorator impl = sqlalchemy.types.UserDefinedType def process_literal_param(self, value, dialect): """Receive a literal parameter value to be rendered inline within a statement.""" def process(value): """Render a literal value, escaping as needed.""" # bool if type(value) is bool: return sqlalchemy.types.Boolean().literal_processor(dialect)(value) # datetime.date elif type(value) is datetime.date: return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%Y-%m-%d")) # datetime.datetime elif type(value) is datetime.datetime: return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%Y-%m-%d %H:%M:%S")) # datetime.time elif type(value) is datetime.time: return sqlalchemy.types.String().literal_processor(dialect)(value.strftime("%H:%M:%S")) # float elif type(value) is float: return sqlalchemy.types.Float().literal_processor(dialect)(value) # int elif type(value) is int: return sqlalchemy.types.Integer().literal_processor(dialect)(value) # long elif sys.version_info.major != 3 and type(value) is long: return sqlalchemy.types.Integer().literal_processor(dialect)(value) # str elif type(value) is str: return sqlalchemy.types.String().literal_processor(dialect)(value) # None elif type(value) is sqlalchemy.sql.elements.Null: return sqlalchemy.types.NullType().literal_processor(dialect)(value) # Unsupported value raise RuntimeError("unsupported value") # Process value(s), separating with commas as needed if type(value) is list: return ", ".join([process(v) for v in value]) else: return process(value) # Allow only one statement at a time, since SQLite doesn't support multiple # 
https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.execute if len(sqlparse.split(text)) > 1: raise RuntimeError("too many statements at once") # Raise exceptions for warnings warnings.filterwarnings("error") # Prepare, execute statement try: # Construct a new TextClause clause statement = sqlalchemy.text(text) # Iterate over parameters for key, value in params.items(): # Translate None to NULL if value is None: value = sqlalchemy.sql.null() # Bind parameters before statement reaches database, so that bound parameters appear in exceptions # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text statement = statement.bindparams(sqlalchemy.bindparam( key, value=value, type_=UserDefinedType())) # Stringify bound parameters # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html#how-do-i-render-sql-expressions-as-strings-possibly-with-bound-parameters-inlined statement = str(statement.compile(compile_kwargs={"literal_binds": True})) # Statement for logging log = re.sub(r"\n\s*", " ", sqlparse.format(statement, reindent=True)) # Execute statement result = self.engine.execute(statement) # If SELECT (or INSERT with RETURNING), return result set as list of dict objects if re.search(r"^\s*SELECT", statement, re.I): # Coerce any decimal.Decimal objects to float objects # https://groups.google.com/d/msg/sqlalchemy/0qXMYJvq8SA/oqtvMD9Uw-kJ rows = [dict(row) for row in result.fetchall()] for row in rows: for column in row: if type(row[column]) is decimal.Decimal: row[column] = float(row[column]) ret = rows # If INSERT, return primary key value for a newly inserted row elif re.search(r"^\s*INSERT", statement, re.I): if self.engine.url.get_backend_name() in ["postgres", "postgresql"]: result = self.engine.execute(sqlalchemy.text("SELECT LASTVAL()")) ret = result.first()[0] else: ret = result.lastrowid # If DELETE or UPDATE, return number of rows matched elif re.search(r"^\s*(?:DELETE|UPDATE)", statement, re.I): ret = 
result.rowcount # If some other statement, return True unless exception else: ret = True # If constraint violated, return None except sqlalchemy.exc.IntegrityError: self.logger.debug(termcolor.colored(log, "yellow")) return None # If user errror except sqlalchemy.exc.OperationalError as e: self.logger.debug(termcolor.colored(log, "red")) e = RuntimeError(self._parse(e)) e.__cause__ = None raise e # Return value else: self.logger.debug(termcolor.colored(log, "green")) return ret
def function[execute, parameter[self, text]]: constant[Execute a SQL statement.] class class[UserDefinedType, parameter[]] begin[:] constant[Add support for expandable values, a la https://github.com/sqlalchemy/sqlalchemy/issues/3953.] variable[impl] assign[=] name[sqlalchemy].types.UserDefinedType def function[process_literal_param, parameter[self, value, dialect]]: constant[Receive a literal parameter value to be rendered inline within a statement.] def function[process, parameter[value]]: constant[Render a literal value, escaping as needed.] if compare[call[name[type], parameter[name[value]]] is name[bool]] begin[:] return[call[call[call[name[sqlalchemy].types.Boolean, parameter[]].literal_processor, parameter[name[dialect]]], parameter[name[value]]]] <ast.Raise object at 0x7da1b13e54e0> if compare[call[name[type], parameter[name[value]]] is name[list]] begin[:] return[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b13e7fa0>]]] if compare[call[name[len], parameter[call[name[sqlparse].split, parameter[name[text]]]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b13e7460> call[name[warnings].filterwarnings, parameter[constant[error]]] <ast.Try object at 0x7da1b13e60e0>
keyword[def] identifier[execute] ( identifier[self] , identifier[text] ,** identifier[params] ): literal[string] keyword[class] identifier[UserDefinedType] ( identifier[sqlalchemy] . identifier[TypeDecorator] ): literal[string] identifier[impl] = identifier[sqlalchemy] . identifier[types] . identifier[UserDefinedType] keyword[def] identifier[process_literal_param] ( identifier[self] , identifier[value] , identifier[dialect] ): literal[string] keyword[def] identifier[process] ( identifier[value] ): literal[string] keyword[if] identifier[type] ( identifier[value] ) keyword[is] identifier[bool] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[Boolean] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] ) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[datetime] . identifier[date] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[String] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] . identifier[strftime] ( literal[string] )) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[datetime] . identifier[datetime] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[String] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] . identifier[strftime] ( literal[string] )) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[datetime] . identifier[time] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[String] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] . identifier[strftime] ( literal[string] )) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[float] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[Float] (). 
identifier[literal_processor] ( identifier[dialect] )( identifier[value] ) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[int] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[Integer] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] ) keyword[elif] identifier[sys] . identifier[version_info] . identifier[major] != literal[int] keyword[and] identifier[type] ( identifier[value] ) keyword[is] identifier[long] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[Integer] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] ) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[str] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[String] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] ) keyword[elif] identifier[type] ( identifier[value] ) keyword[is] identifier[sqlalchemy] . identifier[sql] . identifier[elements] . identifier[Null] : keyword[return] identifier[sqlalchemy] . identifier[types] . identifier[NullType] (). identifier[literal_processor] ( identifier[dialect] )( identifier[value] ) keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[if] identifier[type] ( identifier[value] ) keyword[is] identifier[list] : keyword[return] literal[string] . identifier[join] ([ identifier[process] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[value] ]) keyword[else] : keyword[return] identifier[process] ( identifier[value] ) keyword[if] identifier[len] ( identifier[sqlparse] . identifier[split] ( identifier[text] ))> literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[warnings] . identifier[filterwarnings] ( literal[string] ) keyword[try] : identifier[statement] = identifier[sqlalchemy] . identifier[text] ( identifier[text] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[params] . 
identifier[items] (): keyword[if] identifier[value] keyword[is] keyword[None] : identifier[value] = identifier[sqlalchemy] . identifier[sql] . identifier[null] () identifier[statement] = identifier[statement] . identifier[bindparams] ( identifier[sqlalchemy] . identifier[bindparam] ( identifier[key] , identifier[value] = identifier[value] , identifier[type_] = identifier[UserDefinedType] ())) identifier[statement] = identifier[str] ( identifier[statement] . identifier[compile] ( identifier[compile_kwargs] ={ literal[string] : keyword[True] })) identifier[log] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[sqlparse] . identifier[format] ( identifier[statement] , identifier[reindent] = keyword[True] )) identifier[result] = identifier[self] . identifier[engine] . identifier[execute] ( identifier[statement] ) keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[statement] , identifier[re] . identifier[I] ): identifier[rows] =[ identifier[dict] ( identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[result] . identifier[fetchall] ()] keyword[for] identifier[row] keyword[in] identifier[rows] : keyword[for] identifier[column] keyword[in] identifier[row] : keyword[if] identifier[type] ( identifier[row] [ identifier[column] ]) keyword[is] identifier[decimal] . identifier[Decimal] : identifier[row] [ identifier[column] ]= identifier[float] ( identifier[row] [ identifier[column] ]) identifier[ret] = identifier[rows] keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[statement] , identifier[re] . identifier[I] ): keyword[if] identifier[self] . identifier[engine] . identifier[url] . identifier[get_backend_name] () keyword[in] [ literal[string] , literal[string] ]: identifier[result] = identifier[self] . identifier[engine] . identifier[execute] ( identifier[sqlalchemy] . identifier[text] ( literal[string] )) identifier[ret] = identifier[result] . 
identifier[first] ()[ literal[int] ] keyword[else] : identifier[ret] = identifier[result] . identifier[lastrowid] keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[statement] , identifier[re] . identifier[I] ): identifier[ret] = identifier[result] . identifier[rowcount] keyword[else] : identifier[ret] = keyword[True] keyword[except] identifier[sqlalchemy] . identifier[exc] . identifier[IntegrityError] : identifier[self] . identifier[logger] . identifier[debug] ( identifier[termcolor] . identifier[colored] ( identifier[log] , literal[string] )) keyword[return] keyword[None] keyword[except] identifier[sqlalchemy] . identifier[exc] . identifier[OperationalError] keyword[as] identifier[e] : identifier[self] . identifier[logger] . identifier[debug] ( identifier[termcolor] . identifier[colored] ( identifier[log] , literal[string] )) identifier[e] = identifier[RuntimeError] ( identifier[self] . identifier[_parse] ( identifier[e] )) identifier[e] . identifier[__cause__] = keyword[None] keyword[raise] identifier[e] keyword[else] : identifier[self] . identifier[logger] . identifier[debug] ( identifier[termcolor] . identifier[colored] ( identifier[log] , literal[string] )) keyword[return] identifier[ret]
def execute(self, text, **params): """Execute a SQL statement.""" class UserDefinedType(sqlalchemy.TypeDecorator): """Add support for expandable values, a la https://github.com/sqlalchemy/sqlalchemy/issues/3953.""" # Required class-level attribute # https://docs.sqlalchemy.org/en/latest/core/custom_types.html#sqlalchemy.types.TypeDecorator impl = sqlalchemy.types.UserDefinedType def process_literal_param(self, value, dialect): """Receive a literal parameter value to be rendered inline within a statement.""" def process(value): """Render a literal value, escaping as needed.""" # bool if type(value) is bool: return sqlalchemy.types.Boolean().literal_processor(dialect)(value) # depends on [control=['if'], data=[]] # datetime.date elif type(value) is datetime.date: return sqlalchemy.types.String().literal_processor(dialect)(value.strftime('%Y-%m-%d')) # depends on [control=['if'], data=[]] # datetime.datetime elif type(value) is datetime.datetime: return sqlalchemy.types.String().literal_processor(dialect)(value.strftime('%Y-%m-%d %H:%M:%S')) # depends on [control=['if'], data=[]] # datetime.time elif type(value) is datetime.time: return sqlalchemy.types.String().literal_processor(dialect)(value.strftime('%H:%M:%S')) # depends on [control=['if'], data=[]] # float elif type(value) is float: return sqlalchemy.types.Float().literal_processor(dialect)(value) # depends on [control=['if'], data=[]] # int elif type(value) is int: return sqlalchemy.types.Integer().literal_processor(dialect)(value) # depends on [control=['if'], data=[]] # long elif sys.version_info.major != 3 and type(value) is long: return sqlalchemy.types.Integer().literal_processor(dialect)(value) # depends on [control=['if'], data=[]] # str elif type(value) is str: return sqlalchemy.types.String().literal_processor(dialect)(value) # depends on [control=['if'], data=[]] # None elif type(value) is sqlalchemy.sql.elements.Null: return sqlalchemy.types.NullType().literal_processor(dialect)(value) # depends on 
[control=['if'], data=[]] # Unsupported value raise RuntimeError('unsupported value') # Process value(s), separating with commas as needed if type(value) is list: return ', '.join([process(v) for v in value]) # depends on [control=['if'], data=[]] else: return process(value) # Allow only one statement at a time, since SQLite doesn't support multiple # https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.execute if len(sqlparse.split(text)) > 1: raise RuntimeError('too many statements at once') # depends on [control=['if'], data=[]] # Raise exceptions for warnings warnings.filterwarnings('error') # Prepare, execute statement try: # Construct a new TextClause clause statement = sqlalchemy.text(text) # Iterate over parameters for (key, value) in params.items(): # Translate None to NULL if value is None: value = sqlalchemy.sql.null() # depends on [control=['if'], data=['value']] # Bind parameters before statement reaches database, so that bound parameters appear in exceptions # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text statement = statement.bindparams(sqlalchemy.bindparam(key, value=value, type_=UserDefinedType())) # depends on [control=['for'], data=[]] # Stringify bound parameters # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html#how-do-i-render-sql-expressions-as-strings-possibly-with-bound-parameters-inlined statement = str(statement.compile(compile_kwargs={'literal_binds': True})) # Statement for logging log = re.sub('\\n\\s*', ' ', sqlparse.format(statement, reindent=True)) # Execute statement result = self.engine.execute(statement) # If SELECT (or INSERT with RETURNING), return result set as list of dict objects if re.search('^\\s*SELECT', statement, re.I): # Coerce any decimal.Decimal objects to float objects # https://groups.google.com/d/msg/sqlalchemy/0qXMYJvq8SA/oqtvMD9Uw-kJ rows = [dict(row) for row in result.fetchall()] for row in rows: for column in row: if type(row[column]) is 
decimal.Decimal: row[column] = float(row[column]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']] # depends on [control=['for'], data=['row']] ret = rows # depends on [control=['if'], data=[]] # If INSERT, return primary key value for a newly inserted row elif re.search('^\\s*INSERT', statement, re.I): if self.engine.url.get_backend_name() in ['postgres', 'postgresql']: result = self.engine.execute(sqlalchemy.text('SELECT LASTVAL()')) ret = result.first()[0] # depends on [control=['if'], data=[]] else: ret = result.lastrowid # depends on [control=['if'], data=[]] # If DELETE or UPDATE, return number of rows matched elif re.search('^\\s*(?:DELETE|UPDATE)', statement, re.I): ret = result.rowcount # depends on [control=['if'], data=[]] else: # If some other statement, return True unless exception ret = True # depends on [control=['try'], data=[]] # If constraint violated, return None except sqlalchemy.exc.IntegrityError: self.logger.debug(termcolor.colored(log, 'yellow')) return None # depends on [control=['except'], data=[]] # If user errror except sqlalchemy.exc.OperationalError as e: self.logger.debug(termcolor.colored(log, 'red')) e = RuntimeError(self._parse(e)) e.__cause__ = None raise e # depends on [control=['except'], data=['e']] else: # Return value self.logger.debug(termcolor.colored(log, 'green')) return ret
def convert(model, input_features, output_features): """Convert a one-hot-encoder model to the protobuf spec. Parameters ---------- model: OneHotEncoder A trained one-hot encoder model. input_features: str, optional Name of the input column. output_features: str, optional Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model """ if not(_HAS_SKLEARN): raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.') # Make sure the model is fitted. _sklearn_util.check_expected_type(model, OneHotEncoder) _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'active_features_')) _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'n_values_')) input_dimension = get_input_dimension(model) if input_dimension is not None: # Make sure that our starting dimensions are correctly managed. assert len(input_features) == 1 assert input_features[0][1] == datatypes.Array(input_dimension) input_dimension = input_features[0][1].num_elements expected_output_dimension = update_dimension(model, input_dimension) assert output_features[0][1] == datatypes.Array(expected_output_dimension) # Create a pipeline that can do all of the subsequent feature extraction. feature_vectorizer_input_features = [] feature_vectorizer_size_map = {} if model.categorical_features == 'all': _categorical_features = set(range(input_dimension)) _cat_feature_idx_mapping = dict( (i, i) for i in range(input_dimension)) else: _categorical_features = set(model.categorical_features) _cat_feature_idx_mapping = dict( (_idx, i) for i, _idx in enumerate(sorted(model.categorical_features))) pline = Pipeline(input_features, output_features) # Track the overall packing index, which determines the output ordering. pack_idx = 0 # First, go through all the columns that are encoded. The sklearn OHE puts # all of these first, regardless of their original ordering. 
for idx in range(input_dimension): f_name = "__OHE_%d__" % pack_idx if idx in _categorical_features: # This input column is one hot encoded feature_extractor_spec = create_array_feature_extractor( input_features, f_name, idx, output_type = 'Int64') pline.add_model(feature_extractor_spec) _cat_feature_idx = _cat_feature_idx_mapping[idx] ohe_input_features = [(f_name, datatypes.Int64())] ohe_output_features = [(f_name, datatypes.Dictionary('Int64'))] # Create a one hot encoder per column o_spec = _Model_pb2.Model() o_spec.specificationVersion = SPECIFICATION_VERSION o_spec = set_transform_interface_params(o_spec, ohe_input_features, ohe_output_features) ohe_spec = o_spec.oneHotEncoder ohe_spec.outputSparse = True if model.handle_unknown == 'error': ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value('ErrorOnUnknown') else: ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value('IgnoreUnknown') # Need to do a quick search to find the part of the active_features_ mask # that represents the categorical variables in our part. Could do this # with binary search, but we probably don't need speed so much here. def bs_find(a, i): lb, k = 0, len(a) while k > 0: _idx = lb + (k // 2) if a[_idx] < i: lb = _idx + 1 k -= 1 k = (k // 2) return lb # Here are the indices we are looking fo f_idx_bottom = model.feature_indices_[_cat_feature_idx] f_idx_top = model.feature_indices_[_cat_feature_idx + 1] # Now find where in the active features list we should look. cat_feat_idx_bottom = bs_find(model.active_features_, f_idx_bottom) cat_feat_idx_top = bs_find(model.active_features_, f_idx_top) n_cat_values = cat_feat_idx_top - cat_feat_idx_bottom for i in range(cat_feat_idx_bottom, cat_feat_idx_top): # The actual categorical value is stored as an offset in the active_features list. 
cat_idx = model.active_features_[i] - f_idx_bottom ohe_spec.int64Categories.vector.append(cat_idx) # Add the ohe to the pipeline pline.add_model(o_spec) # Add the result to the feature_vectorizer at the end. feature_vectorizer_input_features.append( (f_name, datatypes.Dictionary('Int64')) ) feature_vectorizer_size_map[f_name] = n_cat_values pack_idx += 1 # Now go through all the columns that are not encoded as the sklearn OHE puts # these after the encoded ones. For speed, we can put these all in a single # ArrayFeatureExtractor # pass_through_features = [idx for idx in range(input_dimension) if idx not in _categorical_features] if pass_through_features: f_name = "__OHE_pass_through__" # This input column is not one hot encoded feature_extractor_spec = create_array_feature_extractor( input_features, f_name, pass_through_features) pline.add_model(feature_extractor_spec) feature_vectorizer_input_features.append( (f_name, datatypes.Array(len(pass_through_features))) ) # Finally, add the feature vectorizer to the pipeline. output_feature_name = output_features[0][0] output_feature_dimension = output_features[0][1].num_elements fvec, _num_out_dim = create_feature_vectorizer(feature_vectorizer_input_features, output_features[0][0], feature_vectorizer_size_map) # Make sure that the feature vectorizer input actually matches up with the assert _num_out_dim == output_features[0][1].num_elements pline.add_model(fvec) return _MLModel(pline.spec)
def function[convert, parameter[model, input_features, output_features]]: constant[Convert a one-hot-encoder model to the protobuf spec. Parameters ---------- model: OneHotEncoder A trained one-hot encoder model. input_features: str, optional Name of the input column. output_features: str, optional Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model ] if <ast.UnaryOp object at 0x7da1b1f73df0> begin[:] <ast.Raise object at 0x7da1b1f73d90> call[name[_sklearn_util].check_expected_type, parameter[name[model], name[OneHotEncoder]]] call[name[_sklearn_util].check_fitted, parameter[name[model], <ast.Lambda object at 0x7da1b1f73a90>]] call[name[_sklearn_util].check_fitted, parameter[name[model], <ast.Lambda object at 0x7da1b1f73820>]] variable[input_dimension] assign[=] call[name[get_input_dimension], parameter[name[model]]] if compare[name[input_dimension] is_not constant[None]] begin[:] assert[compare[call[name[len], parameter[name[input_features]]] equal[==] constant[1]]] assert[compare[call[call[name[input_features]][constant[0]]][constant[1]] equal[==] call[name[datatypes].Array, parameter[name[input_dimension]]]]] variable[input_dimension] assign[=] call[call[name[input_features]][constant[0]]][constant[1]].num_elements variable[expected_output_dimension] assign[=] call[name[update_dimension], parameter[name[model], name[input_dimension]]] assert[compare[call[call[name[output_features]][constant[0]]][constant[1]] equal[==] call[name[datatypes].Array, parameter[name[expected_output_dimension]]]]] variable[feature_vectorizer_input_features] assign[=] list[[]] variable[feature_vectorizer_size_map] assign[=] dictionary[[], []] if compare[name[model].categorical_features equal[==] constant[all]] begin[:] variable[_categorical_features] assign[=] call[name[set], parameter[call[name[range], parameter[name[input_dimension]]]]] variable[_cat_feature_idx_mapping] assign[=] call[name[dict], 
parameter[<ast.GeneratorExp object at 0x7da1b1f728f0>]] variable[pline] assign[=] call[name[Pipeline], parameter[name[input_features], name[output_features]]] variable[pack_idx] assign[=] constant[0] for taget[name[idx]] in starred[call[name[range], parameter[name[input_dimension]]]] begin[:] variable[f_name] assign[=] binary_operation[constant[__OHE_%d__] <ast.Mod object at 0x7da2590d6920> name[pack_idx]] if compare[name[idx] in name[_categorical_features]] begin[:] variable[feature_extractor_spec] assign[=] call[name[create_array_feature_extractor], parameter[name[input_features], name[f_name], name[idx]]] call[name[pline].add_model, parameter[name[feature_extractor_spec]]] variable[_cat_feature_idx] assign[=] call[name[_cat_feature_idx_mapping]][name[idx]] variable[ohe_input_features] assign[=] list[[<ast.Tuple object at 0x7da1b20280d0>]] variable[ohe_output_features] assign[=] list[[<ast.Tuple object at 0x7da1b202b040>]] variable[o_spec] assign[=] call[name[_Model_pb2].Model, parameter[]] name[o_spec].specificationVersion assign[=] name[SPECIFICATION_VERSION] variable[o_spec] assign[=] call[name[set_transform_interface_params], parameter[name[o_spec], name[ohe_input_features], name[ohe_output_features]]] variable[ohe_spec] assign[=] name[o_spec].oneHotEncoder name[ohe_spec].outputSparse assign[=] constant[True] if compare[name[model].handle_unknown equal[==] constant[error]] begin[:] name[ohe_spec].handleUnknown assign[=] call[name[_OHE_pb2].OneHotEncoder.HandleUnknown.Value, parameter[constant[ErrorOnUnknown]]] def function[bs_find, parameter[a, i]]: <ast.Tuple object at 0x7da1b2029960> assign[=] tuple[[<ast.Constant object at 0x7da1b202b0a0>, <ast.Call object at 0x7da1b202ba90>]] while compare[name[k] greater[>] constant[0]] begin[:] variable[_idx] assign[=] binary_operation[name[lb] + binary_operation[name[k] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]] if compare[call[name[a]][name[_idx]] less[<] name[i]] begin[:] variable[lb] assign[=] 
binary_operation[name[_idx] + constant[1]] <ast.AugAssign object at 0x7da1b202a380> variable[k] assign[=] binary_operation[name[k] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] return[name[lb]] variable[f_idx_bottom] assign[=] call[name[model].feature_indices_][name[_cat_feature_idx]] variable[f_idx_top] assign[=] call[name[model].feature_indices_][binary_operation[name[_cat_feature_idx] + constant[1]]] variable[cat_feat_idx_bottom] assign[=] call[name[bs_find], parameter[name[model].active_features_, name[f_idx_bottom]]] variable[cat_feat_idx_top] assign[=] call[name[bs_find], parameter[name[model].active_features_, name[f_idx_top]]] variable[n_cat_values] assign[=] binary_operation[name[cat_feat_idx_top] - name[cat_feat_idx_bottom]] for taget[name[i]] in starred[call[name[range], parameter[name[cat_feat_idx_bottom], name[cat_feat_idx_top]]]] begin[:] variable[cat_idx] assign[=] binary_operation[call[name[model].active_features_][name[i]] - name[f_idx_bottom]] call[name[ohe_spec].int64Categories.vector.append, parameter[name[cat_idx]]] call[name[pline].add_model, parameter[name[o_spec]]] call[name[feature_vectorizer_input_features].append, parameter[tuple[[<ast.Name object at 0x7da1b20296f0>, <ast.Call object at 0x7da1b20286d0>]]]] call[name[feature_vectorizer_size_map]][name[f_name]] assign[=] name[n_cat_values] <ast.AugAssign object at 0x7da1b202a260> variable[pass_through_features] assign[=] <ast.ListComp object at 0x7da1b202b610> if name[pass_through_features] begin[:] variable[f_name] assign[=] constant[__OHE_pass_through__] variable[feature_extractor_spec] assign[=] call[name[create_array_feature_extractor], parameter[name[input_features], name[f_name], name[pass_through_features]]] call[name[pline].add_model, parameter[name[feature_extractor_spec]]] call[name[feature_vectorizer_input_features].append, parameter[tuple[[<ast.Name object at 0x7da1b2011cc0>, <ast.Call object at 0x7da1b2010fd0>]]]] variable[output_feature_name] assign[=] 
call[call[name[output_features]][constant[0]]][constant[0]] variable[output_feature_dimension] assign[=] call[call[name[output_features]][constant[0]]][constant[1]].num_elements <ast.Tuple object at 0x7da1b2013040> assign[=] call[name[create_feature_vectorizer], parameter[name[feature_vectorizer_input_features], call[call[name[output_features]][constant[0]]][constant[0]], name[feature_vectorizer_size_map]]] assert[compare[name[_num_out_dim] equal[==] call[call[name[output_features]][constant[0]]][constant[1]].num_elements]] call[name[pline].add_model, parameter[name[fvec]]] return[call[name[_MLModel], parameter[name[pline].spec]]]
keyword[def] identifier[convert] ( identifier[model] , identifier[input_features] , identifier[output_features] ): literal[string] keyword[if] keyword[not] ( identifier[_HAS_SKLEARN] ): keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[_sklearn_util] . identifier[check_expected_type] ( identifier[model] , identifier[OneHotEncoder] ) identifier[_sklearn_util] . identifier[check_fitted] ( identifier[model] , keyword[lambda] identifier[m] : identifier[hasattr] ( identifier[m] , literal[string] )) identifier[_sklearn_util] . identifier[check_fitted] ( identifier[model] , keyword[lambda] identifier[m] : identifier[hasattr] ( identifier[m] , literal[string] )) identifier[input_dimension] = identifier[get_input_dimension] ( identifier[model] ) keyword[if] identifier[input_dimension] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[len] ( identifier[input_features] )== literal[int] keyword[assert] identifier[input_features] [ literal[int] ][ literal[int] ]== identifier[datatypes] . identifier[Array] ( identifier[input_dimension] ) identifier[input_dimension] = identifier[input_features] [ literal[int] ][ literal[int] ]. identifier[num_elements] identifier[expected_output_dimension] = identifier[update_dimension] ( identifier[model] , identifier[input_dimension] ) keyword[assert] identifier[output_features] [ literal[int] ][ literal[int] ]== identifier[datatypes] . identifier[Array] ( identifier[expected_output_dimension] ) identifier[feature_vectorizer_input_features] =[] identifier[feature_vectorizer_size_map] ={} keyword[if] identifier[model] . 
identifier[categorical_features] == literal[string] : identifier[_categorical_features] = identifier[set] ( identifier[range] ( identifier[input_dimension] )) identifier[_cat_feature_idx_mapping] = identifier[dict] (( identifier[i] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[input_dimension] )) keyword[else] : identifier[_categorical_features] = identifier[set] ( identifier[model] . identifier[categorical_features] ) identifier[_cat_feature_idx_mapping] = identifier[dict] (( identifier[_idx] , identifier[i] ) keyword[for] identifier[i] , identifier[_idx] keyword[in] identifier[enumerate] ( identifier[sorted] ( identifier[model] . identifier[categorical_features] ))) identifier[pline] = identifier[Pipeline] ( identifier[input_features] , identifier[output_features] ) identifier[pack_idx] = literal[int] keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[input_dimension] ): identifier[f_name] = literal[string] % identifier[pack_idx] keyword[if] identifier[idx] keyword[in] identifier[_categorical_features] : identifier[feature_extractor_spec] = identifier[create_array_feature_extractor] ( identifier[input_features] , identifier[f_name] , identifier[idx] , identifier[output_type] = literal[string] ) identifier[pline] . identifier[add_model] ( identifier[feature_extractor_spec] ) identifier[_cat_feature_idx] = identifier[_cat_feature_idx_mapping] [ identifier[idx] ] identifier[ohe_input_features] =[( identifier[f_name] , identifier[datatypes] . identifier[Int64] ())] identifier[ohe_output_features] =[( identifier[f_name] , identifier[datatypes] . identifier[Dictionary] ( literal[string] ))] identifier[o_spec] = identifier[_Model_pb2] . identifier[Model] () identifier[o_spec] . 
identifier[specificationVersion] = identifier[SPECIFICATION_VERSION] identifier[o_spec] = identifier[set_transform_interface_params] ( identifier[o_spec] , identifier[ohe_input_features] , identifier[ohe_output_features] ) identifier[ohe_spec] = identifier[o_spec] . identifier[oneHotEncoder] identifier[ohe_spec] . identifier[outputSparse] = keyword[True] keyword[if] identifier[model] . identifier[handle_unknown] == literal[string] : identifier[ohe_spec] . identifier[handleUnknown] = identifier[_OHE_pb2] . identifier[OneHotEncoder] . identifier[HandleUnknown] . identifier[Value] ( literal[string] ) keyword[else] : identifier[ohe_spec] . identifier[handleUnknown] = identifier[_OHE_pb2] . identifier[OneHotEncoder] . identifier[HandleUnknown] . identifier[Value] ( literal[string] ) keyword[def] identifier[bs_find] ( identifier[a] , identifier[i] ): identifier[lb] , identifier[k] = literal[int] , identifier[len] ( identifier[a] ) keyword[while] identifier[k] > literal[int] : identifier[_idx] = identifier[lb] +( identifier[k] // literal[int] ) keyword[if] identifier[a] [ identifier[_idx] ]< identifier[i] : identifier[lb] = identifier[_idx] + literal[int] identifier[k] -= literal[int] identifier[k] =( identifier[k] // literal[int] ) keyword[return] identifier[lb] identifier[f_idx_bottom] = identifier[model] . identifier[feature_indices_] [ identifier[_cat_feature_idx] ] identifier[f_idx_top] = identifier[model] . identifier[feature_indices_] [ identifier[_cat_feature_idx] + literal[int] ] identifier[cat_feat_idx_bottom] = identifier[bs_find] ( identifier[model] . identifier[active_features_] , identifier[f_idx_bottom] ) identifier[cat_feat_idx_top] = identifier[bs_find] ( identifier[model] . 
identifier[active_features_] , identifier[f_idx_top] ) identifier[n_cat_values] = identifier[cat_feat_idx_top] - identifier[cat_feat_idx_bottom] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[cat_feat_idx_bottom] , identifier[cat_feat_idx_top] ): identifier[cat_idx] = identifier[model] . identifier[active_features_] [ identifier[i] ]- identifier[f_idx_bottom] identifier[ohe_spec] . identifier[int64Categories] . identifier[vector] . identifier[append] ( identifier[cat_idx] ) identifier[pline] . identifier[add_model] ( identifier[o_spec] ) identifier[feature_vectorizer_input_features] . identifier[append] (( identifier[f_name] , identifier[datatypes] . identifier[Dictionary] ( literal[string] ))) identifier[feature_vectorizer_size_map] [ identifier[f_name] ]= identifier[n_cat_values] identifier[pack_idx] += literal[int] identifier[pass_through_features] =[ identifier[idx] keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[input_dimension] ) keyword[if] identifier[idx] keyword[not] keyword[in] identifier[_categorical_features] ] keyword[if] identifier[pass_through_features] : identifier[f_name] = literal[string] identifier[feature_extractor_spec] = identifier[create_array_feature_extractor] ( identifier[input_features] , identifier[f_name] , identifier[pass_through_features] ) identifier[pline] . identifier[add_model] ( identifier[feature_extractor_spec] ) identifier[feature_vectorizer_input_features] . identifier[append] ( ( identifier[f_name] , identifier[datatypes] . identifier[Array] ( identifier[len] ( identifier[pass_through_features] )))) identifier[output_feature_name] = identifier[output_features] [ literal[int] ][ literal[int] ] identifier[output_feature_dimension] = identifier[output_features] [ literal[int] ][ literal[int] ]. 
identifier[num_elements] identifier[fvec] , identifier[_num_out_dim] = identifier[create_feature_vectorizer] ( identifier[feature_vectorizer_input_features] , identifier[output_features] [ literal[int] ][ literal[int] ], identifier[feature_vectorizer_size_map] ) keyword[assert] identifier[_num_out_dim] == identifier[output_features] [ literal[int] ][ literal[int] ]. identifier[num_elements] identifier[pline] . identifier[add_model] ( identifier[fvec] ) keyword[return] identifier[_MLModel] ( identifier[pline] . identifier[spec] )
def convert(model, input_features, output_features):
    """Convert a fitted scikit-learn OneHotEncoder to the Core ML protobuf spec.

    The encoder is expressed as a pipeline: for each categorical input
    column, an ArrayFeatureExtractor pulls the column out and a Core ML
    OneHotEncoder expands it into a sparse dictionary; the remaining
    (pass-through) columns are extracted in one batch.  A trailing
    feature vectorizer packs everything back into a single output array.

    Parameters
    ----------
    model: OneHotEncoder
        A trained one-hot encoder model.

    input_features: str, optional
        Name of the input column.

    output_features: str, optional
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model
    """
    if not _HAS_SKLEARN:
        raise RuntimeError(
            "scikit-learn not found. scikit-learn conversion API is disabled."
        )

    # Make sure the model is fitted.
    _sklearn_util.check_expected_type(model, OneHotEncoder)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_"))
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_"))

    input_dimension = get_input_dimension(model)

    if input_dimension is not None:
        # Make sure that our starting dimensions are correctly managed.
        assert len(input_features) == 1
        assert input_features[0][1] == datatypes.Array(input_dimension)

    input_dimension = input_features[0][1].num_elements

    expected_output_dimension = update_dimension(model, input_dimension)
    assert output_features[0][1] == datatypes.Array(expected_output_dimension)

    # Accumulators for the trailing feature vectorizer: its ordered inputs
    # and, for each dictionary-typed input, the number of possible values.
    feature_vectorizer_input_features = []
    feature_vectorizer_size_map = {}

    if model.categorical_features == "all":
        _categorical_features = set(range(input_dimension))
        _cat_feature_idx_mapping = {i: i for i in range(input_dimension)}
    else:
        _categorical_features = set(model.categorical_features)
        _cat_feature_idx_mapping = {
            _idx: i for i, _idx in enumerate(sorted(model.categorical_features))
        }

    pline = Pipeline(input_features, output_features)

    def _bs_find(a, i):
        # Lower-bound search: index of the first element of `a` >= i.
        # Loop-invariant helper, hoisted out of the per-column loop below.
        lb, k = 0, len(a)
        while k > 0:
            _idx = lb + (k // 2)
            if a[_idx] < i:
                lb = _idx + 1
                k -= 1
            k = k // 2
        return lb

    # Track the overall packing index, which determines the output ordering.
    pack_idx = 0

    # First, go through all the columns that are encoded.  The sklearn OHE
    # puts all of these first, regardless of their original ordering.
    for idx in range(input_dimension):
        f_name = "__OHE_%d__" % pack_idx

        if idx in _categorical_features:
            # This input column is one hot encoded
            feature_extractor_spec = create_array_feature_extractor(
                input_features, f_name, idx, output_type="Int64"
            )

            pline.add_model(feature_extractor_spec)

            _cat_feature_idx = _cat_feature_idx_mapping[idx]

            ohe_input_features = [(f_name, datatypes.Int64())]
            ohe_output_features = [(f_name, datatypes.Dictionary("Int64"))]

            # Create a one hot encoder per column
            o_spec = _Model_pb2.Model()
            o_spec.specificationVersion = SPECIFICATION_VERSION
            o_spec = set_transform_interface_params(
                o_spec, ohe_input_features, ohe_output_features
            )

            ohe_spec = o_spec.oneHotEncoder
            ohe_spec.outputSparse = True

            if model.handle_unknown == "error":
                ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value(
                    "ErrorOnUnknown"
                )
            else:
                ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value(
                    "IgnoreUnknown"
                )

            # The slice of active_features_ belonging to this column spans
            # the feature indices [f_idx_bottom, f_idx_top).
            f_idx_bottom = model.feature_indices_[_cat_feature_idx]
            f_idx_top = model.feature_indices_[_cat_feature_idx + 1]

            # Now find where in the active features list we should look.
            cat_feat_idx_bottom = _bs_find(model.active_features_, f_idx_bottom)
            cat_feat_idx_top = _bs_find(model.active_features_, f_idx_top)
            n_cat_values = cat_feat_idx_top - cat_feat_idx_bottom

            for i in range(cat_feat_idx_bottom, cat_feat_idx_top):
                # The actual categorical value is stored as an offset in the
                # active_features list.
                cat_idx = model.active_features_[i] - f_idx_bottom
                ohe_spec.int64Categories.vector.append(cat_idx)

            # Add the ohe to the pipeline
            pline.add_model(o_spec)

            # Add the result to the feature_vectorizer at the end.
            feature_vectorizer_input_features.append(
                (f_name, datatypes.Dictionary("Int64"))
            )
            feature_vectorizer_size_map[f_name] = n_cat_values

            pack_idx += 1

    # Now go through all the columns that are not encoded as the sklearn OHE
    # puts these after the encoded ones.  For speed, we can put these all in
    # a single ArrayFeatureExtractor.
    pass_through_features = [
        idx for idx in range(input_dimension) if idx not in _categorical_features
    ]

    if pass_through_features:
        f_name = "__OHE_pass_through__"

        # This input column is not one hot encoded
        feature_extractor_spec = create_array_feature_extractor(
            input_features, f_name, pass_through_features
        )

        pline.add_model(feature_extractor_spec)

        feature_vectorizer_input_features.append(
            (f_name, datatypes.Array(len(pass_through_features)))
        )

    # Finally, add the feature vectorizer to the pipeline.
    fvec, _num_out_dim = create_feature_vectorizer(
        feature_vectorizer_input_features,
        output_features[0][0],
        feature_vectorizer_size_map,
    )

    # Make sure that the feature vectorizer output actually matches up with
    # the declared output dimension.
    assert _num_out_dim == output_features[0][1].num_elements

    pline.add_model(fvec)

    return _MLModel(pline.spec)
def annotate_image(self, request, retry=None, timeout=None):
    """Run image detection and annotation for a single image.

    Example:

        >>> from google.cloud.vision_v1 import ImageAnnotatorClient
        >>> client = ImageAnnotatorClient()
        >>> request = {
        ...     'image': {
        ...         'source': {'image_uri': 'https://foo.com/image.jpg'},
        ...     },
        ... }
        >>> response = client.annotate_image(request)

    Args:
        request (:class:`~.vision_v1.types.AnnotateImageRequest`):
            The annotation request.  ``request['image']`` may be a
            file-like object, inline ``content`` bytes, or carry a
            ``source.filename`` that is read from disk here.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for
            the underlying RPC; ``None`` disables retries.
        timeout (Optional[float]): Time in seconds to wait for the
            request; applies per attempt when ``retry`` is given.

    Returns:
        :class:`~.vision_v1.types.AnnotateImageResponse` The API
        response for this one image.
    """
    image = protobuf.get(request, "image")

    # A file-like image: slurp its bytes into the request's content field.
    if hasattr(image, "read"):
        content = image.read()
        protobuf.set(request, "image", {})
        protobuf.set(request, "image.content", content)
        image = protobuf.get(request, "image")

    # A filename source: load the file's contents and drop the source ref.
    filename = protobuf.get(image, "source.filename", default=None)
    if filename:
        with io.open(filename, "rb") as fh:
            protobuf.set(request, "image.content", fh.read())
        protobuf.set(request, "image.source", None)

    # When no features were requested, default to asking for all of them.
    protobuf.setdefault(request, "features", self._get_all_features())

    batch = self.batch_annotate_images([request], retry=retry, timeout=timeout)
    return batch.responses[0]
def function[annotate_image, parameter[self, request, retry, timeout]]: constant[Run image detection and annotation for an image. Example: >>> from google.cloud.vision_v1 import ImageAnnotatorClient >>> client = ImageAnnotatorClient() >>> request = { ... 'image': { ... 'source': {'image_uri': 'https://foo.com/image.jpg'}, ... }, ... } >>> response = client.annotate_image(request) Args: request (:class:`~.vision_v1.types.AnnotateImageRequest`) retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. Returns: :class:`~.vision_v1.types.AnnotateImageResponse` The API response. ] variable[image] assign[=] call[name[protobuf].get, parameter[name[request], constant[image]]] if call[name[hasattr], parameter[name[image], constant[read]]] begin[:] variable[img_bytes] assign[=] call[name[image].read, parameter[]] call[name[protobuf].set, parameter[name[request], constant[image], dictionary[[], []]]] call[name[protobuf].set, parameter[name[request], constant[image.content], name[img_bytes]]] variable[image] assign[=] call[name[protobuf].get, parameter[name[request], constant[image]]] variable[filename] assign[=] call[name[protobuf].get, parameter[name[image], constant[source.filename]]] if name[filename] begin[:] with call[name[io].open, parameter[name[filename], constant[rb]]] begin[:] call[name[protobuf].set, parameter[name[request], constant[image.content], call[name[img_file].read, parameter[]]]] call[name[protobuf].set, parameter[name[request], constant[image.source], constant[None]]] call[name[protobuf].setdefault, parameter[name[request], constant[features], call[name[self]._get_all_features, parameter[]]]] variable[r] assign[=] call[name[self].batch_annotate_images, parameter[list[[<ast.Name object at 
0x7da1b2345c00>]]]] return[call[name[r].responses][constant[0]]]
keyword[def] identifier[annotate_image] ( identifier[self] , identifier[request] , identifier[retry] = keyword[None] , identifier[timeout] = keyword[None] ): literal[string] identifier[image] = identifier[protobuf] . identifier[get] ( identifier[request] , literal[string] ) keyword[if] identifier[hasattr] ( identifier[image] , literal[string] ): identifier[img_bytes] = identifier[image] . identifier[read] () identifier[protobuf] . identifier[set] ( identifier[request] , literal[string] ,{}) identifier[protobuf] . identifier[set] ( identifier[request] , literal[string] , identifier[img_bytes] ) identifier[image] = identifier[protobuf] . identifier[get] ( identifier[request] , literal[string] ) identifier[filename] = identifier[protobuf] . identifier[get] ( identifier[image] , literal[string] , identifier[default] = keyword[None] ) keyword[if] identifier[filename] : keyword[with] identifier[io] . identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[img_file] : identifier[protobuf] . identifier[set] ( identifier[request] , literal[string] , identifier[img_file] . identifier[read] ()) identifier[protobuf] . identifier[set] ( identifier[request] , literal[string] , keyword[None] ) identifier[protobuf] . identifier[setdefault] ( identifier[request] , literal[string] , identifier[self] . identifier[_get_all_features] ()) identifier[r] = identifier[self] . identifier[batch_annotate_images] ([ identifier[request] ], identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] ) keyword[return] identifier[r] . identifier[responses] [ literal[int] ]
def annotate_image(self, request, retry=None, timeout=None):
    """Annotate one image and return its response.

    Normalizes the ``image`` portion of *request* in place — reading a
    file-like object or an on-disk ``source.filename`` into inline
    content bytes — fills in a default feature list when none is given,
    then delegates to :meth:`batch_annotate_images` with a single-item
    batch.

    Args:
        request (:class:`~.vision_v1.types.AnnotateImageRequest`):
            The request to annotate.
        retry (Optional[google.api_core.retry.Retry]): Retry object for
            the RPC; ``None`` means no retries.
        timeout (Optional[float]): Seconds to wait per attempt.

    Returns:
        :class:`~.vision_v1.types.AnnotateImageResponse`: The sole
        response from the one-element batch.
    """
    img = protobuf.get(request, 'image')

    if hasattr(img, 'read'):
        # The caller handed us an open file object; inline its bytes.
        data = img.read()
        protobuf.set(request, 'image', {})
        protobuf.set(request, 'image.content', data)
        img = protobuf.get(request, 'image')

    path = protobuf.get(img, 'source.filename', default=None)
    if path:
        # Replace the on-disk source with the file's raw contents.
        with io.open(path, 'rb') as source_file:
            protobuf.set(request, 'image.content', source_file.read())
        protobuf.set(request, 'image.source', None)

    # Absent an explicit feature list, request every available feature.
    protobuf.setdefault(request, 'features', self._get_all_features())

    result = self.batch_annotate_images([request], retry=retry, timeout=timeout)
    return result.responses[0]