code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def parse(readDataInstance):
    """
    Builds a new L{ImageBoundForwarderRefEntry} from raw data.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object positioned at the data
    for an IMAGE_BOUND_FORWARDER_REF entry.

    @rtype: L{ImageBoundForwarderRefEntry}
    @return: A new L{ImageBoundForwarderRefEntry} object populated from
    the stream.
    """
    entry = ImageBoundForwarderRefEntry()
    # Field order mirrors the on-disk layout: DWORD, WORD, WORD.
    entry.timeDateStamp.value = readDataInstance.readDword()
    entry.offsetModuleName.value = readDataInstance.readWord()
    entry.reserved.value = readDataInstance.readWord()
    return entry
def function[parse, parameter[readDataInstance]]: constant[ Returns a new L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRefEntry} object. @rtype: L{ImageBoundForwarderRefEntry} @return: A new L{ImageBoundForwarderRefEntry} object. ] variable[boundForwarderEntry] assign[=] call[name[ImageBoundForwarderRefEntry], parameter[]] name[boundForwarderEntry].timeDateStamp.value assign[=] call[name[readDataInstance].readDword, parameter[]] name[boundForwarderEntry].offsetModuleName.value assign[=] call[name[readDataInstance].readWord, parameter[]] name[boundForwarderEntry].reserved.value assign[=] call[name[readDataInstance].readWord, parameter[]] return[name[boundForwarderEntry]]
keyword[def] identifier[parse] ( identifier[readDataInstance] ): literal[string] identifier[boundForwarderEntry] = identifier[ImageBoundForwarderRefEntry] () identifier[boundForwarderEntry] . identifier[timeDateStamp] . identifier[value] = identifier[readDataInstance] . identifier[readDword] () identifier[boundForwarderEntry] . identifier[offsetModuleName] . identifier[value] = identifier[readDataInstance] . identifier[readWord] () identifier[boundForwarderEntry] . identifier[reserved] . identifier[value] = identifier[readDataInstance] . identifier[readWord] () keyword[return] identifier[boundForwarderEntry]
def parse(readDataInstance): """ Returns a new L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRefEntry} object. @rtype: L{ImageBoundForwarderRefEntry} @return: A new L{ImageBoundForwarderRefEntry} object. """ boundForwarderEntry = ImageBoundForwarderRefEntry() boundForwarderEntry.timeDateStamp.value = readDataInstance.readDword() boundForwarderEntry.offsetModuleName.value = readDataInstance.readWord() boundForwarderEntry.reserved.value = readDataInstance.readWord() return boundForwarderEntry
def transition(self, data, year, linked_tables=None):
    """
    Add or remove rows from a table based on population targets.

    Parameters
    ----------
    data : pandas.DataFrame
        Table from which rows are removed or to which rows are added.
    year : int
        Year number forwarded to `transitioner`.
    linked_tables : dict of tuple, optional
        Mapping of name -> (table, 'column name'). The column should
        match the index of `data`; indexes copied or removed from
        `data` are mirrored in each linked table. The mapping keys
        become the keys of the returned `updated_links`.

    Returns
    -------
    updated : pandas.DataFrame
        Table with rows removed or added.
    added : pandas.Series
        Indexes of new rows in `updated`.
    updated_links : dict of pandas.DataFrame

    """
    logger.debug('start: transition')
    # Treat a missing/falsy mapping as "no linked tables".
    linked_tables = linked_tables or {}
    updated_links = {}

    with log_start_finish('add/remove rows', logger):
        updated, added, copied, removed = self.transitioner(data, year)

    for name, (table, column) in linked_tables.items():
        logger.debug('updating linked table {}'.format(name))
        updated_links[name] = _update_linked_table(
            table, column, added, copied, removed)

    logger.debug('finish: transition')
    return updated, added, updated_links
def function[transition, parameter[self, data, year, linked_tables]]: constant[ Add or remove rows from a table based on population targets. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : int Year number that will be passed to `transitioner`. linked_tables : dict of tuple, optional Dictionary of (table, 'column name') pairs. The column name should match the index of `data`. Indexes in `data` that are copied or removed will also be copied and removed in linked tables. They dictionary keys are used in the returned `updated_links`. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Series Indexes of new rows in `updated`. updated_links : dict of pandas.DataFrame ] call[name[logger].debug, parameter[constant[start: transition]]] variable[linked_tables] assign[=] <ast.BoolOp object at 0x7da1b2344b20> variable[updated_links] assign[=] dictionary[[], []] with call[name[log_start_finish], parameter[constant[add/remove rows], name[logger]]] begin[:] <ast.Tuple object at 0x7da1b2347d90> assign[=] call[name[self].transitioner, parameter[name[data], name[year]]] for taget[tuple[[<ast.Name object at 0x7da2054a7580>, <ast.Tuple object at 0x7da2054a46a0>]]] in starred[call[name[linked_tables].items, parameter[]]] begin[:] call[name[logger].debug, parameter[call[constant[updating linked table {}].format, parameter[name[table_name]]]]] call[name[updated_links]][name[table_name]] assign[=] call[name[_update_linked_table], parameter[name[table], name[col], name[added], name[copied], name[removed]]] call[name[logger].debug, parameter[constant[finish: transition]]] return[tuple[[<ast.Name object at 0x7da2054a7100>, <ast.Name object at 0x7da2054a7340>, <ast.Name object at 0x7da2054a4550>]]]
keyword[def] identifier[transition] ( identifier[self] , identifier[data] , identifier[year] , identifier[linked_tables] = keyword[None] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) identifier[linked_tables] = identifier[linked_tables] keyword[or] {} identifier[updated_links] ={} keyword[with] identifier[log_start_finish] ( literal[string] , identifier[logger] ): identifier[updated] , identifier[added] , identifier[copied] , identifier[removed] = identifier[self] . identifier[transitioner] ( identifier[data] , identifier[year] ) keyword[for] identifier[table_name] ,( identifier[table] , identifier[col] ) keyword[in] identifier[linked_tables] . identifier[items] (): identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[table_name] )) identifier[updated_links] [ identifier[table_name] ]= identifier[_update_linked_table] ( identifier[table] , identifier[col] , identifier[added] , identifier[copied] , identifier[removed] ) identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[updated] , identifier[added] , identifier[updated_links]
def transition(self, data, year, linked_tables=None): """ Add or remove rows from a table based on population targets. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : int Year number that will be passed to `transitioner`. linked_tables : dict of tuple, optional Dictionary of (table, 'column name') pairs. The column name should match the index of `data`. Indexes in `data` that are copied or removed will also be copied and removed in linked tables. They dictionary keys are used in the returned `updated_links`. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Series Indexes of new rows in `updated`. updated_links : dict of pandas.DataFrame """ logger.debug('start: transition') linked_tables = linked_tables or {} updated_links = {} with log_start_finish('add/remove rows', logger): (updated, added, copied, removed) = self.transitioner(data, year) # depends on [control=['with'], data=[]] for (table_name, (table, col)) in linked_tables.items(): logger.debug('updating linked table {}'.format(table_name)) updated_links[table_name] = _update_linked_table(table, col, added, copied, removed) # depends on [control=['for'], data=[]] logger.debug('finish: transition') return (updated, added, updated_links)
def _cb_dcnm_msg(self, method, body):
    """Callback function to process DCNM network creation/update/deletion
    message received by AMQP.

    It also communicates with DCNM to extract info for CPNR record
    insertion/deletion.

    :param pika.channel.Channel ch: The channel instance.
    :param pika.Spec.Basic.Deliver method: The basic deliver method which
        includes routing key.
    :param pika.Spec.BasicProperties properties: properties
    :param str body: The message body.
    """
    LOG.debug('Routing_key: %(key)s, body: %(body)s.',
              {'key': method.routing_key, 'body': body})
    partition_keyword = 'auto-config.organization.partition'
    network_keyword = partition_keyword + '.network'
    network_create_key = network_keyword + '.create'
    network_update_key = network_keyword + '.update'
    msg = jsonutils.loads(body)
    LOG.debug('_cb_dcnm_msg: RX message: %s', msg)
    if not msg:
        LOG.debug("error, return")
        return

    # The 'link' URL encodes project, partition and segment at fixed
    # path positions -- assumes the DCNM REST URL layout is stable;
    # TODO(review): confirm against the DCNM API version in use.
    url = msg['link']
    url_fields = url.split('/')
    pre_project_name = url_fields[4]
    pre_partition_name = url_fields[6]
    pre_seg_id = url_fields[9]
    data = {"project_name": pre_project_name,
            "partition_name": pre_partition_name,
            "segmentation_id": pre_seg_id}
    # Updates are treated the same as creates; anything else on this
    # routing key is a delete.
    if network_create_key in method.routing_key or (
            network_update_key in method.routing_key):
        pri = self._create_pri
        event_type = 'dcnm.network.create'
    else:
        pri = self._delete_pri
        event_type = 'dcnm.network.delete'
    if self._pq is not None:
        payload = (event_type, data)
        # Bug fix: the original enqueued the function object time.ctime
        # (uncalled) instead of the timestamp string it returns.
        self._pq.put((pri, time.ctime(), payload))
def function[_cb_dcnm_msg, parameter[self, method, body]]: constant[Callback function to process DCNM network creation/update/deletion message received by AMQP. It also communicates with DCNM to extract info for CPNR record insertion/deletion. :param pika.channel.Channel ch: The channel instance. :param pika.Spec.Basic.Deliver method: The basic deliver method which includes routing key. :param pika.Spec.BasicProperties properties: properties :param str body: The message body. ] call[name[LOG].debug, parameter[constant[Routing_key: %(key)s, body: %(body)s.], dictionary[[<ast.Constant object at 0x7da18ede48b0>, <ast.Constant object at 0x7da18ede4160>], [<ast.Attribute object at 0x7da18ede73a0>, <ast.Name object at 0x7da18ede4ee0>]]]] variable[partition_keyword] assign[=] constant[auto-config.organization.partition] variable[network_keyword] assign[=] binary_operation[name[partition_keyword] + constant[.network]] variable[network_create_key] assign[=] binary_operation[name[network_keyword] + constant[.create]] variable[network_update_key] assign[=] binary_operation[name[network_keyword] + constant[.update]] variable[msg] assign[=] call[name[jsonutils].loads, parameter[name[body]]] call[name[LOG].debug, parameter[constant[_cb_dcnm_msg: RX message: %s], name[msg]]] if <ast.UnaryOp object at 0x7da1b1a5ed70> begin[:] call[name[LOG].debug, parameter[constant[error, return]]] return[None] variable[url] assign[=] call[name[msg]][constant[link]] variable[url_fields] assign[=] call[name[url].split, parameter[constant[/]]] variable[pre_project_name] assign[=] call[name[url_fields]][constant[4]] variable[pre_partition_name] assign[=] call[name[url_fields]][constant[6]] variable[pre_seg_id] assign[=] call[name[url_fields]][constant[9]] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a5ebc0>, <ast.Constant object at 0x7da1b1a5c3d0>, <ast.Constant object at 0x7da1b1a5eb90>], [<ast.Name object at 0x7da1b1a5f490>, <ast.Name object at 0x7da1b1a5d210>, <ast.Name 
object at 0x7da1b1a5e560>]] if <ast.BoolOp object at 0x7da1b1a5ffd0> begin[:] variable[pri] assign[=] name[self]._create_pri variable[event_type] assign[=] constant[dcnm.network.create] if compare[name[self]._pq is_not constant[None]] begin[:] variable[payload] assign[=] tuple[[<ast.Name object at 0x7da1b1b15ea0>, <ast.Name object at 0x7da1b1c60430>]] call[name[self]._pq.put, parameter[tuple[[<ast.Name object at 0x7da1b1c63100>, <ast.Attribute object at 0x7da1b1c60df0>, <ast.Name object at 0x7da1b1c611e0>]]]]
keyword[def] identifier[_cb_dcnm_msg] ( identifier[self] , identifier[method] , identifier[body] ): literal[string] identifier[LOG] . identifier[debug] ( literal[string] , { literal[string] : identifier[method] . identifier[routing_key] , literal[string] : identifier[body] }) identifier[partition_keyword] = literal[string] identifier[network_keyword] = identifier[partition_keyword] + literal[string] identifier[network_create_key] = identifier[network_keyword] + literal[string] identifier[network_update_key] = identifier[network_keyword] + literal[string] identifier[msg] = identifier[jsonutils] . identifier[loads] ( identifier[body] ) identifier[LOG] . identifier[debug] ( literal[string] , identifier[msg] ) keyword[if] keyword[not] identifier[msg] : identifier[LOG] . identifier[debug] ( literal[string] ) keyword[return] identifier[url] = identifier[msg] [ literal[string] ] identifier[url_fields] = identifier[url] . identifier[split] ( literal[string] ) identifier[pre_project_name] = identifier[url_fields] [ literal[int] ] identifier[pre_partition_name] = identifier[url_fields] [ literal[int] ] identifier[pre_seg_id] = identifier[url_fields] [ literal[int] ] identifier[data] ={ literal[string] : identifier[pre_project_name] , literal[string] : identifier[pre_partition_name] , literal[string] : identifier[pre_seg_id] } keyword[if] identifier[network_create_key] keyword[in] identifier[method] . identifier[routing_key] keyword[or] ( identifier[network_update_key] keyword[in] identifier[method] . identifier[routing_key] ): identifier[pri] = identifier[self] . identifier[_create_pri] identifier[event_type] = literal[string] keyword[else] : identifier[pri] = identifier[self] . identifier[_delete_pri] identifier[event_type] = literal[string] keyword[if] identifier[self] . identifier[_pq] keyword[is] keyword[not] keyword[None] : identifier[payload] =( identifier[event_type] , identifier[data] ) identifier[self] . identifier[_pq] . 
identifier[put] (( identifier[pri] , identifier[time] . identifier[ctime] , identifier[payload] ))
def _cb_dcnm_msg(self, method, body): """Callback function to process DCNM network creation/update/deletion message received by AMQP. It also communicates with DCNM to extract info for CPNR record insertion/deletion. :param pika.channel.Channel ch: The channel instance. :param pika.Spec.Basic.Deliver method: The basic deliver method which includes routing key. :param pika.Spec.BasicProperties properties: properties :param str body: The message body. """ LOG.debug('Routing_key: %(key)s, body: %(body)s.', {'key': method.routing_key, 'body': body}) partition_keyword = 'auto-config.organization.partition' network_keyword = partition_keyword + '.network' network_create_key = network_keyword + '.create' network_update_key = network_keyword + '.update' msg = jsonutils.loads(body) LOG.debug('_cb_dcnm_msg: RX message: %s', msg) if not msg: LOG.debug('error, return') return # depends on [control=['if'], data=[]] url = msg['link'] url_fields = url.split('/') pre_project_name = url_fields[4] pre_partition_name = url_fields[6] pre_seg_id = url_fields[9] data = {'project_name': pre_project_name, 'partition_name': pre_partition_name, 'segmentation_id': pre_seg_id} if network_create_key in method.routing_key or network_update_key in method.routing_key: pri = self._create_pri event_type = 'dcnm.network.create' # depends on [control=['if'], data=[]] else: pri = self._delete_pri event_type = 'dcnm.network.delete' if self._pq is not None: payload = (event_type, data) self._pq.put((pri, time.ctime, payload)) # depends on [control=['if'], data=[]]
def connect(self):
    """Opens a HTTP connection to the RPC server."""
    hostname = self.url.hostname
    port = self.url.port
    logger.debug("Opening connection to %s:%s", hostname, port)
    try:
        # The (possibly unconnected) connection object is stored on
        # self before connect() is attempted, matching prior behavior.
        self.connection = httplib.HTTPConnection(hostname, port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as exc:
        # Wrap transport-level failures in the package's interface error.
        raise errors.InterfaceError('Unable to connect to the specified service', exc)
def function[connect, parameter[self]]: constant[Opens a HTTP connection to the RPC server.] call[name[logger].debug, parameter[constant[Opening connection to %s:%s], name[self].url.hostname, name[self].url.port]] <ast.Try object at 0x7da1b1131e70>
keyword[def] identifier[connect] ( identifier[self] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[url] . identifier[hostname] , identifier[self] . identifier[url] . identifier[port] ) keyword[try] : identifier[self] . identifier[connection] = identifier[httplib] . identifier[HTTPConnection] ( identifier[self] . identifier[url] . identifier[hostname] , identifier[self] . identifier[url] . identifier[port] ) identifier[self] . identifier[connection] . identifier[connect] () keyword[except] ( identifier[httplib] . identifier[HTTPException] , identifier[socket] . identifier[error] ) keyword[as] identifier[e] : keyword[raise] identifier[errors] . identifier[InterfaceError] ( literal[string] , identifier[e] )
def connect(self): """Opens a HTTP connection to the RPC server.""" logger.debug('Opening connection to %s:%s', self.url.hostname, self.url.port) try: self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port) self.connection.connect() # depends on [control=['try'], data=[]] except (httplib.HTTPException, socket.error) as e: raise errors.InterfaceError('Unable to connect to the specified service', e) # depends on [control=['except'], data=['e']]
def delete_ssh_template(auth, url, template_name=None, template_id=None):
    """
    Takes template_name as input to issue RESTUL call to HP IMC which will
    delete the specific ssh template from the IMC system

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :param template_name: str value of template name

    :param template_id: str value template template_id value

    :return: int HTTP response code

    :rtype int

    :raises ValueError: if neither template_name nor template_id is given

    """
    try:
        if template_id is None:
            # Bug fix: the original referenced an undefined name
            # (ssh_template) when template_name was also None; require
            # at least one identifier instead of crashing with NameError.
            if template_name is None:
                raise ValueError(
                    "delete_ssh_template: must supply template_name or template_id")
            ssh_templates = get_ssh_template(auth, url)
            for template in ssh_templates:
                if template['name'] == template_name:
                    template_id = template['id']
        f_url = url + "/imcrs/plat/res/ssh/%s/delete" % template_id
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        # Preserved legacy behavior: network errors return a string,
        # not an exception -- callers compare against this message.
        return "Error:\n" + str(error) + " delete_ssh_template: An Error has occured"
def function[delete_ssh_template, parameter[auth, url, template_name, template_id]]: constant[ Takes template_name as input to issue RESTUL call to HP IMC which will delete the specific ssh template from the IMC system :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :param template_name: str value of template name :param template_id: str value template template_id value :return: int HTTP response code :rtype int ] <ast.Try object at 0x7da20c76dae0>
keyword[def] identifier[delete_ssh_template] ( identifier[auth] , identifier[url] , identifier[template_name] = keyword[None] , identifier[template_id] = keyword[None] ): literal[string] keyword[try] : keyword[if] identifier[template_id] keyword[is] keyword[None] : identifier[ssh_templates] = identifier[get_ssh_template] ( identifier[auth] , identifier[url] ) keyword[if] identifier[template_name] keyword[is] keyword[None] : identifier[template_name] = identifier[ssh_template] [ literal[string] ] identifier[template_id] = keyword[None] keyword[for] identifier[template] keyword[in] identifier[ssh_templates] : keyword[if] identifier[template] [ literal[string] ]== identifier[template_name] : identifier[template_id] = identifier[template] [ literal[string] ] identifier[f_url] = identifier[url] + literal[string] % identifier[template_id] identifier[response] = identifier[requests] . identifier[delete] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[HEADERS] ) keyword[return] identifier[response] . identifier[status_code] keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[error] : keyword[return] literal[string] + identifier[str] ( identifier[error] )+ literal[string]
def delete_ssh_template(auth, url, template_name=None, template_id=None): """ Takes template_name as input to issue RESTUL call to HP IMC which will delete the specific ssh template from the IMC system :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :param template_name: str value of template name :param template_id: str value template template_id value :return: int HTTP response code :rtype int """ try: if template_id is None: ssh_templates = get_ssh_template(auth, url) if template_name is None: template_name = ssh_template['name'] # depends on [control=['if'], data=['template_name']] template_id = None for template in ssh_templates: if template['name'] == template_name: template_id = template['id'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['template']] # depends on [control=['if'], data=['template_id']] f_url = url + '/imcrs/plat/res/ssh/%s/delete' % template_id response = requests.delete(f_url, auth=auth, headers=HEADERS) return response.status_code # depends on [control=['try'], data=[]] except requests.exceptions.RequestException as error: return 'Error:\n' + str(error) + ' delete_ssh_template: An Error has occured' # depends on [control=['except'], data=['error']]
def destroy(self):
    '''
    Tear down the syndic minion
    '''
    # We borrowed the local client's poller, so let the parent class
    # hand it back before anything else is torn down.
    super(Syndic, self).destroy()

    # EAFP: drop the local client reference if one was ever attached.
    try:
        del self.local
    except AttributeError:
        pass

    # Stop the event forwarder if it was started.
    try:
        forwarder = self.forward_events
    except AttributeError:
        pass
    else:
        forwarder.stop()
def function[destroy, parameter[self]]: constant[ Tear down the syndic minion ] call[call[name[super], parameter[name[Syndic], name[self]]].destroy, parameter[]] if call[name[hasattr], parameter[name[self], constant[local]]] begin[:] <ast.Delete object at 0x7da1b21e8d30> if call[name[hasattr], parameter[name[self], constant[forward_events]]] begin[:] call[name[self].forward_events.stop, parameter[]]
keyword[def] identifier[destroy] ( identifier[self] ): literal[string] identifier[super] ( identifier[Syndic] , identifier[self] ). identifier[destroy] () keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): keyword[del] identifier[self] . identifier[local] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[forward_events] . identifier[stop] ()
def destroy(self): """ Tear down the syndic minion """ # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local # depends on [control=['if'], data=[]] if hasattr(self, 'forward_events'): self.forward_events.stop() # depends on [control=['if'], data=[]]
def _unpack(formatstring, packed):
    """Unpack a bytestring into a value.

    Uses the built-in :mod:`struct` Python module.

    Args:
        * formatstring (str): String for the packing. See the :mod:`struct`
          module for details.
        * packed (str): The bytestring to be unpacked.

    Returns:
        A value. The type depends on the formatstring.

    Raises:
        ValueError

    Note that the :mod:`struct` module wants byte buffers
    for Python3, but bytestrings for Python2. This is compensated for
    automatically.

    """
    _checkString(formatstring, description='formatstring', minlength=1)
    _checkString(packed, description='packed string', minlength=1)

    if sys.version_info[0] > 2:
        packed = bytes(packed, encoding='latin1')  # Convert types to make it Python3 compatible

    try:
        value = struct.unpack(formatstring, packed)[0]
    # Bug fix: the original bare `except:` caught everything, including
    # SystemExit/KeyboardInterrupt and unrelated programming errors.
    # Only struct unpacking failures should be rephrased as ValueError.
    except struct.error:
        errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.'
        errortext += ' Bytestring: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(packed, formatstring))
    return value
def function[_unpack, parameter[formatstring, packed]]: constant[Unpack a bytestring into a value. Uses the built-in :mod:`struct` Python module. Args: * formatstring (str): String for the packing. See the :mod:`struct` module for details. * packed (str): The bytestring to be unpacked. Returns: A value. The type depends on the formatstring. Raises: ValueError Note that the :mod:`struct` module wants byte buffers for Python3, but bytestrings for Python2. This is compensated for automatically. ] call[name[_checkString], parameter[name[formatstring]]] call[name[_checkString], parameter[name[packed]]] if compare[call[name[sys].version_info][constant[0]] greater[>] constant[2]] begin[:] variable[packed] assign[=] call[name[bytes], parameter[name[packed]]] <ast.Try object at 0x7da18eb54370> return[name[value]]
keyword[def] identifier[_unpack] ( identifier[formatstring] , identifier[packed] ): literal[string] identifier[_checkString] ( identifier[formatstring] , identifier[description] = literal[string] , identifier[minlength] = literal[int] ) identifier[_checkString] ( identifier[packed] , identifier[description] = literal[string] , identifier[minlength] = literal[int] ) keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]> literal[int] : identifier[packed] = identifier[bytes] ( identifier[packed] , identifier[encoding] = literal[string] ) keyword[try] : identifier[value] = identifier[struct] . identifier[unpack] ( identifier[formatstring] , identifier[packed] )[ literal[int] ] keyword[except] : identifier[errortext] = literal[string] identifier[errortext] += literal[string] keyword[raise] identifier[ValueError] ( identifier[errortext] . identifier[format] ( identifier[packed] , identifier[formatstring] )) keyword[return] identifier[value]
def _unpack(formatstring, packed): """Unpack a bytestring into a value. Uses the built-in :mod:`struct` Python module. Args: * formatstring (str): String for the packing. See the :mod:`struct` module for details. * packed (str): The bytestring to be unpacked. Returns: A value. The type depends on the formatstring. Raises: ValueError Note that the :mod:`struct` module wants byte buffers for Python3, but bytestrings for Python2. This is compensated for automatically. """ _checkString(formatstring, description='formatstring', minlength=1) _checkString(packed, description='packed string', minlength=1) if sys.version_info[0] > 2: packed = bytes(packed, encoding='latin1') # Convert types to make it Python3 compatible # depends on [control=['if'], data=[]] try: value = struct.unpack(formatstring, packed)[0] # depends on [control=['try'], data=[]] except: errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.' errortext += ' Bytestring: {0!r} Struct format code is: {1}' raise ValueError(errortext.format(packed, formatstring)) # depends on [control=['except'], data=[]] return value
def findall(self, expr):
    """list of all matching (sub-)expressions in `expr`

    See also:
        :meth:`finditer` yields the matches (:class:`MatchDict` instances)
        for the matched expressions.
    """
    matches = []
    # Depth-first: collect matches from positional then keyword children
    # before testing `expr` itself, so inner matches precede outer ones.
    try:
        for child in expr.args:
            matches += self.findall(child)
        for child in expr.kwargs.values():
            matches += self.findall(child)
    except AttributeError:
        # Atomic expression: nothing to descend into.
        pass
    if self.match(expr):
        matches.append(expr)
    return matches
def function[findall, parameter[self, expr]]: constant[list of all matching (sub-)expressions in `expr` See also: :meth:`finditer` yields the matches (:class:`MatchDict` instances) for the matched expressions. ] variable[result] assign[=] list[[]] <ast.Try object at 0x7da18ede4dc0> if call[name[self].match, parameter[name[expr]]] begin[:] call[name[result].append, parameter[name[expr]]] return[name[result]]
keyword[def] identifier[findall] ( identifier[self] , identifier[expr] ): literal[string] identifier[result] =[] keyword[try] : keyword[for] identifier[arg] keyword[in] identifier[expr] . identifier[args] : identifier[result] . identifier[extend] ( identifier[self] . identifier[findall] ( identifier[arg] )) keyword[for] identifier[arg] keyword[in] identifier[expr] . identifier[kwargs] . identifier[values] (): identifier[result] . identifier[extend] ( identifier[self] . identifier[findall] ( identifier[arg] )) keyword[except] identifier[AttributeError] : keyword[pass] keyword[if] identifier[self] . identifier[match] ( identifier[expr] ): identifier[result] . identifier[append] ( identifier[expr] ) keyword[return] identifier[result]
def findall(self, expr): """list of all matching (sub-)expressions in `expr` See also: :meth:`finditer` yields the matches (:class:`MatchDict` instances) for the matched expressions. """ result = [] try: for arg in expr.args: result.extend(self.findall(arg)) # depends on [control=['for'], data=['arg']] for arg in expr.kwargs.values(): result.extend(self.findall(arg)) # depends on [control=['for'], data=['arg']] # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] if self.match(expr): result.append(expr) # depends on [control=['if'], data=[]] return result
def _pick_or_create_inserted_op_moment_index(
        self, splitter_index: int, op: ops.Operation,
        strategy: InsertStrategy) -> int:
    """Determines and prepares where an insertion will occur.

    Args:
        splitter_index: The index to insert at.
        op: The operation that will be inserted.
        strategy: The insertion strategy.

    Returns:
        The index of the (possibly new) moment where the insertion
        should occur.

    Raises:
        ValueError: Unrecognized append strategy.
    """
    # NEW / NEW_THEN_INLINE: always open a fresh moment at the split point.
    if (strategy is InsertStrategy.NEW or
            strategy is InsertStrategy.NEW_THEN_INLINE):
        self._moments.insert(splitter_index, ops.Moment())
        return splitter_index

    # INLINE: reuse the moment just before the split point when the op
    # fits there; otherwise fall back to creating a new moment.
    if strategy is InsertStrategy.INLINE:
        prior = splitter_index - 1
        if 0 <= prior < len(self._moments) and self._can_add_op_at(prior, op):
            return prior
        return self._pick_or_create_inserted_op_moment_index(
            splitter_index, op, InsertStrategy.NEW)

    # EARLIEST: slide back to the earliest available moment when the op
    # fits at the split point; otherwise degrade to INLINE.
    if strategy is InsertStrategy.EARLIEST:
        if self._can_add_op_at(splitter_index, op):
            earliest = self._prev_moment_available(op, splitter_index)
            return earliest if earliest else 0
        return self._pick_or_create_inserted_op_moment_index(
            splitter_index, op, InsertStrategy.INLINE)

    raise ValueError('Unrecognized append strategy: {}'.format(strategy))
def function[_pick_or_create_inserted_op_moment_index, parameter[self, splitter_index, op, strategy]]: constant[Determines and prepares where an insertion will occur. Args: splitter_index: The index to insert at. op: The operation that will be inserted. strategy: The insertion strategy. Returns: The index of the (possibly new) moment where the insertion should occur. Raises: ValueError: Unrecognized append strategy. ] if <ast.BoolOp object at 0x7da1b1ced8a0> begin[:] call[name[self]._moments.insert, parameter[name[splitter_index], call[name[ops].Moment, parameter[]]]] return[name[splitter_index]] if compare[name[strategy] is name[InsertStrategy].INLINE] begin[:] if <ast.BoolOp object at 0x7da1b1cefca0> begin[:] return[binary_operation[name[splitter_index] - constant[1]]] return[call[name[self]._pick_or_create_inserted_op_moment_index, parameter[name[splitter_index], name[op], name[InsertStrategy].NEW]]] if compare[name[strategy] is name[InsertStrategy].EARLIEST] begin[:] if call[name[self]._can_add_op_at, parameter[name[splitter_index], name[op]]] begin[:] variable[p] assign[=] call[name[self]._prev_moment_available, parameter[name[op], name[splitter_index]]] return[<ast.BoolOp object at 0x7da1b1ceccd0>] return[call[name[self]._pick_or_create_inserted_op_moment_index, parameter[name[splitter_index], name[op], name[InsertStrategy].INLINE]]] <ast.Raise object at 0x7da1b1cee2f0>
keyword[def] identifier[_pick_or_create_inserted_op_moment_index] ( identifier[self] , identifier[splitter_index] : identifier[int] , identifier[op] : identifier[ops] . identifier[Operation] , identifier[strategy] : identifier[InsertStrategy] )-> identifier[int] : literal[string] keyword[if] ( identifier[strategy] keyword[is] identifier[InsertStrategy] . identifier[NEW] keyword[or] identifier[strategy] keyword[is] identifier[InsertStrategy] . identifier[NEW_THEN_INLINE] ): identifier[self] . identifier[_moments] . identifier[insert] ( identifier[splitter_index] , identifier[ops] . identifier[Moment] ()) keyword[return] identifier[splitter_index] keyword[if] identifier[strategy] keyword[is] identifier[InsertStrategy] . identifier[INLINE] : keyword[if] ( literal[int] <= identifier[splitter_index] - literal[int] < identifier[len] ( identifier[self] . identifier[_moments] ) keyword[and] identifier[self] . identifier[_can_add_op_at] ( identifier[splitter_index] - literal[int] , identifier[op] )): keyword[return] identifier[splitter_index] - literal[int] keyword[return] identifier[self] . identifier[_pick_or_create_inserted_op_moment_index] ( identifier[splitter_index] , identifier[op] , identifier[InsertStrategy] . identifier[NEW] ) keyword[if] identifier[strategy] keyword[is] identifier[InsertStrategy] . identifier[EARLIEST] : keyword[if] identifier[self] . identifier[_can_add_op_at] ( identifier[splitter_index] , identifier[op] ): identifier[p] = identifier[self] . identifier[_prev_moment_available] ( identifier[op] , identifier[splitter_index] ) keyword[return] identifier[p] keyword[or] literal[int] keyword[return] identifier[self] . identifier[_pick_or_create_inserted_op_moment_index] ( identifier[splitter_index] , identifier[op] , identifier[InsertStrategy] . identifier[INLINE] ) keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[strategy] ))
def _pick_or_create_inserted_op_moment_index(self, splitter_index: int, op: ops.Operation, strategy: InsertStrategy) -> int: """Determines and prepares where an insertion will occur. Args: splitter_index: The index to insert at. op: The operation that will be inserted. strategy: The insertion strategy. Returns: The index of the (possibly new) moment where the insertion should occur. Raises: ValueError: Unrecognized append strategy. """ if strategy is InsertStrategy.NEW or strategy is InsertStrategy.NEW_THEN_INLINE: self._moments.insert(splitter_index, ops.Moment()) return splitter_index # depends on [control=['if'], data=[]] if strategy is InsertStrategy.INLINE: if 0 <= splitter_index - 1 < len(self._moments) and self._can_add_op_at(splitter_index - 1, op): return splitter_index - 1 # depends on [control=['if'], data=[]] return self._pick_or_create_inserted_op_moment_index(splitter_index, op, InsertStrategy.NEW) # depends on [control=['if'], data=[]] if strategy is InsertStrategy.EARLIEST: if self._can_add_op_at(splitter_index, op): p = self._prev_moment_available(op, splitter_index) return p or 0 # depends on [control=['if'], data=[]] return self._pick_or_create_inserted_op_moment_index(splitter_index, op, InsertStrategy.INLINE) # depends on [control=['if'], data=[]] raise ValueError('Unrecognized append strategy: {}'.format(strategy))
def set_lacp_fallback(self, name, mode=None):
    """Configures the Port-Channel lacp_fallback

    Args:
        name(str): The Port-Channel interface name
        mode(str): The Port-Channel LACP fallback setting. Valid values
            are 'disabled', 'static', 'individual':

            * static - Fallback to static LAG mode
            * individual - Fallback to individual ports
            * disabled - Disable LACP fallback

    Returns:
        True if the operation succeeds otherwise False is returned
    """
    valid_modes = ('disabled', 'static', 'individual')
    if mode not in valid_modes:
        return False
    # 'disabled' is expressed as the negated form of the config statement.
    disable = (mode == 'disabled')
    commands = [
        'interface %s' % name,
        self.command_builder('port-channel lacp fallback', value=mode,
                             disable=disable),
    ]
    return self.configure(commands)
def function[set_lacp_fallback, parameter[self, name, mode]]: constant[Configures the Port-Channel lacp_fallback Args: name(str): The Port-Channel interface name mode(str): The Port-Channel LACP fallback setting Valid values are 'disabled', 'static', 'individual': * static - Fallback to static LAG mode * individual - Fallback to individual ports * disabled - Disable LACP fallback Returns: True if the operation succeeds otherwise False is returned ] if compare[name[mode] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b230a4d0>, <ast.Constant object at 0x7da1b230b010>, <ast.Constant object at 0x7da1b230bb20>]]] begin[:] return[constant[False]] variable[disable] assign[=] <ast.IfExp object at 0x7da1b2309db0> variable[commands] assign[=] list[[<ast.BinOp object at 0x7da1b230abf0>]] call[name[commands].append, parameter[call[name[self].command_builder, parameter[constant[port-channel lacp fallback]]]]] return[call[name[self].configure, parameter[name[commands]]]]
keyword[def] identifier[set_lacp_fallback] ( identifier[self] , identifier[name] , identifier[mode] = keyword[None] ): literal[string] keyword[if] identifier[mode] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[return] keyword[False] identifier[disable] = keyword[True] keyword[if] identifier[mode] == literal[string] keyword[else] keyword[False] identifier[commands] =[ literal[string] % identifier[name] ] identifier[commands] . identifier[append] ( identifier[self] . identifier[command_builder] ( literal[string] , identifier[value] = identifier[mode] , identifier[disable] = identifier[disable] )) keyword[return] identifier[self] . identifier[configure] ( identifier[commands] )
def set_lacp_fallback(self, name, mode=None): """Configures the Port-Channel lacp_fallback Args: name(str): The Port-Channel interface name mode(str): The Port-Channel LACP fallback setting Valid values are 'disabled', 'static', 'individual': * static - Fallback to static LAG mode * individual - Fallback to individual ports * disabled - Disable LACP fallback Returns: True if the operation succeeds otherwise False is returned """ if mode not in ['disabled', 'static', 'individual']: return False # depends on [control=['if'], data=[]] disable = True if mode == 'disabled' else False commands = ['interface %s' % name] commands.append(self.command_builder('port-channel lacp fallback', value=mode, disable=disable)) return self.configure(commands)
def get_metadata_count_mat(self):
    '''
    Returns
    -------
    np.array with columns as categories and rows as terms
    '''
    n_categories = self.get_num_categories()
    counts = np.zeros(shape=(self.get_num_metadata(), n_categories),
                      dtype=int)
    for category in range(n_categories):
        # Rows of the metadata matrix belonging to this category.
        in_category = self._y == category
        # Binary presence indicator per (document, term), summed over docs.
        occurrences = (self._mX[in_category, :] > 0).astype(int)
        counts[:, category] = occurrences.sum(axis=0)
    return counts
def function[get_metadata_count_mat, parameter[self]]: constant[ Returns ------- np.array with columns as categories and rows as terms ] variable[freq_mat] assign[=] call[name[np].zeros, parameter[]] for taget[name[cat_i]] in starred[call[name[range], parameter[call[name[self].get_num_categories, parameter[]]]]] begin[:] variable[mX] assign[=] call[compare[call[name[self]._mX][tuple[[<ast.Compare object at 0x7da1b1b850f0>, <ast.Slice object at 0x7da1b1a37ac0>]]] greater[>] constant[0]].astype, parameter[name[int]]] call[name[freq_mat]][tuple[[<ast.Slice object at 0x7da1b1a371f0>, <ast.Name object at 0x7da1b1a37130>]]] assign[=] call[name[mX].sum, parameter[]] return[name[freq_mat]]
keyword[def] identifier[get_metadata_count_mat] ( identifier[self] ): literal[string] identifier[freq_mat] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[self] . identifier[get_num_metadata] (), identifier[self] . identifier[get_num_categories] ()), identifier[dtype] = identifier[int] ) keyword[for] identifier[cat_i] keyword[in] identifier[range] ( identifier[self] . identifier[get_num_categories] ()): identifier[mX] =( identifier[self] . identifier[_mX] [ identifier[self] . identifier[_y] == identifier[cat_i] ,:]> literal[int] ). identifier[astype] ( identifier[int] ) identifier[freq_mat] [:, identifier[cat_i] ]= identifier[mX] . identifier[sum] ( identifier[axis] = literal[int] ) keyword[return] identifier[freq_mat]
def get_metadata_count_mat(self): """ Returns ------- np.array with columns as categories and rows as terms """ freq_mat = np.zeros(shape=(self.get_num_metadata(), self.get_num_categories()), dtype=int) for cat_i in range(self.get_num_categories()): mX = (self._mX[self._y == cat_i, :] > 0).astype(int) freq_mat[:, cat_i] = mX.sum(axis=0) # depends on [control=['for'], data=['cat_i']] return freq_mat
def getdir(self, section, option, raw=False, vars=None, fallback="", validate=False):
    """
    A convenience method which coerces the option in the specified section
    to a directory.
    """
    raw_value = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
    path = self._convert_to_path(raw_value)
    # When validation is requested, only an existing directory is accepted.
    if validate and not os.path.isdir(path):
        return fallback
    return path
def function[getdir, parameter[self, section, option, raw, vars, fallback, validate]]: constant[ A convenience method which coerces the option in the specified section to a directory. ] variable[v] assign[=] call[name[self].get, parameter[name[section], name[option]]] variable[v] assign[=] call[name[self]._convert_to_path, parameter[name[v]]] return[<ast.IfExp object at 0x7da1b2458490>]
keyword[def] identifier[getdir] ( identifier[self] , identifier[section] , identifier[option] , identifier[raw] = keyword[False] , identifier[vars] = keyword[None] , identifier[fallback] = literal[string] , identifier[validate] = keyword[False] ): literal[string] identifier[v] = identifier[self] . identifier[get] ( identifier[section] , identifier[option] , identifier[raw] = identifier[raw] , identifier[vars] = identifier[vars] , identifier[fallback] = identifier[fallback] ) identifier[v] = identifier[self] . identifier[_convert_to_path] ( identifier[v] ) keyword[return] identifier[v] keyword[if] keyword[not] identifier[validate] keyword[or] identifier[os] . identifier[path] . identifier[isdir] ( identifier[v] ) keyword[else] identifier[fallback]
def getdir(self, section, option, raw=False, vars=None, fallback='', validate=False): """ A convenience method which coerces the option in the specified section to a directory. """ v = self.get(section, option, raw=raw, vars=vars, fallback=fallback) v = self._convert_to_path(v) return v if not validate or os.path.isdir(v) else fallback
def facets(mesh, engine=None):
    """
    Find the list of parallel adjacent faces.

    Parameters
    ---------
    mesh : trimesh.Trimesh
    engine : str
      Which graph engine to use:
      ('scipy', 'networkx', 'graphtool')

    Returns
    ---------
    facets : sequence of (n,) int
      Groups of face indexes of parallel adjacent faces.
    """
    # what is the radius of a circle that passes through the perpendicular
    # projection of the vector between the two non- shared vertices
    # onto the shared edge, with the face normal from the two adjacent faces
    radii = mesh.face_adjacency_radius
    # what is the span perpendicular to the shared edge
    span = mesh.face_adjacency_span

    # a very arbitrary formula for declaring two adjacent faces
    # parallel in a way that is hopefully (and anecdotally) robust
    # to numeric error
    # a common failure mode is two faces that are very narrow with a slight
    # angle between them, so here we divide by the perpendicular span
    # to penalize very narrow faces, and then square it just for fun
    # NOTE: use the builtin `bool`; the `np.bool` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so it crashes on modern NumPy.
    parallel = np.ones(len(radii), dtype=bool)

    # if span is zero we know faces are small/parallel
    nonzero = np.abs(span) > tol.zero

    # faces with a radii/span ratio larger than a threshold pass
    parallel[nonzero] = (radii[nonzero] /
                         span[nonzero]) ** 2 > tol.facet_threshold

    # run connected components on the parallel faces to group them
    components = connected_components(
        mesh.face_adjacency[parallel],
        nodes=np.arange(len(mesh.faces)),
        min_len=2,
        engine=engine)
    return components
def function[facets, parameter[mesh, engine]]: constant[ Find the list of parallel adjacent faces. Parameters --------- mesh : trimesh.Trimesh engine : str Which graph engine to use: ('scipy', 'networkx', 'graphtool') Returns --------- facets : sequence of (n,) int Groups of face indexes of parallel adjacent faces. ] variable[radii] assign[=] name[mesh].face_adjacency_radius variable[span] assign[=] name[mesh].face_adjacency_span variable[parallel] assign[=] call[name[np].ones, parameter[call[name[len], parameter[name[radii]]]]] variable[nonzero] assign[=] compare[call[name[np].abs, parameter[name[span]]] greater[>] name[tol].zero] call[name[parallel]][name[nonzero]] assign[=] compare[binary_operation[binary_operation[call[name[radii]][name[nonzero]] / call[name[span]][name[nonzero]]] ** constant[2]] greater[>] name[tol].facet_threshold] variable[components] assign[=] call[name[connected_components], parameter[call[name[mesh].face_adjacency][name[parallel]]]] return[name[components]]
keyword[def] identifier[facets] ( identifier[mesh] , identifier[engine] = keyword[None] ): literal[string] identifier[radii] = identifier[mesh] . identifier[face_adjacency_radius] identifier[span] = identifier[mesh] . identifier[face_adjacency_span] identifier[parallel] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[radii] ), identifier[dtype] = identifier[np] . identifier[bool] ) identifier[nonzero] = identifier[np] . identifier[abs] ( identifier[span] )> identifier[tol] . identifier[zero] identifier[parallel] [ identifier[nonzero] ]=( identifier[radii] [ identifier[nonzero] ]/ identifier[span] [ identifier[nonzero] ])** literal[int] > identifier[tol] . identifier[facet_threshold] identifier[components] = identifier[connected_components] ( identifier[mesh] . identifier[face_adjacency] [ identifier[parallel] ], identifier[nodes] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[mesh] . identifier[faces] )), identifier[min_len] = literal[int] , identifier[engine] = identifier[engine] ) keyword[return] identifier[components]
def facets(mesh, engine=None): """ Find the list of parallel adjacent faces. Parameters --------- mesh : trimesh.Trimesh engine : str Which graph engine to use: ('scipy', 'networkx', 'graphtool') Returns --------- facets : sequence of (n,) int Groups of face indexes of parallel adjacent faces. """ # what is the radius of a circle that passes through the perpendicular # projection of the vector between the two non- shared vertices # onto the shared edge, with the face normal from the two adjacent faces radii = mesh.face_adjacency_radius # what is the span perpendicular to the shared edge span = mesh.face_adjacency_span # a very arbitrary formula for declaring two adjacent faces # parallel in a way that is hopefully (and anecdotally) robust # to numeric error # a common failure mode is two faces that are very narrow with a slight # angle between them, so here we divide by the perpendicular span # to penalize very narrow faces, and then square it just for fun parallel = np.ones(len(radii), dtype=np.bool) # if span is zero we know faces are small/parallel nonzero = np.abs(span) > tol.zero # faces with a radii/span ratio larger than a threshold pass parallel[nonzero] = (radii[nonzero] / span[nonzero]) ** 2 > tol.facet_threshold # run connected components on the parallel faces to group them components = connected_components(mesh.face_adjacency[parallel], nodes=np.arange(len(mesh.faces)), min_len=2, engine=engine) return components
def check_program(self, name):
    """
    Checks whether a program is available on the shell PATH.

    Raises a management.CommandError if the module-level ``check_program``
    helper reports the executable as missing; returns None otherwise.
    """
    # Delegates the actual PATH lookup to the module-level helper of the
    # same name; this method only translates a failure into a CommandError.
    if check_program(name):
        return
    raise management.CommandError(
        "The program \"{name:s}\" is not available in the shell. "
        "Please ensure that \"{name:s}\" is installed and reachable "
        "through your PATH environment variable.".format(
            name=name))
def function[check_program, parameter[self, name]]: constant[ Checks whether a program is available on the shell PATH. ] if <ast.UnaryOp object at 0x7da1b143c340> begin[:] <ast.Raise object at 0x7da1b14d8400>
keyword[def] identifier[check_program] ( identifier[self] , identifier[name] ): literal[string] keyword[if] keyword[not] identifier[check_program] ( identifier[name] ): keyword[raise] identifier[management] . identifier[CommandError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[name] = identifier[name] ))
def check_program(self, name): """ Checks whether a program is available on the shell PATH. """ if not check_program(name): raise management.CommandError('The program "{name:s}" is not available in the shell. Please ensure that "{name:s}" is installed and reachable through your PATH environment variable.'.format(name=name)) # depends on [control=['if'], data=[]]
def gpu_a_trous():
    """
    Simple convenience function so that the a trous kernels can be easily
    accessed by any function.

    Returns
    -------
    tuple
        ``(row_kernel, col_kernel)`` — two compiled PyCUDA kernel functions
        performing the 5-tap "a trous" (holey) convolution along rows and
        columns respectively.
    """
    # Row-direction kernel. Each thread handles one pixel; the 5 filter taps
    # (wfil[0..4]) are applied at offsets of +/- 2**scale and +/- 2**(scale+1)
    # rows. Out-of-range taps appear to be reflected back into the image
    # (mirrored boundary) — NOTE(review): confirm the boundary intent.
    ker1 = SourceModule("""
__global__ void gpu_a_trous_row_kernel(float *in1, float *in2, float *wfil, int *scale)
{
    const int len = gridDim.x*blockDim.x;
    const int col = (blockDim.x * blockIdx.x + threadIdx.x);
    const int i = col;
    const int row = (blockDim.y * blockIdx.y + threadIdx.y);
    const int j = row*len;
    const int tid2 = i + j;
    const int lstp = exp2(float(scale[0] + 1));
    const int sstp = exp2(float(scale[0]));

    in2[tid2] = wfil[2]*in1[tid2];

    if (row < lstp)
        { in2[tid2] += wfil[0]*in1[col + len*(lstp - row - 1)]; }
    else
        { in2[tid2] += wfil[0]*in1[tid2 - lstp*len]; }

    if (row < sstp)
        { in2[tid2] += wfil[1]*in1[col + len*(sstp - row - 1)]; }
    else
        { in2[tid2] += wfil[1]*in1[tid2 - sstp*len]; }

    if (row >= (len - sstp))
        { in2[tid2] += wfil[3]*in1[col + len*(2*len - row - sstp - 1)]; }
    else
        { in2[tid2] += wfil[3]*in1[tid2 + sstp*len]; }

    if (row >= (len - lstp))
        { in2[tid2] += wfil[4]*in1[col + len*(2*len - row - lstp - 1)]; }
    else
        { in2[tid2] += wfil[4]*in1[tid2 + lstp*len]; }
}
""", keep=True)

    # Column-direction kernel: identical tap/boundary scheme applied along
    # columns instead of rows.
    ker2 = SourceModule("""
__global__ void gpu_a_trous_col_kernel(float *in1, float *in2, float *wfil, int *scale)
{
    const int len = gridDim.x*blockDim.x;
    const int col = (blockDim.x * blockIdx.x + threadIdx.x);
    const int i = col;
    const int row = (blockDim.y * blockIdx.y + threadIdx.y);
    const int j = row*len;
    const int tid2 = i + j;
    const int lstp = exp2(float(scale[0] + 1));
    const int sstp = exp2(float(scale[0]));

    in2[tid2] = wfil[2]*in1[tid2];

    if (col < lstp)
        { in2[tid2] += wfil[0]*in1[j - col + lstp - 1]; }
    else
        { in2[tid2] += wfil[0]*in1[tid2 - lstp]; }

    if (col < sstp)
        { in2[tid2] += wfil[1]*in1[j - col + sstp - 1]; }
    else
        { in2[tid2] += wfil[1]*in1[tid2 - sstp]; }

    if (col >= (len - sstp))
        { in2[tid2] += wfil[3]*in1[j + 2*len - sstp - col - 1]; }
    else
        { in2[tid2] += wfil[3]*in1[tid2 + sstp]; }

    if (col >= (len - lstp))
        { in2[tid2] += wfil[4]*in1[j + 2*len - lstp - col - 1]; }
    else
        { in2[tid2] += wfil[4]*in1[tid2 + lstp]; }
}
""", keep=True)

    return ker1.get_function("gpu_a_trous_row_kernel"), ker2.get_function("gpu_a_trous_col_kernel")
def function[gpu_a_trous, parameter[]]: constant[ Simple convenience function so that the a trous kernels can be easily accessed by any function. ] variable[ker1] assign[=] call[name[SourceModule], parameter[constant[ __global__ void gpu_a_trous_row_kernel(float *in1, float *in2, float *wfil, int *scale) { const int len = gridDim.x*blockDim.x; const int col = (blockDim.x * blockIdx.x + threadIdx.x); const int i = col; const int row = (blockDim.y * blockIdx.y + threadIdx.y); const int j = row*len; const int tid2 = i + j; const int lstp = exp2(float(scale[0] + 1)); const int sstp = exp2(float(scale[0])); in2[tid2] = wfil[2]*in1[tid2]; if (row < lstp) { in2[tid2] += wfil[0]*in1[col + len*(lstp - row - 1)]; } else { in2[tid2] += wfil[0]*in1[tid2 - lstp*len]; } if (row < sstp) { in2[tid2] += wfil[1]*in1[col + len*(sstp - row - 1)]; } else { in2[tid2] += wfil[1]*in1[tid2 - sstp*len]; } if (row >= (len - sstp)) { in2[tid2] += wfil[3]*in1[col + len*(2*len - row - sstp - 1)]; } else { in2[tid2] += wfil[3]*in1[tid2 + sstp*len]; } if (row >= (len - lstp)) { in2[tid2] += wfil[4]*in1[col + len*(2*len - row - lstp - 1)]; } else { in2[tid2] += wfil[4]*in1[tid2 + lstp*len]; } } ]]] variable[ker2] assign[=] call[name[SourceModule], parameter[constant[ __global__ void gpu_a_trous_col_kernel(float *in1, float *in2, float *wfil, int *scale) { const int len = gridDim.x*blockDim.x; const int col = (blockDim.x * blockIdx.x + threadIdx.x); const int i = col; const int row = (blockDim.y * blockIdx.y + threadIdx.y); const int j = row*len; const int tid2 = i + j; const int lstp = exp2(float(scale[0] + 1)); const int sstp = exp2(float(scale[0])); in2[tid2] = wfil[2]*in1[tid2]; if (col < lstp) { in2[tid2] += wfil[0]*in1[j - col + lstp - 1]; } else { in2[tid2] += wfil[0]*in1[tid2 - lstp]; } if (col < sstp) { in2[tid2] += wfil[1]*in1[j - col + sstp - 1]; } else { in2[tid2] += wfil[1]*in1[tid2 - sstp]; } if (col >= (len - sstp)) { in2[tid2] += wfil[3]*in1[j + 2*len - sstp - col - 1]; } else { 
in2[tid2] += wfil[3]*in1[tid2 + sstp]; } if (col >= (len - lstp)) { in2[tid2] += wfil[4]*in1[j + 2*len - lstp - col - 1]; } else { in2[tid2] += wfil[4]*in1[tid2 + lstp]; } } ]]] return[tuple[[<ast.Call object at 0x7da1b255c400>, <ast.Call object at 0x7da1b255c040>]]]
keyword[def] identifier[gpu_a_trous] (): literal[string] identifier[ker1] = identifier[SourceModule] ( literal[string] , identifier[keep] = keyword[True] ) identifier[ker2] = identifier[SourceModule] ( literal[string] , identifier[keep] = keyword[True] ) keyword[return] identifier[ker1] . identifier[get_function] ( literal[string] ), identifier[ker2] . identifier[get_function] ( literal[string] )
def gpu_a_trous(): """ Simple convenience function so that the a trous kernels can be easily accessed by any function. """ ker1 = SourceModule('\n __global__ void gpu_a_trous_row_kernel(float *in1, float *in2, float *wfil, int *scale)\n {\n const int len = gridDim.x*blockDim.x;\n const int col = (blockDim.x * blockIdx.x + threadIdx.x);\n const int i = col;\n const int row = (blockDim.y * blockIdx.y + threadIdx.y);\n const int j = row*len;\n const int tid2 = i + j;\n const int lstp = exp2(float(scale[0] + 1));\n const int sstp = exp2(float(scale[0]));\n\n in2[tid2] = wfil[2]*in1[tid2];\n\n if (row < lstp)\n { in2[tid2] += wfil[0]*in1[col + len*(lstp - row - 1)]; }\n else\n { in2[tid2] += wfil[0]*in1[tid2 - lstp*len]; }\n\n if (row < sstp)\n { in2[tid2] += wfil[1]*in1[col + len*(sstp - row - 1)]; }\n else\n { in2[tid2] += wfil[1]*in1[tid2 - sstp*len]; }\n\n if (row >= (len - sstp))\n { in2[tid2] += wfil[3]*in1[col + len*(2*len - row - sstp - 1)]; }\n else\n { in2[tid2] += wfil[3]*in1[tid2 + sstp*len]; }\n\n if (row >= (len - lstp))\n { in2[tid2] += wfil[4]*in1[col + len*(2*len - row - lstp - 1)]; }\n else\n { in2[tid2] += wfil[4]*in1[tid2 + lstp*len]; }\n }\n ', keep=True) ker2 = SourceModule('\n __global__ void gpu_a_trous_col_kernel(float *in1, float *in2, float *wfil, int *scale)\n {\n const int len = gridDim.x*blockDim.x;\n const int col = (blockDim.x * blockIdx.x + threadIdx.x);\n const int i = col;\n const int row = (blockDim.y * blockIdx.y + threadIdx.y);\n const int j = row*len;\n const int tid2 = i + j;\n const int lstp = exp2(float(scale[0] + 1));\n const int sstp = exp2(float(scale[0]));\n\n in2[tid2] = wfil[2]*in1[tid2];\n\n if (col < lstp)\n { in2[tid2] += wfil[0]*in1[j - col + lstp - 1]; }\n else\n { in2[tid2] += wfil[0]*in1[tid2 - lstp]; }\n\n if (col < sstp)\n { in2[tid2] += wfil[1]*in1[j - col + sstp - 1]; }\n else\n { in2[tid2] += wfil[1]*in1[tid2 - sstp]; }\n\n if (col >= (len - sstp))\n { in2[tid2] += wfil[3]*in1[j + 2*len - sstp - col - 1]; }\n 
else\n { in2[tid2] += wfil[3]*in1[tid2 + sstp]; }\n\n if (col >= (len - lstp))\n { in2[tid2] += wfil[4]*in1[j + 2*len - lstp - col - 1]; }\n else\n { in2[tid2] += wfil[4]*in1[tid2 + lstp]; }\n\n }\n ', keep=True) return (ker1.get_function('gpu_a_trous_row_kernel'), ker2.get_function('gpu_a_trous_col_kernel'))
def urlretrieve(uri, saveas=None, retries=3, cache_dir=None):
    '''urllib.urlretrieve wrapper'''
    # Any falsy retries value (None, 0, '') falls back to 3 attempts.
    attempts_left = int(retries) if retries else 3
    # FIXME: make random filename (saveas) in cache_dir...
    # cache_dir = cache_dir or CACHE_DIR
    while True:
        if not attempts_left:
            raise RuntimeError("Failed to retrieve uri: %s" % uri)
        try:
            _path, _headers = urllib.urlretrieve(uri, saveas)
        except Exception as e:
            attempts_left -= 1
            logger.warn(
                'Failed getting uri "%s": %s (retry:%s in 1s)' % (
                    uri, e, attempts_left))
            time.sleep(.2)
        else:
            return _path
def function[urlretrieve, parameter[uri, saveas, retries, cache_dir]]: constant[urllib.urlretrieve wrapper] variable[retries] assign[=] <ast.IfExp object at 0x7da1b0b72bc0> while name[retries] begin[:] <ast.Try object at 0x7da1b0b736a0> return[name[_path]]
keyword[def] identifier[urlretrieve] ( identifier[uri] , identifier[saveas] = keyword[None] , identifier[retries] = literal[int] , identifier[cache_dir] = keyword[None] ): literal[string] identifier[retries] = identifier[int] ( identifier[retries] ) keyword[if] identifier[retries] keyword[else] literal[int] keyword[while] identifier[retries] : keyword[try] : identifier[_path] , identifier[headers] = identifier[urllib] . identifier[urlretrieve] ( identifier[uri] , identifier[saveas] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[retries] -= literal[int] identifier[logger] . identifier[warn] ( literal[string] %( identifier[uri] , identifier[e] , identifier[retries] )) identifier[time] . identifier[sleep] ( literal[int] ) keyword[continue] keyword[else] : keyword[break] keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[uri] ) keyword[return] identifier[_path]
def urlretrieve(uri, saveas=None, retries=3, cache_dir=None): """urllib.urlretrieve wrapper""" retries = int(retries) if retries else 3 # FIXME: make random filename (saveas) in cache_dir... # cache_dir = cache_dir or CACHE_DIR while retries: try: (_path, headers) = urllib.urlretrieve(uri, saveas) # depends on [control=['try'], data=[]] except Exception as e: retries -= 1 logger.warn('Failed getting uri "%s": %s (retry:%s in 1s)' % (uri, e, retries)) time.sleep(0.2) continue # depends on [control=['except'], data=['e']] else: break # depends on [control=['while'], data=[]] else: raise RuntimeError('Failed to retrieve uri: %s' % uri) return _path
def diff_speed(sw_dens=1.028, dens_gcm3=1.053, seal_length=300, seal_girth=200,
               Cd=0.09):
    '''Calculate terminal velocity of animal with a body size

    Args
    ----
    sw_dens: float
        Density of seawater (g/cm^3)
    dens_gcm3: float
        Density of animal (g/cm^3)
    seal_length: float
        Length of animal (cm)
    seal_girth: float
        Girth of animal (cm)
    Cd: float
        Drag coefficient of object in fluid, unitless

    Returns
    -------
    Vt: float
        Terminal velocity of animal with given body dimensions (m/s).
        Negative when the buoyant force is negative (animal sinks).

    References
    ----------
    Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
    free-ranging seals using simple dive characteristics. Journal of
    Experimental Biology 206, 3405-3423. doi:10.1242/jeb.00583

    Vogel, S., 1994. Life in Moving Fluids: The Physical Biology of Flow.
    Princeton University Press.
    '''
    import numpy

    surface_area, volume = surf_vol(seal_length, seal_girth)
    buoyancy = buoyant_force(dens_gcm3, volume, sw_dens)

    # Signed ratio of buoyant force to the drag terms; its sign decides
    # whether the terminal velocity points up or down.
    ratio = 2 * (buoyancy / (Cd * sw_dens * (surface_area * 1000)))

    magnitude = numpy.sqrt(abs(ratio))
    return magnitude if ratio >= 0 else -magnitude
def function[diff_speed, parameter[sw_dens, dens_gcm3, seal_length, seal_girth, Cd]]: constant[Calculate terminal velocity of animal with a body size Args ---- sw_dens: float Density of seawater (g/cm^3) dens_gcm3: float Density of animal (g/cm^3) seal_length: float Length of animal (cm) seal_girth: float Girth of animal (cm) Cd: float Drag coefficient of object in fluid, unitless Returns ------- Vt: float Terminal velocity of animal with given body dimensions (m/s). References ---------- Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of free-ranging seals using simple dive characteristics. Journal of Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583 Vogel, S., 1994. Life in Moving Fluids: The Physical Biology of Flow. Princeton University Press. ] import module[numpy] <ast.Tuple object at 0x7da1b13ba620> assign[=] call[name[surf_vol], parameter[name[seal_length], name[seal_girth]]] variable[Fb] assign[=] call[name[buoyant_force], parameter[name[dens_gcm3], name[vol], name[sw_dens]]] variable[x] assign[=] binary_operation[constant[2] * binary_operation[name[Fb] / binary_operation[binary_operation[name[Cd] * name[sw_dens]] * binary_operation[name[surf] * constant[1000]]]]] if compare[name[x] greater_or_equal[>=] constant[0]] begin[:] variable[Vt] assign[=] call[name[numpy].sqrt, parameter[name[x]]] return[name[Vt]]
keyword[def] identifier[diff_speed] ( identifier[sw_dens] = literal[int] , identifier[dens_gcm3] = literal[int] , identifier[seal_length] = literal[int] , identifier[seal_girth] = literal[int] , identifier[Cd] = literal[int] ): literal[string] keyword[import] identifier[numpy] identifier[surf] , identifier[vol] = identifier[surf_vol] ( identifier[seal_length] , identifier[seal_girth] ) identifier[Fb] = identifier[buoyant_force] ( identifier[dens_gcm3] , identifier[vol] , identifier[sw_dens] ) identifier[x] = literal[int] *( identifier[Fb] /( identifier[Cd] * identifier[sw_dens] *( identifier[surf] * literal[int] ))) keyword[if] identifier[x] >= literal[int] : identifier[Vt] = identifier[numpy] . identifier[sqrt] ( identifier[x] ) keyword[else] : identifier[Vt] =- identifier[numpy] . identifier[sqrt] (- identifier[x] ) keyword[return] identifier[Vt]
def diff_speed(sw_dens=1.028, dens_gcm3=1.053, seal_length=300, seal_girth=200, Cd=0.09): """Calculate terminal velocity of animal with a body size Args ---- sw_dens: float Density of seawater (g/cm^3) dens_gcm3: float Density of animal (g/cm^3) seal_length: float Length of animal (cm) seal_girth: float Girth of animal (cm) Cd: float Drag coefficient of object in fluid, unitless Returns ------- Vt: float Terminal velocity of animal with given body dimensions (m/s). References ---------- Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of free-ranging seals using simple dive characteristics. Journal of Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583 Vogel, S., 1994. Life in Moving Fluids: The Physical Biology of Flow. Princeton University Press. """ import numpy (surf, vol) = surf_vol(seal_length, seal_girth) Fb = buoyant_force(dens_gcm3, vol, sw_dens) x = 2 * (Fb / (Cd * sw_dens * (surf * 1000))) if x >= 0: Vt = numpy.sqrt(x) # depends on [control=['if'], data=['x']] else: Vt = -numpy.sqrt(-x) return Vt
def Read(self, meta_only=False, allowed=None, cast=False):
    """Read the ESPA XML metadata file.

    Parses ``self.filename`` with ``xmltodict``, builds a ``RasterSet`` from
    the metadata, and (re)generates the band objects cached in ``self.bdict``.

    Parameters
    ----------
    meta_only : bool
        When True, only band metadata is generated (no pixel data is read)
        and the resulting raster set is not validated.
    allowed : list or tuple of str, optional
        When given, restrict processing to bands with these names; cached
        bands outside this list are dropped.
    cast : bool
        When True, bands are (re)generated so their data is cast to
        ``np.float32``; when False, float32-cached bands are regenerated
        uncast.

    Returns
    -------
    RasterSet
    """
    if allowed is not None and not isinstance(allowed, (list, tuple)):
        raise RuntimeError('`allowed` must be a list of str names.')
    meta = xmltodict.parse(
            open(self.filename, 'r').read()
        ).get('espa_metadata')
    # Handle bands separately from the rest of the metadata
    bands = meta.get('bands').get('band')
    del(meta['bands'])
    # A single <band> element parses as a dict, not a list — normalize
    if not isinstance(bands, (list)):
        bands = [bands]
    meta = self.CleanDict(meta)
    # Get spatial reference: map remaining metadata onto a RasterSet
    ras = SetProperties(RasterSet, meta)
    if allowed is not None:
        # Remove non-allowed arrays from the band cache
        for k in list(self.bdict.keys()):
            if k not in allowed:
                del(self.bdict[k])
    for i in range(len(bands)):
        # Cheap metadata-only pass first, to learn the band's name
        info = self.GenerateBand(bands[i], meta_only=True, cast=cast)
        if allowed is not None and info.name not in allowed:
            continue
        # Regenerate the band only when the cache misses or the cached
        # dtype disagrees with the requested `cast` setting.
        if info.name not in self.bdict.keys() or self.bdict[info.name].data is None:
            b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast)
            self.bdict[b.name] = b
        elif cast and self.bdict[info.name].data.dtype != np.float32:
            # cast requested but cached data is not float32
            b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast)
            self.bdict[b.name] = b
        elif not cast and self.bdict[info.name].data.dtype == np.float32:
            # cast not requested but cached data was previously cast
            b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast)
            self.bdict[b.name] = b
    ras.bands = self.bdict
    if not meta_only:
        ras.validate()
    return ras
def function[Read, parameter[self, meta_only, allowed, cast]]: constant[Read the ESPA XML metadata file] if <ast.BoolOp object at 0x7da1b09254e0> begin[:] <ast.Raise object at 0x7da1b0925210> variable[meta] assign[=] call[call[name[xmltodict].parse, parameter[call[call[name[open], parameter[name[self].filename, constant[r]]].read, parameter[]]]].get, parameter[constant[espa_metadata]]] variable[bands] assign[=] call[call[name[meta].get, parameter[constant[bands]]].get, parameter[constant[band]]] <ast.Delete object at 0x7da1b095d720> if <ast.UnaryOp object at 0x7da1b095d450> begin[:] variable[bands] assign[=] list[[<ast.Name object at 0x7da1b095f940>]] variable[meta] assign[=] call[name[self].CleanDict, parameter[name[meta]]] variable[ras] assign[=] call[name[SetProperties], parameter[name[RasterSet], name[meta]]] if compare[name[allowed] is_not constant[None]] begin[:] for taget[name[k]] in starred[call[name[list], parameter[call[name[self].bdict.keys, parameter[]]]]] begin[:] if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[allowed]] begin[:] <ast.Delete object at 0x7da1b095e770> for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[bands]]]]]] begin[:] variable[info] assign[=] call[name[self].GenerateBand, parameter[call[name[bands]][name[i]]]] if <ast.BoolOp object at 0x7da1b095eb90> begin[:] continue if <ast.BoolOp object at 0x7da1b095f040> begin[:] variable[b] assign[=] call[name[self].GenerateBand, parameter[call[name[bands]][name[i]]]] call[name[self].bdict][name[b].name] assign[=] name[b] name[ras].bands assign[=] name[self].bdict if <ast.UnaryOp object at 0x7da1b0ae2650> begin[:] call[name[ras].validate, parameter[]] return[name[ras]]
keyword[def] identifier[Read] ( identifier[self] , identifier[meta_only] = keyword[False] , identifier[allowed] = keyword[None] , identifier[cast] = keyword[False] ): literal[string] keyword[if] identifier[allowed] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[allowed] ,( identifier[list] , identifier[tuple] )): keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[meta] = identifier[xmltodict] . identifier[parse] ( identifier[open] ( identifier[self] . identifier[filename] , literal[string] ). identifier[read] () ). identifier[get] ( literal[string] ) identifier[bands] = identifier[meta] . identifier[get] ( literal[string] ). identifier[get] ( literal[string] ) keyword[del] ( identifier[meta] [ literal[string] ]) keyword[if] keyword[not] identifier[isinstance] ( identifier[bands] ,( identifier[list] )): identifier[bands] =[ identifier[bands] ] identifier[meta] = identifier[self] . identifier[CleanDict] ( identifier[meta] ) identifier[ras] = identifier[SetProperties] ( identifier[RasterSet] , identifier[meta] ) keyword[if] identifier[allowed] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[k] keyword[in] identifier[list] ( identifier[self] . identifier[bdict] . identifier[keys] ()): keyword[if] identifier[k] keyword[not] keyword[in] identifier[allowed] : keyword[del] ( identifier[self] . identifier[bdict] [ identifier[k] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[bands] )): identifier[info] = identifier[self] . identifier[GenerateBand] ( identifier[bands] [ identifier[i] ], identifier[meta_only] = keyword[True] , identifier[cast] = identifier[cast] ) keyword[if] identifier[allowed] keyword[is] keyword[not] keyword[None] keyword[and] identifier[info] . identifier[name] keyword[not] keyword[in] identifier[allowed] : keyword[continue] keyword[if] identifier[info] . identifier[name] keyword[not] keyword[in] identifier[self] . 
identifier[bdict] . identifier[keys] () keyword[or] identifier[self] . identifier[bdict] [ identifier[info] . identifier[name] ]. identifier[data] keyword[is] keyword[None] : identifier[b] = identifier[self] . identifier[GenerateBand] ( identifier[bands] [ identifier[i] ], identifier[meta_only] = identifier[meta_only] , identifier[cast] = identifier[cast] ) identifier[self] . identifier[bdict] [ identifier[b] . identifier[name] ]= identifier[b] keyword[elif] identifier[cast] keyword[and] identifier[self] . identifier[bdict] [ identifier[info] . identifier[name] ]. identifier[data] . identifier[dtype] != identifier[np] . identifier[float32] : identifier[b] = identifier[self] . identifier[GenerateBand] ( identifier[bands] [ identifier[i] ], identifier[meta_only] = identifier[meta_only] , identifier[cast] = identifier[cast] ) identifier[self] . identifier[bdict] [ identifier[b] . identifier[name] ]= identifier[b] keyword[elif] keyword[not] identifier[cast] keyword[and] identifier[self] . identifier[bdict] [ identifier[info] . identifier[name] ]. identifier[data] . identifier[dtype] == identifier[np] . identifier[float32] : identifier[b] = identifier[self] . identifier[GenerateBand] ( identifier[bands] [ identifier[i] ], identifier[meta_only] = identifier[meta_only] , identifier[cast] = identifier[cast] ) identifier[self] . identifier[bdict] [ identifier[b] . identifier[name] ]= identifier[b] identifier[ras] . identifier[bands] = identifier[self] . identifier[bdict] keyword[if] keyword[not] identifier[meta_only] : identifier[ras] . identifier[validate] () keyword[return] identifier[ras]
def Read(self, meta_only=False, allowed=None, cast=False): """Read the ESPA XML metadata file""" if allowed is not None and (not isinstance(allowed, (list, tuple))): raise RuntimeError('`allowed` must be a list of str names.') # depends on [control=['if'], data=[]] meta = xmltodict.parse(open(self.filename, 'r').read()).get('espa_metadata') # Handle bands seperately bands = meta.get('bands').get('band') del meta['bands'] if not isinstance(bands, list): bands = [bands] # depends on [control=['if'], data=[]] meta = self.CleanDict(meta) # Get spatial refernce ras = SetProperties(RasterSet, meta) if allowed is not None: # Remove non-allowed arrays from bdict for k in list(self.bdict.keys()): if k not in allowed: del self.bdict[k] # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=['allowed']] for i in range(len(bands)): info = self.GenerateBand(bands[i], meta_only=True, cast=cast) if allowed is not None and info.name not in allowed: continue # depends on [control=['if'], data=[]] if info.name not in self.bdict.keys() or self.bdict[info.name].data is None: b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast) self.bdict[b.name] = b # depends on [control=['if'], data=[]] elif cast and self.bdict[info.name].data.dtype != np.float32: b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast) self.bdict[b.name] = b # depends on [control=['if'], data=[]] elif not cast and self.bdict[info.name].data.dtype == np.float32: b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast) self.bdict[b.name] = b # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] ras.bands = self.bdict if not meta_only: ras.validate() # depends on [control=['if'], data=[]] return ras
def edit_by_id( self, id_equip_acesso, id_tipo_acesso, fqdn, user, password, enable_pass): """Edit access type, fqdn, user, password and enable_pass of the relationship of equipment and access type. :param id_tipo_acesso: Access type identifier. :param id_equip_acesso: Equipment identifier. :param fqdn: Equipment FQDN. :param user: User. :param password: Password. :param enable_pass: Enable access. :return: None :raise InvalidParameterError: The parameters fqdn, user, password or access type id are invalid or none. :raise EquipamentoAcessoNaoExisteError: Equipment access type relationship doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_tipo_acesso): raise InvalidParameterError( u'Access type id is invalid or not informed.') equipamento_acesso_map = dict() equipamento_acesso_map['fqdn'] = fqdn equipamento_acesso_map['user'] = user equipamento_acesso_map['pass'] = password equipamento_acesso_map['enable_pass'] = enable_pass equipamento_acesso_map['id_tipo_acesso'] = id_tipo_acesso equipamento_acesso_map['id_equip_acesso'] = id_equip_acesso url = 'equipamentoacesso/edit/' code, xml = self.submit( {'equipamento_acesso': equipamento_acesso_map}, 'POST', url) return self.response(code, xml)
def function[edit_by_id, parameter[self, id_equip_acesso, id_tipo_acesso, fqdn, user, password, enable_pass]]: constant[Edit access type, fqdn, user, password and enable_pass of the relationship of equipment and access type. :param id_tipo_acesso: Access type identifier. :param id_equip_acesso: Equipment identifier. :param fqdn: Equipment FQDN. :param user: User. :param password: Password. :param enable_pass: Enable access. :return: None :raise InvalidParameterError: The parameters fqdn, user, password or access type id are invalid or none. :raise EquipamentoAcessoNaoExisteError: Equipment access type relationship doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. ] if <ast.UnaryOp object at 0x7da2041d8850> begin[:] <ast.Raise object at 0x7da2041dadd0> variable[equipamento_acesso_map] assign[=] call[name[dict], parameter[]] call[name[equipamento_acesso_map]][constant[fqdn]] assign[=] name[fqdn] call[name[equipamento_acesso_map]][constant[user]] assign[=] name[user] call[name[equipamento_acesso_map]][constant[pass]] assign[=] name[password] call[name[equipamento_acesso_map]][constant[enable_pass]] assign[=] name[enable_pass] call[name[equipamento_acesso_map]][constant[id_tipo_acesso]] assign[=] name[id_tipo_acesso] call[name[equipamento_acesso_map]][constant[id_equip_acesso]] assign[=] name[id_equip_acesso] variable[url] assign[=] constant[equipamentoacesso/edit/] <ast.Tuple object at 0x7da20c6a97e0> assign[=] call[name[self].submit, parameter[dictionary[[<ast.Constant object at 0x7da20c6aa230>], [<ast.Name object at 0x7da20c6a8e50>]], constant[POST], name[url]]] return[call[name[self].response, parameter[name[code], name[xml]]]]
keyword[def] identifier[edit_by_id] ( identifier[self] , identifier[id_equip_acesso] , identifier[id_tipo_acesso] , identifier[fqdn] , identifier[user] , identifier[password] , identifier[enable_pass] ): literal[string] keyword[if] keyword[not] identifier[is_valid_int_param] ( identifier[id_tipo_acesso] ): keyword[raise] identifier[InvalidParameterError] ( literal[string] ) identifier[equipamento_acesso_map] = identifier[dict] () identifier[equipamento_acesso_map] [ literal[string] ]= identifier[fqdn] identifier[equipamento_acesso_map] [ literal[string] ]= identifier[user] identifier[equipamento_acesso_map] [ literal[string] ]= identifier[password] identifier[equipamento_acesso_map] [ literal[string] ]= identifier[enable_pass] identifier[equipamento_acesso_map] [ literal[string] ]= identifier[id_tipo_acesso] identifier[equipamento_acesso_map] [ literal[string] ]= identifier[id_equip_acesso] identifier[url] = literal[string] identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ( { literal[string] : identifier[equipamento_acesso_map] }, literal[string] , identifier[url] ) keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] )
def edit_by_id(self, id_equip_acesso, id_tipo_acesso, fqdn, user, password, enable_pass): """Edit access type, fqdn, user, password and enable_pass of the relationship of equipment and access type. :param id_tipo_acesso: Access type identifier. :param id_equip_acesso: Equipment identifier. :param fqdn: Equipment FQDN. :param user: User. :param password: Password. :param enable_pass: Enable access. :return: None :raise InvalidParameterError: The parameters fqdn, user, password or access type id are invalid or none. :raise EquipamentoAcessoNaoExisteError: Equipment access type relationship doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_tipo_acesso): raise InvalidParameterError(u'Access type id is invalid or not informed.') # depends on [control=['if'], data=[]] equipamento_acesso_map = dict() equipamento_acesso_map['fqdn'] = fqdn equipamento_acesso_map['user'] = user equipamento_acesso_map['pass'] = password equipamento_acesso_map['enable_pass'] = enable_pass equipamento_acesso_map['id_tipo_acesso'] = id_tipo_acesso equipamento_acesso_map['id_equip_acesso'] = id_equip_acesso url = 'equipamentoacesso/edit/' (code, xml) = self.submit({'equipamento_acesso': equipamento_acesso_map}, 'POST', url) return self.response(code, xml)
def _serialize_into_store(profile, filter=None): """ Takes data from app layer and serializes the models into the store. """ # ensure that we write and retrieve the counter in one go for consistency current_id = InstanceIDModel.get_current_instance_and_increment_counter() with transaction.atomic(): # create Q objects for filtering by prefixes prefix_condition = None if filter: prefix_condition = functools.reduce(lambda x, y: x | y, [Q(_morango_partition__startswith=prefix) for prefix in filter]) # filter through all models with the dirty bit turned on syncable_dict = _profile_models[profile] for (_, klass_model) in six.iteritems(syncable_dict): new_store_records = [] new_rmc_records = [] klass_queryset = klass_model.objects.filter(_morango_dirty_bit=True) if prefix_condition: klass_queryset = klass_queryset.filter(prefix_condition) store_records_dict = Store.objects.in_bulk(id_list=klass_queryset.values_list('id', flat=True)) for app_model in klass_queryset: try: store_model = store_records_dict[app_model.id] # if store record dirty and app record dirty, append store serialized to conflicting data if store_model.dirty_bit: store_model.conflicting_serialized_data = store_model.serialized + "\n" + store_model.conflicting_serialized_data store_model.dirty_bit = False # set new serialized data on this store model ser_dict = json.loads(store_model.serialized) ser_dict.update(app_model.serialize()) store_model.serialized = DjangoJSONEncoder().encode(ser_dict) # create or update instance and counter on the record max counter for this store model RecordMaxCounter.objects.update_or_create(defaults={'counter': current_id.counter}, instance_id=current_id.id, store_model_id=store_model.id) # update last saved bys for this store model store_model.last_saved_instance = current_id.id store_model.last_saved_counter = current_id.counter # update deleted flags in case it was previously deleted store_model.deleted = False store_model.hard_deleted = False # update this model 
store_model.save() except KeyError: kwargs = { 'id': app_model.id, 'serialized': DjangoJSONEncoder().encode(app_model.serialize()), 'last_saved_instance': current_id.id, 'last_saved_counter': current_id.counter, 'model_name': app_model.morango_model_name, 'profile': app_model.morango_profile, 'partition': app_model._morango_partition, 'source_id': app_model._morango_source_id, } # check if model has FK pointing to it and add the value to a field on the store self_ref_fk = _self_referential_fk(klass_model) if self_ref_fk: self_ref_fk_value = getattr(app_model, self_ref_fk) kwargs.update({'_self_ref_fk': self_ref_fk_value or ''}) # create store model and record max counter for the app model new_store_records.append(Store(**kwargs)) new_rmc_records.append(RecordMaxCounter(store_model_id=app_model.id, instance_id=current_id.id, counter=current_id.counter)) # bulk create store and rmc records for this class Store.objects.bulk_create(new_store_records) RecordMaxCounter.objects.bulk_create(new_rmc_records) # set dirty bit to false for all instances of this model klass_queryset.update(update_dirty_bit_to=False) # get list of ids of deleted models deleted_ids = DeletedModels.objects.filter(profile=profile).values_list('id', flat=True) # update last_saved_bys and deleted flag of all deleted store model instances deleted_store_records = Store.objects.filter(id__in=deleted_ids) deleted_store_records.update(dirty_bit=False, deleted=True, last_saved_instance=current_id.id, last_saved_counter=current_id.counter) # update rmcs counters for deleted models that have our instance id RecordMaxCounter.objects.filter(instance_id=current_id.id, store_model_id__in=deleted_ids).update(counter=current_id.counter) # get a list of deleted model ids that don't have an rmc for our instance id new_rmc_ids = deleted_store_records.exclude(recordmaxcounter__instance_id=current_id.id).values_list("id", flat=True) # bulk create these new rmcs 
RecordMaxCounter.objects.bulk_create([RecordMaxCounter(store_model_id=r_id, instance_id=current_id.id, counter=current_id.counter) for r_id in new_rmc_ids]) # clear deleted models table for this profile DeletedModels.objects.filter(profile=profile).delete() # handle logic for hard deletion models hard_deleted_ids = HardDeletedModels.objects.filter(profile=profile).values_list('id', flat=True) hard_deleted_store_records = Store.objects.filter(id__in=hard_deleted_ids) hard_deleted_store_records.update(hard_deleted=True, serialized='{}', conflicting_serialized_data='') HardDeletedModels.objects.filter(profile=profile).delete() # update our own database max counters after serialization if not filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition="", defaults={'counter': current_id.counter}) else: for f in filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition=f, defaults={'counter': current_id.counter})
def function[_serialize_into_store, parameter[profile, filter]]: constant[ Takes data from app layer and serializes the models into the store. ] variable[current_id] assign[=] call[name[InstanceIDModel].get_current_instance_and_increment_counter, parameter[]] with call[name[transaction].atomic, parameter[]] begin[:] variable[prefix_condition] assign[=] constant[None] if name[filter] begin[:] variable[prefix_condition] assign[=] call[name[functools].reduce, parameter[<ast.Lambda object at 0x7da1b008ae00>, <ast.ListComp object at 0x7da1b008af50>]] variable[syncable_dict] assign[=] call[name[_profile_models]][name[profile]] for taget[tuple[[<ast.Name object at 0x7da1b008ab60>, <ast.Name object at 0x7da1b008ab30>]]] in starred[call[name[six].iteritems, parameter[name[syncable_dict]]]] begin[:] variable[new_store_records] assign[=] list[[]] variable[new_rmc_records] assign[=] list[[]] variable[klass_queryset] assign[=] call[name[klass_model].objects.filter, parameter[]] if name[prefix_condition] begin[:] variable[klass_queryset] assign[=] call[name[klass_queryset].filter, parameter[name[prefix_condition]]] variable[store_records_dict] assign[=] call[name[Store].objects.in_bulk, parameter[]] for taget[name[app_model]] in starred[name[klass_queryset]] begin[:] <ast.Try object at 0x7da18bcca950> call[name[Store].objects.bulk_create, parameter[name[new_store_records]]] call[name[RecordMaxCounter].objects.bulk_create, parameter[name[new_rmc_records]]] call[name[klass_queryset].update, parameter[]] variable[deleted_ids] assign[=] call[call[name[DeletedModels].objects.filter, parameter[]].values_list, parameter[constant[id]]] variable[deleted_store_records] assign[=] call[name[Store].objects.filter, parameter[]] call[name[deleted_store_records].update, parameter[]] call[call[name[RecordMaxCounter].objects.filter, parameter[]].update, parameter[]] variable[new_rmc_ids] assign[=] call[call[name[deleted_store_records].exclude, parameter[]].values_list, parameter[constant[id]]] 
call[name[RecordMaxCounter].objects.bulk_create, parameter[<ast.ListComp object at 0x7da1b2347d90>]] call[call[name[DeletedModels].objects.filter, parameter[]].delete, parameter[]] variable[hard_deleted_ids] assign[=] call[call[name[HardDeletedModels].objects.filter, parameter[]].values_list, parameter[constant[id]]] variable[hard_deleted_store_records] assign[=] call[name[Store].objects.filter, parameter[]] call[name[hard_deleted_store_records].update, parameter[]] call[call[name[HardDeletedModels].objects.filter, parameter[]].delete, parameter[]] if <ast.UnaryOp object at 0x7da1b00f4a90> begin[:] call[name[DatabaseMaxCounter].objects.update_or_create, parameter[]]
keyword[def] identifier[_serialize_into_store] ( identifier[profile] , identifier[filter] = keyword[None] ): literal[string] identifier[current_id] = identifier[InstanceIDModel] . identifier[get_current_instance_and_increment_counter] () keyword[with] identifier[transaction] . identifier[atomic] (): identifier[prefix_condition] = keyword[None] keyword[if] identifier[filter] : identifier[prefix_condition] = identifier[functools] . identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] | identifier[y] ,[ identifier[Q] ( identifier[_morango_partition__startswith] = identifier[prefix] ) keyword[for] identifier[prefix] keyword[in] identifier[filter] ]) identifier[syncable_dict] = identifier[_profile_models] [ identifier[profile] ] keyword[for] ( identifier[_] , identifier[klass_model] ) keyword[in] identifier[six] . identifier[iteritems] ( identifier[syncable_dict] ): identifier[new_store_records] =[] identifier[new_rmc_records] =[] identifier[klass_queryset] = identifier[klass_model] . identifier[objects] . identifier[filter] ( identifier[_morango_dirty_bit] = keyword[True] ) keyword[if] identifier[prefix_condition] : identifier[klass_queryset] = identifier[klass_queryset] . identifier[filter] ( identifier[prefix_condition] ) identifier[store_records_dict] = identifier[Store] . identifier[objects] . identifier[in_bulk] ( identifier[id_list] = identifier[klass_queryset] . identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] )) keyword[for] identifier[app_model] keyword[in] identifier[klass_queryset] : keyword[try] : identifier[store_model] = identifier[store_records_dict] [ identifier[app_model] . identifier[id] ] keyword[if] identifier[store_model] . identifier[dirty_bit] : identifier[store_model] . identifier[conflicting_serialized_data] = identifier[store_model] . identifier[serialized] + literal[string] + identifier[store_model] . identifier[conflicting_serialized_data] identifier[store_model] . 
identifier[dirty_bit] = keyword[False] identifier[ser_dict] = identifier[json] . identifier[loads] ( identifier[store_model] . identifier[serialized] ) identifier[ser_dict] . identifier[update] ( identifier[app_model] . identifier[serialize] ()) identifier[store_model] . identifier[serialized] = identifier[DjangoJSONEncoder] (). identifier[encode] ( identifier[ser_dict] ) identifier[RecordMaxCounter] . identifier[objects] . identifier[update_or_create] ( identifier[defaults] ={ literal[string] : identifier[current_id] . identifier[counter] }, identifier[instance_id] = identifier[current_id] . identifier[id] , identifier[store_model_id] = identifier[store_model] . identifier[id] ) identifier[store_model] . identifier[last_saved_instance] = identifier[current_id] . identifier[id] identifier[store_model] . identifier[last_saved_counter] = identifier[current_id] . identifier[counter] identifier[store_model] . identifier[deleted] = keyword[False] identifier[store_model] . identifier[hard_deleted] = keyword[False] identifier[store_model] . identifier[save] () keyword[except] identifier[KeyError] : identifier[kwargs] ={ literal[string] : identifier[app_model] . identifier[id] , literal[string] : identifier[DjangoJSONEncoder] (). identifier[encode] ( identifier[app_model] . identifier[serialize] ()), literal[string] : identifier[current_id] . identifier[id] , literal[string] : identifier[current_id] . identifier[counter] , literal[string] : identifier[app_model] . identifier[morango_model_name] , literal[string] : identifier[app_model] . identifier[morango_profile] , literal[string] : identifier[app_model] . identifier[_morango_partition] , literal[string] : identifier[app_model] . identifier[_morango_source_id] , } identifier[self_ref_fk] = identifier[_self_referential_fk] ( identifier[klass_model] ) keyword[if] identifier[self_ref_fk] : identifier[self_ref_fk_value] = identifier[getattr] ( identifier[app_model] , identifier[self_ref_fk] ) identifier[kwargs] . 
identifier[update] ({ literal[string] : identifier[self_ref_fk_value] keyword[or] literal[string] }) identifier[new_store_records] . identifier[append] ( identifier[Store] (** identifier[kwargs] )) identifier[new_rmc_records] . identifier[append] ( identifier[RecordMaxCounter] ( identifier[store_model_id] = identifier[app_model] . identifier[id] , identifier[instance_id] = identifier[current_id] . identifier[id] , identifier[counter] = identifier[current_id] . identifier[counter] )) identifier[Store] . identifier[objects] . identifier[bulk_create] ( identifier[new_store_records] ) identifier[RecordMaxCounter] . identifier[objects] . identifier[bulk_create] ( identifier[new_rmc_records] ) identifier[klass_queryset] . identifier[update] ( identifier[update_dirty_bit_to] = keyword[False] ) identifier[deleted_ids] = identifier[DeletedModels] . identifier[objects] . identifier[filter] ( identifier[profile] = identifier[profile] ). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) identifier[deleted_store_records] = identifier[Store] . identifier[objects] . identifier[filter] ( identifier[id__in] = identifier[deleted_ids] ) identifier[deleted_store_records] . identifier[update] ( identifier[dirty_bit] = keyword[False] , identifier[deleted] = keyword[True] , identifier[last_saved_instance] = identifier[current_id] . identifier[id] , identifier[last_saved_counter] = identifier[current_id] . identifier[counter] ) identifier[RecordMaxCounter] . identifier[objects] . identifier[filter] ( identifier[instance_id] = identifier[current_id] . identifier[id] , identifier[store_model_id__in] = identifier[deleted_ids] ). identifier[update] ( identifier[counter] = identifier[current_id] . identifier[counter] ) identifier[new_rmc_ids] = identifier[deleted_store_records] . identifier[exclude] ( identifier[recordmaxcounter__instance_id] = identifier[current_id] . identifier[id] ). 
identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) identifier[RecordMaxCounter] . identifier[objects] . identifier[bulk_create] ([ identifier[RecordMaxCounter] ( identifier[store_model_id] = identifier[r_id] , identifier[instance_id] = identifier[current_id] . identifier[id] , identifier[counter] = identifier[current_id] . identifier[counter] ) keyword[for] identifier[r_id] keyword[in] identifier[new_rmc_ids] ]) identifier[DeletedModels] . identifier[objects] . identifier[filter] ( identifier[profile] = identifier[profile] ). identifier[delete] () identifier[hard_deleted_ids] = identifier[HardDeletedModels] . identifier[objects] . identifier[filter] ( identifier[profile] = identifier[profile] ). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) identifier[hard_deleted_store_records] = identifier[Store] . identifier[objects] . identifier[filter] ( identifier[id__in] = identifier[hard_deleted_ids] ) identifier[hard_deleted_store_records] . identifier[update] ( identifier[hard_deleted] = keyword[True] , identifier[serialized] = literal[string] , identifier[conflicting_serialized_data] = literal[string] ) identifier[HardDeletedModels] . identifier[objects] . identifier[filter] ( identifier[profile] = identifier[profile] ). identifier[delete] () keyword[if] keyword[not] identifier[filter] : identifier[DatabaseMaxCounter] . identifier[objects] . identifier[update_or_create] ( identifier[instance_id] = identifier[current_id] . identifier[id] , identifier[partition] = literal[string] , identifier[defaults] ={ literal[string] : identifier[current_id] . identifier[counter] }) keyword[else] : keyword[for] identifier[f] keyword[in] identifier[filter] : identifier[DatabaseMaxCounter] . identifier[objects] . identifier[update_or_create] ( identifier[instance_id] = identifier[current_id] . identifier[id] , identifier[partition] = identifier[f] , identifier[defaults] ={ literal[string] : identifier[current_id] . 
identifier[counter] })
def _serialize_into_store(profile, filter=None): """ Takes data from app layer and serializes the models into the store. """ # ensure that we write and retrieve the counter in one go for consistency current_id = InstanceIDModel.get_current_instance_and_increment_counter() with transaction.atomic(): # create Q objects for filtering by prefixes prefix_condition = None if filter: prefix_condition = functools.reduce(lambda x, y: x | y, [Q(_morango_partition__startswith=prefix) for prefix in filter]) # depends on [control=['if'], data=[]] # filter through all models with the dirty bit turned on syncable_dict = _profile_models[profile] for (_, klass_model) in six.iteritems(syncable_dict): new_store_records = [] new_rmc_records = [] klass_queryset = klass_model.objects.filter(_morango_dirty_bit=True) if prefix_condition: klass_queryset = klass_queryset.filter(prefix_condition) # depends on [control=['if'], data=[]] store_records_dict = Store.objects.in_bulk(id_list=klass_queryset.values_list('id', flat=True)) for app_model in klass_queryset: try: store_model = store_records_dict[app_model.id] # if store record dirty and app record dirty, append store serialized to conflicting data if store_model.dirty_bit: store_model.conflicting_serialized_data = store_model.serialized + '\n' + store_model.conflicting_serialized_data store_model.dirty_bit = False # depends on [control=['if'], data=[]] # set new serialized data on this store model ser_dict = json.loads(store_model.serialized) ser_dict.update(app_model.serialize()) store_model.serialized = DjangoJSONEncoder().encode(ser_dict) # create or update instance and counter on the record max counter for this store model RecordMaxCounter.objects.update_or_create(defaults={'counter': current_id.counter}, instance_id=current_id.id, store_model_id=store_model.id) # update last saved bys for this store model store_model.last_saved_instance = current_id.id store_model.last_saved_counter = current_id.counter # update deleted flags in case 
it was previously deleted store_model.deleted = False store_model.hard_deleted = False # update this model store_model.save() # depends on [control=['try'], data=[]] except KeyError: kwargs = {'id': app_model.id, 'serialized': DjangoJSONEncoder().encode(app_model.serialize()), 'last_saved_instance': current_id.id, 'last_saved_counter': current_id.counter, 'model_name': app_model.morango_model_name, 'profile': app_model.morango_profile, 'partition': app_model._morango_partition, 'source_id': app_model._morango_source_id} # check if model has FK pointing to it and add the value to a field on the store self_ref_fk = _self_referential_fk(klass_model) if self_ref_fk: self_ref_fk_value = getattr(app_model, self_ref_fk) kwargs.update({'_self_ref_fk': self_ref_fk_value or ''}) # depends on [control=['if'], data=[]] # create store model and record max counter for the app model new_store_records.append(Store(**kwargs)) new_rmc_records.append(RecordMaxCounter(store_model_id=app_model.id, instance_id=current_id.id, counter=current_id.counter)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['app_model']] # bulk create store and rmc records for this class Store.objects.bulk_create(new_store_records) RecordMaxCounter.objects.bulk_create(new_rmc_records) # set dirty bit to false for all instances of this model klass_queryset.update(update_dirty_bit_to=False) # depends on [control=['for'], data=[]] # get list of ids of deleted models deleted_ids = DeletedModels.objects.filter(profile=profile).values_list('id', flat=True) # update last_saved_bys and deleted flag of all deleted store model instances deleted_store_records = Store.objects.filter(id__in=deleted_ids) deleted_store_records.update(dirty_bit=False, deleted=True, last_saved_instance=current_id.id, last_saved_counter=current_id.counter) # update rmcs counters for deleted models that have our instance id RecordMaxCounter.objects.filter(instance_id=current_id.id, 
store_model_id__in=deleted_ids).update(counter=current_id.counter) # get a list of deleted model ids that don't have an rmc for our instance id new_rmc_ids = deleted_store_records.exclude(recordmaxcounter__instance_id=current_id.id).values_list('id', flat=True) # bulk create these new rmcs RecordMaxCounter.objects.bulk_create([RecordMaxCounter(store_model_id=r_id, instance_id=current_id.id, counter=current_id.counter) for r_id in new_rmc_ids]) # clear deleted models table for this profile DeletedModels.objects.filter(profile=profile).delete() # handle logic for hard deletion models hard_deleted_ids = HardDeletedModels.objects.filter(profile=profile).values_list('id', flat=True) hard_deleted_store_records = Store.objects.filter(id__in=hard_deleted_ids) hard_deleted_store_records.update(hard_deleted=True, serialized='{}', conflicting_serialized_data='') HardDeletedModels.objects.filter(profile=profile).delete() # update our own database max counters after serialization if not filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition='', defaults={'counter': current_id.counter}) # depends on [control=['if'], data=[]] else: for f in filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition=f, defaults={'counter': current_id.counter}) # depends on [control=['for'], data=['f']] # depends on [control=['with'], data=[]]
def read_settings(self): """Set the IF state from QSettings.""" extent = setting('user_extent', None, str) if extent: extent = QgsGeometry.fromWkt(extent) if not extent.isGeosValid(): extent = None crs = setting('user_extent_crs', None, str) if crs: crs = QgsCoordinateReferenceSystem(crs) if not crs.isValid(): crs = None mode = setting('analysis_extents_mode', HAZARD_EXPOSURE_VIEW) if crs and extent and mode == HAZARD_EXPOSURE_BOUNDINGBOX: self.extent.set_user_extent(extent, crs) self.extent.show_rubber_bands = setting( 'showRubberBands', False, bool) self.zoom_to_impact_flag = setting('setZoomToImpactFlag', True, bool) # whether exposure layer should be hidden after model completes self.hide_exposure_flag = setting('setHideExposureFlag', False, bool)
def function[read_settings, parameter[self]]: constant[Set the IF state from QSettings.] variable[extent] assign[=] call[name[setting], parameter[constant[user_extent], constant[None], name[str]]] if name[extent] begin[:] variable[extent] assign[=] call[name[QgsGeometry].fromWkt, parameter[name[extent]]] if <ast.UnaryOp object at 0x7da207f99540> begin[:] variable[extent] assign[=] constant[None] variable[crs] assign[=] call[name[setting], parameter[constant[user_extent_crs], constant[None], name[str]]] if name[crs] begin[:] variable[crs] assign[=] call[name[QgsCoordinateReferenceSystem], parameter[name[crs]]] if <ast.UnaryOp object at 0x7da20e9b15d0> begin[:] variable[crs] assign[=] constant[None] variable[mode] assign[=] call[name[setting], parameter[constant[analysis_extents_mode], name[HAZARD_EXPOSURE_VIEW]]] if <ast.BoolOp object at 0x7da20e9b0640> begin[:] call[name[self].extent.set_user_extent, parameter[name[extent], name[crs]]] name[self].extent.show_rubber_bands assign[=] call[name[setting], parameter[constant[showRubberBands], constant[False], name[bool]]] name[self].zoom_to_impact_flag assign[=] call[name[setting], parameter[constant[setZoomToImpactFlag], constant[True], name[bool]]] name[self].hide_exposure_flag assign[=] call[name[setting], parameter[constant[setHideExposureFlag], constant[False], name[bool]]]
keyword[def] identifier[read_settings] ( identifier[self] ): literal[string] identifier[extent] = identifier[setting] ( literal[string] , keyword[None] , identifier[str] ) keyword[if] identifier[extent] : identifier[extent] = identifier[QgsGeometry] . identifier[fromWkt] ( identifier[extent] ) keyword[if] keyword[not] identifier[extent] . identifier[isGeosValid] (): identifier[extent] = keyword[None] identifier[crs] = identifier[setting] ( literal[string] , keyword[None] , identifier[str] ) keyword[if] identifier[crs] : identifier[crs] = identifier[QgsCoordinateReferenceSystem] ( identifier[crs] ) keyword[if] keyword[not] identifier[crs] . identifier[isValid] (): identifier[crs] = keyword[None] identifier[mode] = identifier[setting] ( literal[string] , identifier[HAZARD_EXPOSURE_VIEW] ) keyword[if] identifier[crs] keyword[and] identifier[extent] keyword[and] identifier[mode] == identifier[HAZARD_EXPOSURE_BOUNDINGBOX] : identifier[self] . identifier[extent] . identifier[set_user_extent] ( identifier[extent] , identifier[crs] ) identifier[self] . identifier[extent] . identifier[show_rubber_bands] = identifier[setting] ( literal[string] , keyword[False] , identifier[bool] ) identifier[self] . identifier[zoom_to_impact_flag] = identifier[setting] ( literal[string] , keyword[True] , identifier[bool] ) identifier[self] . identifier[hide_exposure_flag] = identifier[setting] ( literal[string] , keyword[False] , identifier[bool] )
def read_settings(self): """Set the IF state from QSettings.""" extent = setting('user_extent', None, str) if extent: extent = QgsGeometry.fromWkt(extent) if not extent.isGeosValid(): extent = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] crs = setting('user_extent_crs', None, str) if crs: crs = QgsCoordinateReferenceSystem(crs) if not crs.isValid(): crs = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] mode = setting('analysis_extents_mode', HAZARD_EXPOSURE_VIEW) if crs and extent and (mode == HAZARD_EXPOSURE_BOUNDINGBOX): self.extent.set_user_extent(extent, crs) # depends on [control=['if'], data=[]] self.extent.show_rubber_bands = setting('showRubberBands', False, bool) self.zoom_to_impact_flag = setting('setZoomToImpactFlag', True, bool) # whether exposure layer should be hidden after model completes self.hide_exposure_flag = setting('setHideExposureFlag', False, bool)
def update_user_trackers(sender, topic, user, request, response, **kwargs): """ Receiver to mark a topic being viewed as read. This can result in marking the related forum tracker as read. """ TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler') # noqa track_handler = TrackingHandler() track_handler.mark_topic_read(topic, user)
def function[update_user_trackers, parameter[sender, topic, user, request, response]]: constant[ Receiver to mark a topic being viewed as read. This can result in marking the related forum tracker as read. ] variable[TrackingHandler] assign[=] call[name[get_class], parameter[constant[forum_tracking.handler], constant[TrackingHandler]]] variable[track_handler] assign[=] call[name[TrackingHandler], parameter[]] call[name[track_handler].mark_topic_read, parameter[name[topic], name[user]]]
keyword[def] identifier[update_user_trackers] ( identifier[sender] , identifier[topic] , identifier[user] , identifier[request] , identifier[response] ,** identifier[kwargs] ): literal[string] identifier[TrackingHandler] = identifier[get_class] ( literal[string] , literal[string] ) identifier[track_handler] = identifier[TrackingHandler] () identifier[track_handler] . identifier[mark_topic_read] ( identifier[topic] , identifier[user] )
def update_user_trackers(sender, topic, user, request, response, **kwargs): """ Receiver to mark a topic being viewed as read. This can result in marking the related forum tracker as read. """ TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler') # noqa track_handler = TrackingHandler() track_handler.mark_topic_read(topic, user)
def parse(raw_email): # type: (six.string_types) -> Tuple[six.string_types, six.string_types] """Extract email from a full address. Example: 'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com >>> parse("John Doe <me+github.com@someorg.com") ('me', 'someorg.com') >>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidEmail: 'Invalid email: 42' >>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidEmail: 'None or NaN is not a valid email address' """ if not isinstance(raw_email, six.string_types): raise InvalidEmail("Invalid email: %s" % raw_email) if not raw_email or pd.isnull(raw_email): raise InvalidEmail("None or NaN is not a valid email address") email = raw_email.split("<", 1)[-1].split(">", 1)[0] chunks = email.split("@", 3) # git-svn generates emails with several @, e.g.: # <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00> if len(chunks) < 2: raise InvalidEmail("Invalid email") uname = chunks[0].rsplit(" ", 1)[-1] addr_domain = chunks[1].split(" ", 1)[0] return uname.split("+", 1)[0], addr_domain
def function[parse, parameter[raw_email]]: constant[Extract email from a full address. Example: 'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com >>> parse("John Doe <me+github.com@someorg.com") ('me', 'someorg.com') >>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidEmail: 'Invalid email: 42' >>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidEmail: 'None or NaN is not a valid email address' ] if <ast.UnaryOp object at 0x7da1b28bdf00> begin[:] <ast.Raise object at 0x7da1b28bfd60> if <ast.BoolOp object at 0x7da1b28bd5a0> begin[:] <ast.Raise object at 0x7da1b28bf6d0> variable[email] assign[=] call[call[call[call[name[raw_email].split, parameter[constant[<], constant[1]]]][<ast.UnaryOp object at 0x7da1b28bc220>].split, parameter[constant[>], constant[1]]]][constant[0]] variable[chunks] assign[=] call[name[email].split, parameter[constant[@], constant[3]]] if compare[call[name[len], parameter[name[chunks]]] less[<] constant[2]] begin[:] <ast.Raise object at 0x7da1b28bd0f0> variable[uname] assign[=] call[call[call[name[chunks]][constant[0]].rsplit, parameter[constant[ ], constant[1]]]][<ast.UnaryOp object at 0x7da1b28bee00>] variable[addr_domain] assign[=] call[call[call[name[chunks]][constant[1]].split, parameter[constant[ ], constant[1]]]][constant[0]] return[tuple[[<ast.Subscript object at 0x7da1b28be230>, <ast.Name object at 0x7da1b28bfb20>]]]
keyword[def] identifier[parse] ( identifier[raw_email] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[raw_email] , identifier[six] . identifier[string_types] ): keyword[raise] identifier[InvalidEmail] ( literal[string] % identifier[raw_email] ) keyword[if] keyword[not] identifier[raw_email] keyword[or] identifier[pd] . identifier[isnull] ( identifier[raw_email] ): keyword[raise] identifier[InvalidEmail] ( literal[string] ) identifier[email] = identifier[raw_email] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[split] ( literal[string] , literal[int] )[ literal[int] ] identifier[chunks] = identifier[email] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[len] ( identifier[chunks] )< literal[int] : keyword[raise] identifier[InvalidEmail] ( literal[string] ) identifier[uname] = identifier[chunks] [ literal[int] ]. identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ] identifier[addr_domain] = identifier[chunks] [ literal[int] ]. identifier[split] ( literal[string] , literal[int] )[ literal[int] ] keyword[return] identifier[uname] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ], identifier[addr_domain]
def parse(raw_email): # type: (six.string_types) -> Tuple[six.string_types, six.string_types] 'Extract email from a full address. Example:\n \'John Doe <jdoe+github@foo.com>\' -> jdoe@foo.com\n\n >>> parse("John Doe <me+github.com@someorg.com")\n (\'me\', \'someorg.com\')\n >>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n InvalidEmail: \'Invalid email: 42\'\n >>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n InvalidEmail: \'None or NaN is not a valid email address\'\n ' if not isinstance(raw_email, six.string_types): raise InvalidEmail('Invalid email: %s' % raw_email) # depends on [control=['if'], data=[]] if not raw_email or pd.isnull(raw_email): raise InvalidEmail('None or NaN is not a valid email address') # depends on [control=['if'], data=[]] email = raw_email.split('<', 1)[-1].split('>', 1)[0] chunks = email.split('@', 3) # git-svn generates emails with several @, e.g.: # <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00> if len(chunks) < 2: raise InvalidEmail('Invalid email') # depends on [control=['if'], data=[]] uname = chunks[0].rsplit(' ', 1)[-1] addr_domain = chunks[1].split(' ', 1)[0] return (uname.split('+', 1)[0], addr_domain)
def system_image_type(self, system_image_type): """ Sets the system_image_type of this BuildEnvironmentRest. :param system_image_type: The system_image_type of this BuildEnvironmentRest. :type: str """ allowed_values = ["DOCKER_IMAGE", "VIRTUAL_MACHINE_RAW", "VIRTUAL_MACHINE_QCOW2", "LOCAL_WORKSPACE"] if system_image_type not in allowed_values: raise ValueError( "Invalid value for `system_image_type` ({0}), must be one of {1}" .format(system_image_type, allowed_values) ) self._system_image_type = system_image_type
def function[system_image_type, parameter[self, system_image_type]]: constant[ Sets the system_image_type of this BuildEnvironmentRest. :param system_image_type: The system_image_type of this BuildEnvironmentRest. :type: str ] variable[allowed_values] assign[=] list[[<ast.Constant object at 0x7da1b0e3a1a0>, <ast.Constant object at 0x7da1b0e3ba30>, <ast.Constant object at 0x7da1b0e3aad0>, <ast.Constant object at 0x7da1b0e3a650>]] if compare[name[system_image_type] <ast.NotIn object at 0x7da2590d7190> name[allowed_values]] begin[:] <ast.Raise object at 0x7da1b0e3aef0> name[self]._system_image_type assign[=] name[system_image_type]
keyword[def] identifier[system_image_type] ( identifier[self] , identifier[system_image_type] ): literal[string] identifier[allowed_values] =[ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[system_image_type] keyword[not] keyword[in] identifier[allowed_values] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[system_image_type] , identifier[allowed_values] ) ) identifier[self] . identifier[_system_image_type] = identifier[system_image_type]
def system_image_type(self, system_image_type): """ Sets the system_image_type of this BuildEnvironmentRest. :param system_image_type: The system_image_type of this BuildEnvironmentRest. :type: str """ allowed_values = ['DOCKER_IMAGE', 'VIRTUAL_MACHINE_RAW', 'VIRTUAL_MACHINE_QCOW2', 'LOCAL_WORKSPACE'] if system_image_type not in allowed_values: raise ValueError('Invalid value for `system_image_type` ({0}), must be one of {1}'.format(system_image_type, allowed_values)) # depends on [control=['if'], data=['system_image_type', 'allowed_values']] self._system_image_type = system_image_type
def handle(self, *test_labels, **options): """ Set the default Gherkin test runner. """ if not options.get('testrunner', None): options['testrunner'] = test_runner_class return super(Command, self).handle(*test_labels, **options)
def function[handle, parameter[self]]: constant[ Set the default Gherkin test runner. ] if <ast.UnaryOp object at 0x7da1b1a11180> begin[:] call[name[options]][constant[testrunner]] assign[=] name[test_runner_class] return[call[call[name[super], parameter[name[Command], name[self]]].handle, parameter[<ast.Starred object at 0x7da1b1a104c0>]]]
keyword[def] identifier[handle] ( identifier[self] ,* identifier[test_labels] ,** identifier[options] ): literal[string] keyword[if] keyword[not] identifier[options] . identifier[get] ( literal[string] , keyword[None] ): identifier[options] [ literal[string] ]= identifier[test_runner_class] keyword[return] identifier[super] ( identifier[Command] , identifier[self] ). identifier[handle] (* identifier[test_labels] ,** identifier[options] )
def handle(self, *test_labels, **options): """ Set the default Gherkin test runner. """ if not options.get('testrunner', None): options['testrunner'] = test_runner_class # depends on [control=['if'], data=[]] return super(Command, self).handle(*test_labels, **options)
def preload(python_data: LdapObject, database: Optional[Database] = None) -> LdapObject: """ Preload all NotLoaded fields in LdapObject. """ changes = {} # Load objects within lists. def preload_item(value: Any) -> Any: if isinstance(value, NotLoaded): return value.load(database) else: return value for name in python_data.keys(): value_list = python_data.get_as_list(name) # Check for errors. if isinstance(value_list, NotLoadedObject): raise RuntimeError(f"{name}: Unexpected NotLoadedObject outside list.") elif isinstance(value_list, NotLoadedList): value_list = value_list.load(database) else: if any(isinstance(v, NotLoadedList) for v in value_list): raise RuntimeError(f"{name}: Unexpected NotLoadedList in list.") elif any(isinstance(v, NotLoadedObject) for v in value_list): value_list = [preload_item(value) for value in value_list] else: value_list = None if value_list is not None: changes[name] = value_list return python_data.merge(changes)
def function[preload, parameter[python_data, database]]: constant[ Preload all NotLoaded fields in LdapObject. ] variable[changes] assign[=] dictionary[[], []] def function[preload_item, parameter[value]]: if call[name[isinstance], parameter[name[value], name[NotLoaded]]] begin[:] return[call[name[value].load, parameter[name[database]]]] for taget[name[name]] in starred[call[name[python_data].keys, parameter[]]] begin[:] variable[value_list] assign[=] call[name[python_data].get_as_list, parameter[name[name]]] if call[name[isinstance], parameter[name[value_list], name[NotLoadedObject]]] begin[:] <ast.Raise object at 0x7da18f09c7f0> if compare[name[value_list] is_not constant[None]] begin[:] call[name[changes]][name[name]] assign[=] name[value_list] return[call[name[python_data].merge, parameter[name[changes]]]]
keyword[def] identifier[preload] ( identifier[python_data] : identifier[LdapObject] , identifier[database] : identifier[Optional] [ identifier[Database] ]= keyword[None] )-> identifier[LdapObject] : literal[string] identifier[changes] ={} keyword[def] identifier[preload_item] ( identifier[value] : identifier[Any] )-> identifier[Any] : keyword[if] identifier[isinstance] ( identifier[value] , identifier[NotLoaded] ): keyword[return] identifier[value] . identifier[load] ( identifier[database] ) keyword[else] : keyword[return] identifier[value] keyword[for] identifier[name] keyword[in] identifier[python_data] . identifier[keys] (): identifier[value_list] = identifier[python_data] . identifier[get_as_list] ( identifier[name] ) keyword[if] identifier[isinstance] ( identifier[value_list] , identifier[NotLoadedObject] ): keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[value_list] , identifier[NotLoadedList] ): identifier[value_list] = identifier[value_list] . identifier[load] ( identifier[database] ) keyword[else] : keyword[if] identifier[any] ( identifier[isinstance] ( identifier[v] , identifier[NotLoadedList] ) keyword[for] identifier[v] keyword[in] identifier[value_list] ): keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[elif] identifier[any] ( identifier[isinstance] ( identifier[v] , identifier[NotLoadedObject] ) keyword[for] identifier[v] keyword[in] identifier[value_list] ): identifier[value_list] =[ identifier[preload_item] ( identifier[value] ) keyword[for] identifier[value] keyword[in] identifier[value_list] ] keyword[else] : identifier[value_list] = keyword[None] keyword[if] identifier[value_list] keyword[is] keyword[not] keyword[None] : identifier[changes] [ identifier[name] ]= identifier[value_list] keyword[return] identifier[python_data] . identifier[merge] ( identifier[changes] )
def preload(python_data: LdapObject, database: Optional[Database]=None) -> LdapObject: """ Preload all NotLoaded fields in LdapObject. """ changes = {} # Load objects within lists. def preload_item(value: Any) -> Any: if isinstance(value, NotLoaded): return value.load(database) # depends on [control=['if'], data=[]] else: return value for name in python_data.keys(): value_list = python_data.get_as_list(name) # Check for errors. if isinstance(value_list, NotLoadedObject): raise RuntimeError(f'{name}: Unexpected NotLoadedObject outside list.') # depends on [control=['if'], data=[]] elif isinstance(value_list, NotLoadedList): value_list = value_list.load(database) # depends on [control=['if'], data=[]] elif any((isinstance(v, NotLoadedList) for v in value_list)): raise RuntimeError(f'{name}: Unexpected NotLoadedList in list.') # depends on [control=['if'], data=[]] elif any((isinstance(v, NotLoadedObject) for v in value_list)): value_list = [preload_item(value) for value in value_list] # depends on [control=['if'], data=[]] else: value_list = None if value_list is not None: changes[name] = value_list # depends on [control=['if'], data=['value_list']] # depends on [control=['for'], data=['name']] return python_data.merge(changes)
def make_permalink(img_id): """ Removes tmp prefix from filename and rename main and variant files. Returns img_id without tmp prefix. """ profile, filename = img_id.split(':', 1) new_img_id = profile + ':' + remove_tmp_prefix_from_filename(filename) urls = get_files_by_img_id(img_id) if urls is None: return urls move_list = {(urls['main'], remove_tmp_prefix_from_file_path(urls['main']))} for var_label, var_file_path in urls['variants'].iteritems(): move_list.add((var_file_path, remove_tmp_prefix_from_file_path(var_file_path))) for file_path_from, file_path_to in move_list: os.rename(media_path(file_path_from), media_path(file_path_to)) return new_img_id
def function[make_permalink, parameter[img_id]]: constant[ Removes tmp prefix from filename and rename main and variant files. Returns img_id without tmp prefix. ] <ast.Tuple object at 0x7da1b28bf8e0> assign[=] call[name[img_id].split, parameter[constant[:], constant[1]]] variable[new_img_id] assign[=] binary_operation[binary_operation[name[profile] + constant[:]] + call[name[remove_tmp_prefix_from_filename], parameter[name[filename]]]] variable[urls] assign[=] call[name[get_files_by_img_id], parameter[name[img_id]]] if compare[name[urls] is constant[None]] begin[:] return[name[urls]] variable[move_list] assign[=] <ast.Set object at 0x7da1b28aef50> for taget[tuple[[<ast.Name object at 0x7da1b28df490>, <ast.Name object at 0x7da1b28dd3f0>]]] in starred[call[call[name[urls]][constant[variants]].iteritems, parameter[]]] begin[:] call[name[move_list].add, parameter[tuple[[<ast.Name object at 0x7da1b28dea70>, <ast.Call object at 0x7da1b28dc910>]]]] for taget[tuple[[<ast.Name object at 0x7da1b28de470>, <ast.Name object at 0x7da1b28dd060>]]] in starred[name[move_list]] begin[:] call[name[os].rename, parameter[call[name[media_path], parameter[name[file_path_from]]], call[name[media_path], parameter[name[file_path_to]]]]] return[name[new_img_id]]
keyword[def] identifier[make_permalink] ( identifier[img_id] ): literal[string] identifier[profile] , identifier[filename] = identifier[img_id] . identifier[split] ( literal[string] , literal[int] ) identifier[new_img_id] = identifier[profile] + literal[string] + identifier[remove_tmp_prefix_from_filename] ( identifier[filename] ) identifier[urls] = identifier[get_files_by_img_id] ( identifier[img_id] ) keyword[if] identifier[urls] keyword[is] keyword[None] : keyword[return] identifier[urls] identifier[move_list] ={( identifier[urls] [ literal[string] ], identifier[remove_tmp_prefix_from_file_path] ( identifier[urls] [ literal[string] ]))} keyword[for] identifier[var_label] , identifier[var_file_path] keyword[in] identifier[urls] [ literal[string] ]. identifier[iteritems] (): identifier[move_list] . identifier[add] (( identifier[var_file_path] , identifier[remove_tmp_prefix_from_file_path] ( identifier[var_file_path] ))) keyword[for] identifier[file_path_from] , identifier[file_path_to] keyword[in] identifier[move_list] : identifier[os] . identifier[rename] ( identifier[media_path] ( identifier[file_path_from] ), identifier[media_path] ( identifier[file_path_to] )) keyword[return] identifier[new_img_id]
def make_permalink(img_id): """ Removes tmp prefix from filename and rename main and variant files. Returns img_id without tmp prefix. """ (profile, filename) = img_id.split(':', 1) new_img_id = profile + ':' + remove_tmp_prefix_from_filename(filename) urls = get_files_by_img_id(img_id) if urls is None: return urls # depends on [control=['if'], data=['urls']] move_list = {(urls['main'], remove_tmp_prefix_from_file_path(urls['main']))} for (var_label, var_file_path) in urls['variants'].iteritems(): move_list.add((var_file_path, remove_tmp_prefix_from_file_path(var_file_path))) # depends on [control=['for'], data=[]] for (file_path_from, file_path_to) in move_list: os.rename(media_path(file_path_from), media_path(file_path_to)) # depends on [control=['for'], data=[]] return new_img_id
def detectSonyMylo(self): """Return detection of a Sony Mylo device Detects if the current browser is a Sony Mylo device. """ return UAgentInfo.manuSony in self.__userAgent \ and (UAgentInfo.qtembedded in self.__userAgent or UAgentInfo.mylocom2 in self.__userAgent)
def function[detectSonyMylo, parameter[self]]: constant[Return detection of a Sony Mylo device Detects if the current browser is a Sony Mylo device. ] return[<ast.BoolOp object at 0x7da1b0aa6f20>]
keyword[def] identifier[detectSonyMylo] ( identifier[self] ): literal[string] keyword[return] identifier[UAgentInfo] . identifier[manuSony] keyword[in] identifier[self] . identifier[__userAgent] keyword[and] ( identifier[UAgentInfo] . identifier[qtembedded] keyword[in] identifier[self] . identifier[__userAgent] keyword[or] identifier[UAgentInfo] . identifier[mylocom2] keyword[in] identifier[self] . identifier[__userAgent] )
def detectSonyMylo(self): """Return detection of a Sony Mylo device Detects if the current browser is a Sony Mylo device. """ return UAgentInfo.manuSony in self.__userAgent and (UAgentInfo.qtembedded in self.__userAgent or UAgentInfo.mylocom2 in self.__userAgent)
def get_content(self): """Open content as a stream for reading. See DAVResource.get_content() """ filestream = compat.StringIO() tableName, primKey = self.provider._split_path(self.path) if primKey is not None: conn = self.provider._init_connection() listFields = self.provider._get_field_list(conn, tableName) csvwriter = csv.DictWriter(filestream, listFields, extrasaction="ignore") dictFields = {} for field_name in listFields: dictFields[field_name] = field_name csvwriter.writerow(dictFields) if primKey == "_ENTIRE_CONTENTS": cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SELECT * from " + self.provider._db + "." + tableName) result_set = cursor.fetchall() for row in result_set: csvwriter.writerow(row) cursor.close() else: row = self.provider._get_record_by_primary_key(conn, tableName, primKey) if row is not None: csvwriter.writerow(row) conn.close() # this suffices for small dbs, but # for a production big database, I imagine you would have a FileMixin that # does the retrieving and population even as the file object is being read filestream.seek(0) return filestream
def function[get_content, parameter[self]]: constant[Open content as a stream for reading. See DAVResource.get_content() ] variable[filestream] assign[=] call[name[compat].StringIO, parameter[]] <ast.Tuple object at 0x7da1b01fb940> assign[=] call[name[self].provider._split_path, parameter[name[self].path]] if compare[name[primKey] is_not constant[None]] begin[:] variable[conn] assign[=] call[name[self].provider._init_connection, parameter[]] variable[listFields] assign[=] call[name[self].provider._get_field_list, parameter[name[conn], name[tableName]]] variable[csvwriter] assign[=] call[name[csv].DictWriter, parameter[name[filestream], name[listFields]]] variable[dictFields] assign[=] dictionary[[], []] for taget[name[field_name]] in starred[name[listFields]] begin[:] call[name[dictFields]][name[field_name]] assign[=] name[field_name] call[name[csvwriter].writerow, parameter[name[dictFields]]] if compare[name[primKey] equal[==] constant[_ENTIRE_CONTENTS]] begin[:] variable[cursor] assign[=] call[name[conn].cursor, parameter[name[MySQLdb].cursors.DictCursor]] call[name[cursor].execute, parameter[binary_operation[binary_operation[binary_operation[constant[SELECT * from ] + name[self].provider._db] + constant[.]] + name[tableName]]]] variable[result_set] assign[=] call[name[cursor].fetchall, parameter[]] for taget[name[row]] in starred[name[result_set]] begin[:] call[name[csvwriter].writerow, parameter[name[row]]] call[name[cursor].close, parameter[]] call[name[conn].close, parameter[]] call[name[filestream].seek, parameter[constant[0]]] return[name[filestream]]
keyword[def] identifier[get_content] ( identifier[self] ): literal[string] identifier[filestream] = identifier[compat] . identifier[StringIO] () identifier[tableName] , identifier[primKey] = identifier[self] . identifier[provider] . identifier[_split_path] ( identifier[self] . identifier[path] ) keyword[if] identifier[primKey] keyword[is] keyword[not] keyword[None] : identifier[conn] = identifier[self] . identifier[provider] . identifier[_init_connection] () identifier[listFields] = identifier[self] . identifier[provider] . identifier[_get_field_list] ( identifier[conn] , identifier[tableName] ) identifier[csvwriter] = identifier[csv] . identifier[DictWriter] ( identifier[filestream] , identifier[listFields] , identifier[extrasaction] = literal[string] ) identifier[dictFields] ={} keyword[for] identifier[field_name] keyword[in] identifier[listFields] : identifier[dictFields] [ identifier[field_name] ]= identifier[field_name] identifier[csvwriter] . identifier[writerow] ( identifier[dictFields] ) keyword[if] identifier[primKey] == literal[string] : identifier[cursor] = identifier[conn] . identifier[cursor] ( identifier[MySQLdb] . identifier[cursors] . identifier[DictCursor] ) identifier[cursor] . identifier[execute] ( literal[string] + identifier[self] . identifier[provider] . identifier[_db] + literal[string] + identifier[tableName] ) identifier[result_set] = identifier[cursor] . identifier[fetchall] () keyword[for] identifier[row] keyword[in] identifier[result_set] : identifier[csvwriter] . identifier[writerow] ( identifier[row] ) identifier[cursor] . identifier[close] () keyword[else] : identifier[row] = identifier[self] . identifier[provider] . identifier[_get_record_by_primary_key] ( identifier[conn] , identifier[tableName] , identifier[primKey] ) keyword[if] identifier[row] keyword[is] keyword[not] keyword[None] : identifier[csvwriter] . identifier[writerow] ( identifier[row] ) identifier[conn] . identifier[close] () identifier[filestream] . 
identifier[seek] ( literal[int] ) keyword[return] identifier[filestream]
def get_content(self): """Open content as a stream for reading. See DAVResource.get_content() """ filestream = compat.StringIO() (tableName, primKey) = self.provider._split_path(self.path) if primKey is not None: conn = self.provider._init_connection() listFields = self.provider._get_field_list(conn, tableName) csvwriter = csv.DictWriter(filestream, listFields, extrasaction='ignore') dictFields = {} for field_name in listFields: dictFields[field_name] = field_name # depends on [control=['for'], data=['field_name']] csvwriter.writerow(dictFields) if primKey == '_ENTIRE_CONTENTS': cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute('SELECT * from ' + self.provider._db + '.' + tableName) result_set = cursor.fetchall() for row in result_set: csvwriter.writerow(row) # depends on [control=['for'], data=['row']] cursor.close() # depends on [control=['if'], data=[]] else: row = self.provider._get_record_by_primary_key(conn, tableName, primKey) if row is not None: csvwriter.writerow(row) # depends on [control=['if'], data=['row']] conn.close() # depends on [control=['if'], data=['primKey']] # this suffices for small dbs, but # for a production big database, I imagine you would have a FileMixin that # does the retrieving and population even as the file object is being read filestream.seek(0) return filestream
def notify3_d_event(self, type_p, data): """Notifies framebuffer about 3D backend event. in type_p of type int event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported. in data of type str event-specific data, depends on the supplied event type """ if not isinstance(type_p, baseinteger): raise TypeError("type_p can only be an instance of type baseinteger") if not isinstance(data, list): raise TypeError("data can only be an instance of type list") for a in data[:10]: if not isinstance(a, basestring): raise TypeError( "array can only contain objects of type basestring") self._call("notify3DEvent", in_p=[type_p, data])
def function[notify3_d_event, parameter[self, type_p, data]]: constant[Notifies framebuffer about 3D backend event. in type_p of type int event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported. in data of type str event-specific data, depends on the supplied event type ] if <ast.UnaryOp object at 0x7da204344340> begin[:] <ast.Raise object at 0x7da204345420> if <ast.UnaryOp object at 0x7da2043476a0> begin[:] <ast.Raise object at 0x7da204347e50> for taget[name[a]] in starred[call[name[data]][<ast.Slice object at 0x7da204345b70>]] begin[:] if <ast.UnaryOp object at 0x7da204344a30> begin[:] <ast.Raise object at 0x7da204345cc0> call[name[self]._call, parameter[constant[notify3DEvent]]]
keyword[def] identifier[notify3_d_event] ( identifier[self] , identifier[type_p] , identifier[data] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[type_p] , identifier[baseinteger] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[list] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[for] identifier[a] keyword[in] identifier[data] [: literal[int] ]: keyword[if] keyword[not] identifier[isinstance] ( identifier[a] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[type_p] , identifier[data] ])
def notify3_d_event(self, type_p, data): """Notifies framebuffer about 3D backend event. in type_p of type int event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported. in data of type str event-specific data, depends on the supplied event type """ if not isinstance(type_p, baseinteger): raise TypeError('type_p can only be an instance of type baseinteger') # depends on [control=['if'], data=[]] if not isinstance(data, list): raise TypeError('data can only be an instance of type list') # depends on [control=['if'], data=[]] for a in data[:10]: if not isinstance(a, basestring): raise TypeError('array can only contain objects of type basestring') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']] self._call('notify3DEvent', in_p=[type_p, data])
def server(self): """ UDP server to listen for responses. """ server = getattr(self, "_server", None) if server is None: log.debug("Binding datagram server to %s", self.bind) server = DatagramServer(self.bind, self._response_received) self._server = server return server
def function[server, parameter[self]]: constant[ UDP server to listen for responses. ] variable[server] assign[=] call[name[getattr], parameter[name[self], constant[_server], constant[None]]] if compare[name[server] is constant[None]] begin[:] call[name[log].debug, parameter[constant[Binding datagram server to %s], name[self].bind]] variable[server] assign[=] call[name[DatagramServer], parameter[name[self].bind, name[self]._response_received]] name[self]._server assign[=] name[server] return[name[server]]
keyword[def] identifier[server] ( identifier[self] ): literal[string] identifier[server] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[if] identifier[server] keyword[is] keyword[None] : identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[bind] ) identifier[server] = identifier[DatagramServer] ( identifier[self] . identifier[bind] , identifier[self] . identifier[_response_received] ) identifier[self] . identifier[_server] = identifier[server] keyword[return] identifier[server]
def server(self): """ UDP server to listen for responses. """ server = getattr(self, '_server', None) if server is None: log.debug('Binding datagram server to %s', self.bind) server = DatagramServer(self.bind, self._response_received) self._server = server # depends on [control=['if'], data=['server']] return server
def inherit_doc(cls): """ A decorator that makes a class inherit documentation from its parents. """ for name, func in vars(cls).items(): # only inherit docstring for public functions if name.startswith("_"): continue if not func.__doc__: for parent in cls.__bases__: parent_func = getattr(parent, name, None) if parent_func and getattr(parent_func, "__doc__", None): func.__doc__ = parent_func.__doc__ break return cls
def function[inherit_doc, parameter[cls]]: constant[ A decorator that makes a class inherit documentation from its parents. ] for taget[tuple[[<ast.Name object at 0x7da204961390>, <ast.Name object at 0x7da204961c30>]]] in starred[call[call[name[vars], parameter[name[cls]]].items, parameter[]]] begin[:] if call[name[name].startswith, parameter[constant[_]]] begin[:] continue if <ast.UnaryOp object at 0x7da204960d90> begin[:] for taget[name[parent]] in starred[name[cls].__bases__] begin[:] variable[parent_func] assign[=] call[name[getattr], parameter[name[parent], name[name], constant[None]]] if <ast.BoolOp object at 0x7da204961a50> begin[:] name[func].__doc__ assign[=] name[parent_func].__doc__ break return[name[cls]]
keyword[def] identifier[inherit_doc] ( identifier[cls] ): literal[string] keyword[for] identifier[name] , identifier[func] keyword[in] identifier[vars] ( identifier[cls] ). identifier[items] (): keyword[if] identifier[name] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[if] keyword[not] identifier[func] . identifier[__doc__] : keyword[for] identifier[parent] keyword[in] identifier[cls] . identifier[__bases__] : identifier[parent_func] = identifier[getattr] ( identifier[parent] , identifier[name] , keyword[None] ) keyword[if] identifier[parent_func] keyword[and] identifier[getattr] ( identifier[parent_func] , literal[string] , keyword[None] ): identifier[func] . identifier[__doc__] = identifier[parent_func] . identifier[__doc__] keyword[break] keyword[return] identifier[cls]
def inherit_doc(cls): """ A decorator that makes a class inherit documentation from its parents. """ for (name, func) in vars(cls).items(): # only inherit docstring for public functions if name.startswith('_'): continue # depends on [control=['if'], data=[]] if not func.__doc__: for parent in cls.__bases__: parent_func = getattr(parent, name, None) if parent_func and getattr(parent_func, '__doc__', None): func.__doc__ = parent_func.__doc__ break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['parent']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return cls
def kron_dot(A, B, C, out=None): r""" Kronecker product followed by dot product. Let :math:`\mathrm A`, :math:`\mathrm B`, and :math:`\mathrm C` be matrices of dimensions :math:`p\times p`, :math:`n\times d`, and :math:`d\times p`. It computes .. math:: \text{unvec}((\mathrm A\otimes\mathrm B)\text{vec}(\mathrm C)) \in n\times p, which is equivalent to :math:`\mathrm B\mathrm C\mathrm A^{\intercal}`. Parameters ---------- A : array_like Matrix A. B : array_like Matrix B. C : array_like Matrix C. out : :class:`numpy.ndarray`, optional Copy result to. Defaults to ``None``. Returns ------- :class:`numpy.ndarray` unvec((A ⊗ B) vec(C)) """ from numpy import dot, zeros, asarray A = asarray(A) B = asarray(B) C = asarray(C) if out is None: out = zeros((B.shape[0], A.shape[0])) dot(B, dot(C, A.T), out=out) return out
def function[kron_dot, parameter[A, B, C, out]]: constant[ Kronecker product followed by dot product. Let :math:`\mathrm A`, :math:`\mathrm B`, and :math:`\mathrm C` be matrices of dimensions :math:`p\times p`, :math:`n\times d`, and :math:`d\times p`. It computes .. math:: \text{unvec}((\mathrm A\otimes\mathrm B)\text{vec}(\mathrm C)) \in n\times p, which is equivalent to :math:`\mathrm B\mathrm C\mathrm A^{\intercal}`. Parameters ---------- A : array_like Matrix A. B : array_like Matrix B. C : array_like Matrix C. out : :class:`numpy.ndarray`, optional Copy result to. Defaults to ``None``. Returns ------- :class:`numpy.ndarray` unvec((A ⊗ B) vec(C)) ] from relative_module[numpy] import module[dot], module[zeros], module[asarray] variable[A] assign[=] call[name[asarray], parameter[name[A]]] variable[B] assign[=] call[name[asarray], parameter[name[B]]] variable[C] assign[=] call[name[asarray], parameter[name[C]]] if compare[name[out] is constant[None]] begin[:] variable[out] assign[=] call[name[zeros], parameter[tuple[[<ast.Subscript object at 0x7da1b1a74b50>, <ast.Subscript object at 0x7da1b1a77430>]]]] call[name[dot], parameter[name[B], call[name[dot], parameter[name[C], name[A].T]]]] return[name[out]]
keyword[def] identifier[kron_dot] ( identifier[A] , identifier[B] , identifier[C] , identifier[out] = keyword[None] ): literal[string] keyword[from] identifier[numpy] keyword[import] identifier[dot] , identifier[zeros] , identifier[asarray] identifier[A] = identifier[asarray] ( identifier[A] ) identifier[B] = identifier[asarray] ( identifier[B] ) identifier[C] = identifier[asarray] ( identifier[C] ) keyword[if] identifier[out] keyword[is] keyword[None] : identifier[out] = identifier[zeros] (( identifier[B] . identifier[shape] [ literal[int] ], identifier[A] . identifier[shape] [ literal[int] ])) identifier[dot] ( identifier[B] , identifier[dot] ( identifier[C] , identifier[A] . identifier[T] ), identifier[out] = identifier[out] ) keyword[return] identifier[out]
def kron_dot(A, B, C, out=None): """ Kronecker product followed by dot product. Let :math:`\\mathrm A`, :math:`\\mathrm B`, and :math:`\\mathrm C` be matrices of dimensions :math:`p\\times p`, :math:`n\\times d`, and :math:`d\\times p`. It computes .. math:: \\text{unvec}((\\mathrm A\\otimes\\mathrm B)\\text{vec}(\\mathrm C)) \\in n\\times p, which is equivalent to :math:`\\mathrm B\\mathrm C\\mathrm A^{\\intercal}`. Parameters ---------- A : array_like Matrix A. B : array_like Matrix B. C : array_like Matrix C. out : :class:`numpy.ndarray`, optional Copy result to. Defaults to ``None``. Returns ------- :class:`numpy.ndarray` unvec((A ⊗ B) vec(C)) """ from numpy import dot, zeros, asarray A = asarray(A) B = asarray(B) C = asarray(C) if out is None: out = zeros((B.shape[0], A.shape[0])) # depends on [control=['if'], data=['out']] dot(B, dot(C, A.T), out=out) return out
def nic_v1(msg, NICs): """Calculate NIC, navigation integrity category, for ADS-B version 1 Args: msg (string): 28 bytes hexadecimal message string NICs (int or string): NIC supplement Returns: int or string: Horizontal Radius of Containment int or string: Vertical Protection Limit """ if typecode(msg) < 5 or typecode(msg) > 22: raise RuntimeError( "%s: Not a surface position message (5<TC<8), \ airborne position message (8<TC<19), \ or airborne position with GNSS height (20<TC<22)" % msg ) tc = typecode(msg) NIC = uncertainty.TC_NICv1_lookup[tc] if isinstance(NIC, dict): NIC = NIC[NICs] try: Rc = uncertainty.NICv1[NIC][NICs]['Rc'] VPL = uncertainty.NICv1[NIC][NICs]['VPL'] except KeyError: Rc, VPL = uncertainty.NA, uncertainty.NA return Rc, VPL
def function[nic_v1, parameter[msg, NICs]]: constant[Calculate NIC, navigation integrity category, for ADS-B version 1 Args: msg (string): 28 bytes hexadecimal message string NICs (int or string): NIC supplement Returns: int or string: Horizontal Radius of Containment int or string: Vertical Protection Limit ] if <ast.BoolOp object at 0x7da1b16b08e0> begin[:] <ast.Raise object at 0x7da1b16b2320> variable[tc] assign[=] call[name[typecode], parameter[name[msg]]] variable[NIC] assign[=] call[name[uncertainty].TC_NICv1_lookup][name[tc]] if call[name[isinstance], parameter[name[NIC], name[dict]]] begin[:] variable[NIC] assign[=] call[name[NIC]][name[NICs]] <ast.Try object at 0x7da1b16b1540> return[tuple[[<ast.Name object at 0x7da1b16b04c0>, <ast.Name object at 0x7da1b16b23e0>]]]
keyword[def] identifier[nic_v1] ( identifier[msg] , identifier[NICs] ): literal[string] keyword[if] identifier[typecode] ( identifier[msg] )< literal[int] keyword[or] identifier[typecode] ( identifier[msg] )> literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[msg] ) identifier[tc] = identifier[typecode] ( identifier[msg] ) identifier[NIC] = identifier[uncertainty] . identifier[TC_NICv1_lookup] [ identifier[tc] ] keyword[if] identifier[isinstance] ( identifier[NIC] , identifier[dict] ): identifier[NIC] = identifier[NIC] [ identifier[NICs] ] keyword[try] : identifier[Rc] = identifier[uncertainty] . identifier[NICv1] [ identifier[NIC] ][ identifier[NICs] ][ literal[string] ] identifier[VPL] = identifier[uncertainty] . identifier[NICv1] [ identifier[NIC] ][ identifier[NICs] ][ literal[string] ] keyword[except] identifier[KeyError] : identifier[Rc] , identifier[VPL] = identifier[uncertainty] . identifier[NA] , identifier[uncertainty] . identifier[NA] keyword[return] identifier[Rc] , identifier[VPL]
def nic_v1(msg, NICs): """Calculate NIC, navigation integrity category, for ADS-B version 1 Args: msg (string): 28 bytes hexadecimal message string NICs (int or string): NIC supplement Returns: int or string: Horizontal Radius of Containment int or string: Vertical Protection Limit """ if typecode(msg) < 5 or typecode(msg) > 22: raise RuntimeError('%s: Not a surface position message (5<TC<8), airborne position message (8<TC<19), or airborne position with GNSS height (20<TC<22)' % msg) # depends on [control=['if'], data=[]] tc = typecode(msg) NIC = uncertainty.TC_NICv1_lookup[tc] if isinstance(NIC, dict): NIC = NIC[NICs] # depends on [control=['if'], data=[]] try: Rc = uncertainty.NICv1[NIC][NICs]['Rc'] VPL = uncertainty.NICv1[NIC][NICs]['VPL'] # depends on [control=['try'], data=[]] except KeyError: (Rc, VPL) = (uncertainty.NA, uncertainty.NA) # depends on [control=['except'], data=[]] return (Rc, VPL)
def set_property_value(self, name, value, dry_run=False): """Set or remove property value. See DAVResource.set_property_value() """ raise DAVError( HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty )
def function[set_property_value, parameter[self, name, value, dry_run]]: constant[Set or remove property value. See DAVResource.set_property_value() ] <ast.Raise object at 0x7da1b01f80d0>
keyword[def] identifier[set_property_value] ( identifier[self] , identifier[name] , identifier[value] , identifier[dry_run] = keyword[False] ): literal[string] keyword[raise] identifier[DAVError] ( identifier[HTTP_FORBIDDEN] , identifier[err_condition] = identifier[PRECONDITION_CODE_ProtectedProperty] )
def set_property_value(self, name, value, dry_run=False): """Set or remove property value. See DAVResource.set_property_value() """ raise DAVError(HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty)
def _member_defs(self): """ A single string containing the aggregated member definitions section of the documentation page """ members = self._clsdict['__members__'] member_defs = [ self._member_def(member) for member in members if member.name is not None ] return '\n'.join(member_defs)
def function[_member_defs, parameter[self]]: constant[ A single string containing the aggregated member definitions section of the documentation page ] variable[members] assign[=] call[name[self]._clsdict][constant[__members__]] variable[member_defs] assign[=] <ast.ListComp object at 0x7da1b1cb91b0> return[call[constant[ ].join, parameter[name[member_defs]]]]
keyword[def] identifier[_member_defs] ( identifier[self] ): literal[string] identifier[members] = identifier[self] . identifier[_clsdict] [ literal[string] ] identifier[member_defs] =[ identifier[self] . identifier[_member_def] ( identifier[member] ) keyword[for] identifier[member] keyword[in] identifier[members] keyword[if] identifier[member] . identifier[name] keyword[is] keyword[not] keyword[None] ] keyword[return] literal[string] . identifier[join] ( identifier[member_defs] )
def _member_defs(self): """ A single string containing the aggregated member definitions section of the documentation page """ members = self._clsdict['__members__'] member_defs = [self._member_def(member) for member in members if member.name is not None] return '\n'.join(member_defs)
def NoExclusions(self): """Determine that there are no exclusion criterion in play :return: True if there is no real boundary specification of any kind. Simple method allowing parsers to short circuit the determination of missingness, which can be moderately compute intensive. """ if len(self.start_bounds) + len(self.target_rs) + len(self.ignored_rs) == 0: return BoundaryCheck.chrom == -1 return False
def function[NoExclusions, parameter[self]]: constant[Determine that there are no exclusion criterion in play :return: True if there is no real boundary specification of any kind. Simple method allowing parsers to short circuit the determination of missingness, which can be moderately compute intensive. ] if compare[binary_operation[binary_operation[call[name[len], parameter[name[self].start_bounds]] + call[name[len], parameter[name[self].target_rs]]] + call[name[len], parameter[name[self].ignored_rs]]] equal[==] constant[0]] begin[:] return[compare[name[BoundaryCheck].chrom equal[==] <ast.UnaryOp object at 0x7da1b143fa60>]] return[constant[False]]
keyword[def] identifier[NoExclusions] ( identifier[self] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[start_bounds] )+ identifier[len] ( identifier[self] . identifier[target_rs] )+ identifier[len] ( identifier[self] . identifier[ignored_rs] )== literal[int] : keyword[return] identifier[BoundaryCheck] . identifier[chrom] ==- literal[int] keyword[return] keyword[False]
def NoExclusions(self): """Determine that there are no exclusion criterion in play :return: True if there is no real boundary specification of any kind. Simple method allowing parsers to short circuit the determination of missingness, which can be moderately compute intensive. """ if len(self.start_bounds) + len(self.target_rs) + len(self.ignored_rs) == 0: return BoundaryCheck.chrom == -1 # depends on [control=['if'], data=[]] return False
def to_json_basic(self): """ Create JSON structure with generic attributes :return: dict """ return {'name': self.__class__.__name__, 'priority': self.priority, 'address': self.address, 'rtr': self.rtr}
def function[to_json_basic, parameter[self]]: constant[ Create JSON structure with generic attributes :return: dict ] return[dictionary[[<ast.Constant object at 0x7da207f03e80>, <ast.Constant object at 0x7da207f02ad0>, <ast.Constant object at 0x7da207f03f10>, <ast.Constant object at 0x7da207f011b0>], [<ast.Attribute object at 0x7da207f02e30>, <ast.Attribute object at 0x7da207f03370>, <ast.Attribute object at 0x7da207f033d0>, <ast.Attribute object at 0x7da207f02b30>]]]
keyword[def] identifier[to_json_basic] ( identifier[self] ): literal[string] keyword[return] { literal[string] : identifier[self] . identifier[__class__] . identifier[__name__] , literal[string] : identifier[self] . identifier[priority] , literal[string] : identifier[self] . identifier[address] , literal[string] : identifier[self] . identifier[rtr] }
def to_json_basic(self): """ Create JSON structure with generic attributes :return: dict """ return {'name': self.__class__.__name__, 'priority': self.priority, 'address': self.address, 'rtr': self.rtr}
def experiments_list(self, api_url=None, offset=0, limit=-1, properties=None): """Get list of experiment resources from a SCO-API. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Uses default API if argument not present. offset : int, optional Starting offset for returned list items limit : int, optional Limit the number of items in the result properties : List(string) List of additional object properties to be included for items in the result Returns ------- List(scoserv.ResourceHandle) List of resource handles (one per image group in the listing) """ # Get subject listing Url for given SCO-API and return the retrieved # resource listing return sco.get_resource_listing( self.get_api_references(api_url)[sco.REF_EXPERIMENTS_LISTING], offset, limit, properties )
def function[experiments_list, parameter[self, api_url, offset, limit, properties]]: constant[Get list of experiment resources from a SCO-API. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Uses default API if argument not present. offset : int, optional Starting offset for returned list items limit : int, optional Limit the number of items in the result properties : List(string) List of additional object properties to be included for items in the result Returns ------- List(scoserv.ResourceHandle) List of resource handles (one per image group in the listing) ] return[call[name[sco].get_resource_listing, parameter[call[call[name[self].get_api_references, parameter[name[api_url]]]][name[sco].REF_EXPERIMENTS_LISTING], name[offset], name[limit], name[properties]]]]
keyword[def] identifier[experiments_list] ( identifier[self] , identifier[api_url] = keyword[None] , identifier[offset] = literal[int] , identifier[limit] =- literal[int] , identifier[properties] = keyword[None] ): literal[string] keyword[return] identifier[sco] . identifier[get_resource_listing] ( identifier[self] . identifier[get_api_references] ( identifier[api_url] )[ identifier[sco] . identifier[REF_EXPERIMENTS_LISTING] ], identifier[offset] , identifier[limit] , identifier[properties] )
def experiments_list(self, api_url=None, offset=0, limit=-1, properties=None): """Get list of experiment resources from a SCO-API. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Uses default API if argument not present. offset : int, optional Starting offset for returned list items limit : int, optional Limit the number of items in the result properties : List(string) List of additional object properties to be included for items in the result Returns ------- List(scoserv.ResourceHandle) List of resource handles (one per image group in the listing) """ # Get subject listing Url for given SCO-API and return the retrieved # resource listing return sco.get_resource_listing(self.get_api_references(api_url)[sco.REF_EXPERIMENTS_LISTING], offset, limit, properties)
def set_substitution(self, what, rep): """Set a substitution. Equivalent to ``! sub`` in RiveScript code. :param str what: The original text to replace. :param str rep: The text to replace it with. Set this to ``None`` to delete the substitution. """ if rep is None: # Unset the variable. if what in self._subs: del self._subs[what] self._subs[what] = rep
def function[set_substitution, parameter[self, what, rep]]: constant[Set a substitution. Equivalent to ``! sub`` in RiveScript code. :param str what: The original text to replace. :param str rep: The text to replace it with. Set this to ``None`` to delete the substitution. ] if compare[name[rep] is constant[None]] begin[:] if compare[name[what] in name[self]._subs] begin[:] <ast.Delete object at 0x7da20c6c5330> call[name[self]._subs][name[what]] assign[=] name[rep]
keyword[def] identifier[set_substitution] ( identifier[self] , identifier[what] , identifier[rep] ): literal[string] keyword[if] identifier[rep] keyword[is] keyword[None] : keyword[if] identifier[what] keyword[in] identifier[self] . identifier[_subs] : keyword[del] identifier[self] . identifier[_subs] [ identifier[what] ] identifier[self] . identifier[_subs] [ identifier[what] ]= identifier[rep]
def set_substitution(self, what, rep): """Set a substitution. Equivalent to ``! sub`` in RiveScript code. :param str what: The original text to replace. :param str rep: The text to replace it with. Set this to ``None`` to delete the substitution. """ if rep is None: # Unset the variable. if what in self._subs: del self._subs[what] # depends on [control=['if'], data=['what']] # depends on [control=['if'], data=[]] self._subs[what] = rep
def insert_ordered(value, array): """ This will insert the value into the array, keeping it sorted, and returning the index where it was inserted """ index = 0 # search for the last array item that value is larger than for n in range(0,len(array)): if value >= array[n]: index = n+1 array.insert(index, value) return index
def function[insert_ordered, parameter[value, array]]: constant[ This will insert the value into the array, keeping it sorted, and returning the index where it was inserted ] variable[index] assign[=] constant[0] for taget[name[n]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[array]]]]]] begin[:] if compare[name[value] greater_or_equal[>=] call[name[array]][name[n]]] begin[:] variable[index] assign[=] binary_operation[name[n] + constant[1]] call[name[array].insert, parameter[name[index], name[value]]] return[name[index]]
keyword[def] identifier[insert_ordered] ( identifier[value] , identifier[array] ): literal[string] identifier[index] = literal[int] keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[array] )): keyword[if] identifier[value] >= identifier[array] [ identifier[n] ]: identifier[index] = identifier[n] + literal[int] identifier[array] . identifier[insert] ( identifier[index] , identifier[value] ) keyword[return] identifier[index]
def insert_ordered(value, array): """ This will insert the value into the array, keeping it sorted, and returning the index where it was inserted """ index = 0 # search for the last array item that value is larger than for n in range(0, len(array)): if value >= array[n]: index = n + 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']] array.insert(index, value) return index
def select(self, criterion, keepboth=False): """Filter current file collections, create another file collections contains all winfile with criterion=True. How to construct your own criterion function, see :meth:`FileCollection.from_path_by_criterion`. :param criterion: customize filter function :type criterion: function :param keepboth: if True, returns two file collections, one is files with criterion=True, another is False. :type keepboth: boolean **中文文档** 在当前的文件集合中, 根据criterion中的规则, 选择需要的生成 FileCollection。当keepboth参数=True时, 返回两个FileCollection, 一个 是符合条件的文件集合, 一个是不符合条件的。 """ if keepboth: fcs_yes, fcs_no = FileCollection(), FileCollection() for winfile in self.files.values(): if criterion(winfile): fcs_yes.files[winfile.abspath] = winfile else: fcs_no.files[winfile.abspath] = winfile return fcs_yes, fcs_no else: fcs = FileCollection() for winfile in self.files.values(): if criterion(winfile): fcs.files[winfile.abspath] = winfile return fcs
def function[select, parameter[self, criterion, keepboth]]: constant[Filter current file collections, create another file collections contains all winfile with criterion=True. How to construct your own criterion function, see :meth:`FileCollection.from_path_by_criterion`. :param criterion: customize filter function :type criterion: function :param keepboth: if True, returns two file collections, one is files with criterion=True, another is False. :type keepboth: boolean **中文文档** 在当前的文件集合中, 根据criterion中的规则, 选择需要的生成 FileCollection。当keepboth参数=True时, 返回两个FileCollection, 一个 是符合条件的文件集合, 一个是不符合条件的。 ] if name[keepboth] begin[:] <ast.Tuple object at 0x7da204963ca0> assign[=] tuple[[<ast.Call object at 0x7da20c6a9240>, <ast.Call object at 0x7da20c6a9960>]] for taget[name[winfile]] in starred[call[name[self].files.values, parameter[]]] begin[:] if call[name[criterion], parameter[name[winfile]]] begin[:] call[name[fcs_yes].files][name[winfile].abspath] assign[=] name[winfile] return[tuple[[<ast.Name object at 0x7da20c6aabf0>, <ast.Name object at 0x7da20c6a9d50>]]]
keyword[def] identifier[select] ( identifier[self] , identifier[criterion] , identifier[keepboth] = keyword[False] ): literal[string] keyword[if] identifier[keepboth] : identifier[fcs_yes] , identifier[fcs_no] = identifier[FileCollection] (), identifier[FileCollection] () keyword[for] identifier[winfile] keyword[in] identifier[self] . identifier[files] . identifier[values] (): keyword[if] identifier[criterion] ( identifier[winfile] ): identifier[fcs_yes] . identifier[files] [ identifier[winfile] . identifier[abspath] ]= identifier[winfile] keyword[else] : identifier[fcs_no] . identifier[files] [ identifier[winfile] . identifier[abspath] ]= identifier[winfile] keyword[return] identifier[fcs_yes] , identifier[fcs_no] keyword[else] : identifier[fcs] = identifier[FileCollection] () keyword[for] identifier[winfile] keyword[in] identifier[self] . identifier[files] . identifier[values] (): keyword[if] identifier[criterion] ( identifier[winfile] ): identifier[fcs] . identifier[files] [ identifier[winfile] . identifier[abspath] ]= identifier[winfile] keyword[return] identifier[fcs]
def select(self, criterion, keepboth=False): """Filter current file collections, create another file collections contains all winfile with criterion=True. How to construct your own criterion function, see :meth:`FileCollection.from_path_by_criterion`. :param criterion: customize filter function :type criterion: function :param keepboth: if True, returns two file collections, one is files with criterion=True, another is False. :type keepboth: boolean **中文文档** 在当前的文件集合中, 根据criterion中的规则, 选择需要的生成 FileCollection。当keepboth参数=True时, 返回两个FileCollection, 一个 是符合条件的文件集合, 一个是不符合条件的。 """ if keepboth: (fcs_yes, fcs_no) = (FileCollection(), FileCollection()) for winfile in self.files.values(): if criterion(winfile): fcs_yes.files[winfile.abspath] = winfile # depends on [control=['if'], data=[]] else: fcs_no.files[winfile.abspath] = winfile # depends on [control=['for'], data=['winfile']] return (fcs_yes, fcs_no) # depends on [control=['if'], data=[]] else: fcs = FileCollection() for winfile in self.files.values(): if criterion(winfile): fcs.files[winfile.abspath] = winfile # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['winfile']] return fcs
def run_to_abs_pos(self, **kwargs): """ Run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. """ for key in kwargs: setattr(self, key, kwargs[key]) self.command = self.COMMAND_RUN_TO_ABS_POS
def function[run_to_abs_pos, parameter[self]]: constant[ Run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. ] for taget[name[key]] in starred[name[kwargs]] begin[:] call[name[setattr], parameter[name[self], name[key], call[name[kwargs]][name[key]]]] name[self].command assign[=] name[self].COMMAND_RUN_TO_ABS_POS
keyword[def] identifier[run_to_abs_pos] ( identifier[self] ,** identifier[kwargs] ): literal[string] keyword[for] identifier[key] keyword[in] identifier[kwargs] : identifier[setattr] ( identifier[self] , identifier[key] , identifier[kwargs] [ identifier[key] ]) identifier[self] . identifier[command] = identifier[self] . identifier[COMMAND_RUN_TO_ABS_POS]
def run_to_abs_pos(self, **kwargs): """ Run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. """ for key in kwargs: setattr(self, key, kwargs[key]) # depends on [control=['for'], data=['key']] self.command = self.COMMAND_RUN_TO_ABS_POS
def delete(self): """Deletes matching objects from the Repository Does not throw error if no objects are matched. Returns the number of objects matched (which may not be equal to the number of objects deleted if objects rows already have the new value). """ # Fetch Model class and connected repository from Repository Factory deleted_item_count = 0 try: items = self.all() for item in items: item.delete() deleted_item_count += 1 except Exception: # FIXME Log Exception raise return deleted_item_count
def function[delete, parameter[self]]: constant[Deletes matching objects from the Repository Does not throw error if no objects are matched. Returns the number of objects matched (which may not be equal to the number of objects deleted if objects rows already have the new value). ] variable[deleted_item_count] assign[=] constant[0] <ast.Try object at 0x7da18fe93e20> return[name[deleted_item_count]]
keyword[def] identifier[delete] ( identifier[self] ): literal[string] identifier[deleted_item_count] = literal[int] keyword[try] : identifier[items] = identifier[self] . identifier[all] () keyword[for] identifier[item] keyword[in] identifier[items] : identifier[item] . identifier[delete] () identifier[deleted_item_count] += literal[int] keyword[except] identifier[Exception] : keyword[raise] keyword[return] identifier[deleted_item_count]
def delete(self): """Deletes matching objects from the Repository Does not throw error if no objects are matched. Returns the number of objects matched (which may not be equal to the number of objects deleted if objects rows already have the new value). """ # Fetch Model class and connected repository from Repository Factory deleted_item_count = 0 try: items = self.all() for item in items: item.delete() deleted_item_count += 1 # depends on [control=['for'], data=['item']] # depends on [control=['try'], data=[]] except Exception: # FIXME Log Exception raise # depends on [control=['except'], data=[]] return deleted_item_count
def build_srcdict(gta, prop):
    """Map each source name in the ROI to that source's *prop* value.

    Parameters
    ----------
    gta : `fermipy.GTAnalysis`
        The analysis object.
    prop : str
        The name of the property we are mapping.

    Returns
    -------
    odict : dict
        Dictionary that maps from source name to the value
        of the specified property.
    """
    return {src.name: src[prop] for src in gta.roi.sources}
def function[build_srcdict, parameter[gta, prop]]: constant[Build a dictionary that maps from source name to the value of a source property Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object prop : str The name of the property we are mapping Returns ------- odict : dict Dictionary that maps from source name to the value of the specified property ] variable[o] assign[=] dictionary[[], []] for taget[name[s]] in starred[name[gta].roi.sources] begin[:] call[name[o]][name[s].name] assign[=] call[name[s]][name[prop]] return[name[o]]
keyword[def] identifier[build_srcdict] ( identifier[gta] , identifier[prop] ): literal[string] identifier[o] ={} keyword[for] identifier[s] keyword[in] identifier[gta] . identifier[roi] . identifier[sources] : identifier[o] [ identifier[s] . identifier[name] ]= identifier[s] [ identifier[prop] ] keyword[return] identifier[o]
def build_srcdict(gta, prop): """Build a dictionary that maps from source name to the value of a source property Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object prop : str The name of the property we are mapping Returns ------- odict : dict Dictionary that maps from source name to the value of the specified property """ o = {} for s in gta.roi.sources: o[s.name] = s[prop] # depends on [control=['for'], data=['s']] return o
def send_dynamic_message(sender, message):
    """Broadcast a dynamic message to the listeners.

    Dynamic messages represent progress; they are usually appended to the
    previously shown messages.

    .. versionadded:: 3.3

    :param sender: The sender.
    :type sender: object

    :param message: An instance of our rich message class.
    :type message: safe.messaging.Message
    """
    dispatcher.send(
        sender=sender,
        message=message,
        signal=DYNAMIC_MESSAGE_SIGNAL)
def function[send_dynamic_message, parameter[sender, message]]: constant[Send a dynamic message to the listeners. Dynamic messages represents a progress. Usually it will be appended to the previous messages. .. versionadded:: 3.3 :param sender: The sender. :type sender: object :param message: An instance of our rich message class. :type message: safe.messaging.Message ] call[name[dispatcher].send, parameter[]]
keyword[def] identifier[send_dynamic_message] ( identifier[sender] , identifier[message] ): literal[string] identifier[dispatcher] . identifier[send] ( identifier[signal] = identifier[DYNAMIC_MESSAGE_SIGNAL] , identifier[sender] = identifier[sender] , identifier[message] = identifier[message] )
def send_dynamic_message(sender, message): """Send a dynamic message to the listeners. Dynamic messages represents a progress. Usually it will be appended to the previous messages. .. versionadded:: 3.3 :param sender: The sender. :type sender: object :param message: An instance of our rich message class. :type message: safe.messaging.Message """ dispatcher.send(signal=DYNAMIC_MESSAGE_SIGNAL, sender=sender, message=message)
def _constant_probability_density(self, X): """Probability density for the degenerate case of constant distribution. Note that the output of this method will be an array whose unique values are 0 and 1. More information can be found here: https://en.wikipedia.org/wiki/Degenerate_distribution Args: X(numpy.ndarray): Values to compute pdf. Returns: numpy.ndarray: Probability densisty for the given values """ result = np.zeros(X.shape) result[np.nonzero(X == self.constant_value)] = 1 return result
def function[_constant_probability_density, parameter[self, X]]: constant[Probability density for the degenerate case of constant distribution. Note that the output of this method will be an array whose unique values are 0 and 1. More information can be found here: https://en.wikipedia.org/wiki/Degenerate_distribution Args: X(numpy.ndarray): Values to compute pdf. Returns: numpy.ndarray: Probability densisty for the given values ] variable[result] assign[=] call[name[np].zeros, parameter[name[X].shape]] call[name[result]][call[name[np].nonzero, parameter[compare[name[X] equal[==] name[self].constant_value]]]] assign[=] constant[1] return[name[result]]
keyword[def] identifier[_constant_probability_density] ( identifier[self] , identifier[X] ): literal[string] identifier[result] = identifier[np] . identifier[zeros] ( identifier[X] . identifier[shape] ) identifier[result] [ identifier[np] . identifier[nonzero] ( identifier[X] == identifier[self] . identifier[constant_value] )]= literal[int] keyword[return] identifier[result]
def _constant_probability_density(self, X): """Probability density for the degenerate case of constant distribution. Note that the output of this method will be an array whose unique values are 0 and 1. More information can be found here: https://en.wikipedia.org/wiki/Degenerate_distribution Args: X(numpy.ndarray): Values to compute pdf. Returns: numpy.ndarray: Probability densisty for the given values """ result = np.zeros(X.shape) result[np.nonzero(X == self.constant_value)] = 1 return result
def setup_package():
    """
    Setup the package.
    """
    # Pinned requirements come straight from requirements.txt, one per line.
    with open('requirements.txt', 'r') as req_file:
        requirements = req_file.read().split('\n')
    # Register the ANTLR grammar build step first, then let versioneer add
    # its own commands (versioneer's entries win on any name clash).
    commands = {'antlr': AntlrBuildCommand}
    commands.update(versioneer.get_cmdclass())
    setup(
        version=versioneer.get_version(),
        name='pymoca',
        maintainer="James Goppert",
        maintainer_email="james.goppert@gmail.com",
        description=DOCLINES[0],
        long_description="\n".join(DOCLINES[2:]),
        url='https://github.com/pymoca/pymoca',
        author='James Goppert',
        author_email='james.goppert@gmail.com',
        download_url='https://github.com/pymoca/pymoca',
        license='BSD',
        classifiers=[entry for entry in CLASSIFIERS.split('\n') if entry],
        platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
        install_requires=requirements,
        tests_require=['coverage >= 3.7.1', 'nose >= 1.3.1'],
        test_suite='nose.collector',
        python_requires='>=3.5',
        packages=find_packages("src"),
        package_dir={"": "src"},
        include_package_data=True,
        cmdclass=commands
    )
def function[setup_package, parameter[]]: constant[ Setup the package. ] with call[name[open], parameter[constant[requirements.txt], constant[r]]] begin[:] variable[install_reqs] assign[=] call[call[name[req_file].read, parameter[]].split, parameter[constant[ ]]] variable[cmdclass_] assign[=] dictionary[[<ast.Constant object at 0x7da1b257c280>], [<ast.Name object at 0x7da1b257c250>]] call[name[cmdclass_].update, parameter[call[name[versioneer].get_cmdclass, parameter[]]]] call[name[setup], parameter[]]
keyword[def] identifier[setup_package] (): literal[string] keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[req_file] : identifier[install_reqs] = identifier[req_file] . identifier[read] (). identifier[split] ( literal[string] ) identifier[cmdclass_] ={ literal[string] : identifier[AntlrBuildCommand] } identifier[cmdclass_] . identifier[update] ( identifier[versioneer] . identifier[get_cmdclass] ()) identifier[setup] ( identifier[version] = identifier[versioneer] . identifier[get_version] (), identifier[name] = literal[string] , identifier[maintainer] = literal[string] , identifier[maintainer_email] = literal[string] , identifier[description] = identifier[DOCLINES] [ literal[int] ], identifier[long_description] = literal[string] . identifier[join] ( identifier[DOCLINES] [ literal[int] :]), identifier[url] = literal[string] , identifier[author] = literal[string] , identifier[author_email] = literal[string] , identifier[download_url] = literal[string] , identifier[license] = literal[string] , identifier[classifiers] =[ identifier[_f] keyword[for] identifier[_f] keyword[in] identifier[CLASSIFIERS] . identifier[split] ( literal[string] ) keyword[if] identifier[_f] ], identifier[platforms] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[install_requires] = identifier[install_reqs] , identifier[tests_require] =[ literal[string] , literal[string] ], identifier[test_suite] = literal[string] , identifier[python_requires] = literal[string] , identifier[packages] = identifier[find_packages] ( literal[string] ), identifier[package_dir] ={ literal[string] : literal[string] }, identifier[include_package_data] = keyword[True] , identifier[cmdclass] = identifier[cmdclass_] )
def setup_package(): """ Setup the package. """ with open('requirements.txt', 'r') as req_file: install_reqs = req_file.read().split('\n') # depends on [control=['with'], data=['req_file']] cmdclass_ = {'antlr': AntlrBuildCommand} cmdclass_.update(versioneer.get_cmdclass()) setup(version=versioneer.get_version(), name='pymoca', maintainer='James Goppert', maintainer_email='james.goppert@gmail.com', description=DOCLINES[0], long_description='\n'.join(DOCLINES[2:]), url='https://github.com/pymoca/pymoca', author='James Goppert', author_email='james.goppert@gmail.com', download_url='https://github.com/pymoca/pymoca', license='BSD', classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'], install_requires=install_reqs, tests_require=['coverage >= 3.7.1', 'nose >= 1.3.1'], test_suite='nose.collector', python_requires='>=3.5', packages=find_packages('src'), package_dir={'': 'src'}, include_package_data=True, cmdclass=cmdclass_)
def calc_anchors(preregistration_map, model, model_hemi, scale=1, sigma=Ellipsis,
                 radius_weight=0, field_sign_weight=0, invert_rh_field_sign=False):
    '''
    calc_anchors is a calculator that creates a set of anchor instructions for
    a registration.

    Required afferent parameters:
      @ invert_rh_field_sign May be set to True (default is False) to indicate
        that the right hemisphere's field signs will be incorrect relative to
        the model; this generally should be used whenever invert_rh_angle is
        also set to True.
    '''
    # Per-vertex weights of the flattened map drive the anchor strengths.
    wgts = preregistration_map.prop('weight')
    # NOTE(review): the original also fetched prop('radius') into an unused
    # local; that dead read has been removed.
    # Snap a numerically-negligible radius weight to exactly 0 so downstream
    # code can test it cheaply.
    if np.isclose(radius_weight, 0):
        radius_weight = 0
    ancs = retinotopy_anchors(preregistration_map, model,
                              polar_angle='polar_angle',
                              eccentricity='eccentricity',
                              radius='radius',
                              weight=wgts, weight_min=0,  # taken care of already
                              radius_weight=radius_weight,
                              field_sign_weight=field_sign_weight,
                              scale=scale,
                              # The rh model needs its field sign flipped only
                              # when the caller asks for it.
                              invert_field_sign=(model_hemi == 'rh'
                                                 and invert_rh_field_sign),
                              # sigma is forwarded only when explicitly given;
                              # Ellipsis means "use retinotopy_anchors' default".
                              **({} if sigma is Ellipsis else {'sigma': sigma}))
    return ancs
def function[calc_anchors, parameter[preregistration_map, model, model_hemi, scale, sigma, radius_weight, field_sign_weight, invert_rh_field_sign]]: constant[ calc_anchors is a calculator that creates a set of anchor instructions for a registration. Required afferent parameters: @ invert_rh_field_sign May be set to True (default is False) to indicate that the right hemisphere's field signs will be incorrect relative to the model; this generally should be used whenever invert_rh_angle is also set to True. ] variable[wgts] assign[=] call[name[preregistration_map].prop, parameter[constant[weight]]] variable[rads] assign[=] call[name[preregistration_map].prop, parameter[constant[radius]]] if call[name[np].isclose, parameter[name[radius_weight], constant[0]]] begin[:] variable[radius_weight] assign[=] constant[0] variable[ancs] assign[=] call[name[retinotopy_anchors], parameter[name[preregistration_map], name[model]]] return[name[ancs]]
keyword[def] identifier[calc_anchors] ( identifier[preregistration_map] , identifier[model] , identifier[model_hemi] , identifier[scale] = literal[int] , identifier[sigma] = identifier[Ellipsis] , identifier[radius_weight] = literal[int] , identifier[field_sign_weight] = literal[int] , identifier[invert_rh_field_sign] = keyword[False] ): literal[string] identifier[wgts] = identifier[preregistration_map] . identifier[prop] ( literal[string] ) identifier[rads] = identifier[preregistration_map] . identifier[prop] ( literal[string] ) keyword[if] identifier[np] . identifier[isclose] ( identifier[radius_weight] , literal[int] ): identifier[radius_weight] = literal[int] identifier[ancs] = identifier[retinotopy_anchors] ( identifier[preregistration_map] , identifier[model] , identifier[polar_angle] = literal[string] , identifier[eccentricity] = literal[string] , identifier[radius] = literal[string] , identifier[weight] = identifier[wgts] , identifier[weight_min] = literal[int] , identifier[radius_weight] = identifier[radius_weight] , identifier[field_sign_weight] = identifier[field_sign_weight] , identifier[scale] = identifier[scale] , identifier[invert_field_sign] =( identifier[model_hemi] == literal[string] keyword[and] identifier[invert_rh_field_sign] ), **({} keyword[if] identifier[sigma] keyword[is] identifier[Ellipsis] keyword[else] { literal[string] : identifier[sigma] })) keyword[return] identifier[ancs]
def calc_anchors(preregistration_map, model, model_hemi, scale=1, sigma=Ellipsis, radius_weight=0, field_sign_weight=0, invert_rh_field_sign=False): """ calc_anchors is a calculator that creates a set of anchor instructions for a registration. Required afferent parameters: @ invert_rh_field_sign May be set to True (default is False) to indicate that the right hemisphere's field signs will be incorrect relative to the model; this generally should be used whenever invert_rh_angle is also set to True. """ wgts = preregistration_map.prop('weight') rads = preregistration_map.prop('radius') if np.isclose(radius_weight, 0): radius_weight = 0 # depends on [control=['if'], data=[]] # taken care of already ancs = retinotopy_anchors(preregistration_map, model, polar_angle='polar_angle', eccentricity='eccentricity', radius='radius', weight=wgts, weight_min=0, radius_weight=radius_weight, field_sign_weight=field_sign_weight, scale=scale, invert_field_sign=model_hemi == 'rh' and invert_rh_field_sign, **{} if sigma is Ellipsis else {'sigma': sigma}) return ancs
def check(self, *args):
    """
    Checks the validity of the run parameters. Returns
    flag (True = OK), and a message which indicates the
    nature of the problem if the flag is False.
    """
    ok = True
    msg = ''
    # Shared application state: config pars, colour table, observe widget.
    g = get_root(self).globals
    dtype = g.observe.rtype()
    expert = g.cpars['expert_level'] > 0
    if dtype == 'bias' or dtype == 'flat' or dtype == 'dark':
        # Calibration frames need no programme metadata: grey out the
        # PI, programme/OB and target fields.
        self.pi.configure(state='disable')
        self.prog_ob.configure(state='disable')
        self.target.disable()
    else:
        # Science/technical runs: programme/PI fields are editable only
        # in expert mode; the target field is always editable.
        if expert:
            self.pi.configure(state='normal')
            self.prog_ob.configure(state='normal')
            self.prog_ob.enable()
        else:
            self.prog_ob.configure(state='disable')
            self.pi.configure(state='disable')
            self.prog_ob.disable()
        self.target.enable()

    if g.cpars['require_run_params']:
        # Validate each mandatory field: colour it normally when OK,
        # otherwise highlight it, flip the flag and append a message.
        if self.target.ok():
            self.target.entry.config(bg=g.COL['main'])
        else:
            self.target.entry.config(bg=g.COL['error'])
            ok = False
            msg += 'Target name field cannot be blank\n'

        # Programme/OB and PI are only required for on-sky run types.
        if dtype == 'data caution' or \
           dtype == 'data' or dtype == 'technical':
            if self.prog_ob.ok():
                self.prog_ob.config(bg=g.COL['main'])
            else:
                self.prog_ob.config(bg=g.COL['error'])
                ok = False
                msg += 'Programme or OB ID field cannot be blank\n'

            if self.pi.ok():
                self.pi.config(bg=g.COL['main'])
            else:
                self.pi.config(bg=g.COL['error'])
                ok = False
                msg += 'Principal Investigator field cannot be blank\n'

        # Observers are required for every run type.
        if self.observers.ok():
            self.observers.config(bg=g.COL['main'])
        else:
            self.observers.config(bg=g.COL['error'])
            ok = False
            msg += 'Observers field cannot be blank'

    return (ok, msg)
def function[check, parameter[self]]: constant[ Checks the validity of the run parameters. Returns flag (True = OK), and a message which indicates the nature of the problem if the flag is False. ] variable[ok] assign[=] constant[True] variable[msg] assign[=] constant[] variable[g] assign[=] call[name[get_root], parameter[name[self]]].globals variable[dtype] assign[=] call[name[g].observe.rtype, parameter[]] variable[expert] assign[=] compare[call[name[g].cpars][constant[expert_level]] greater[>] constant[0]] if <ast.BoolOp object at 0x7da20cabfa00> begin[:] call[name[self].pi.configure, parameter[]] call[name[self].prog_ob.configure, parameter[]] call[name[self].target.disable, parameter[]] if call[name[g].cpars][constant[require_run_params]] begin[:] if call[name[self].target.ok, parameter[]] begin[:] call[name[self].target.entry.config, parameter[]] if <ast.BoolOp object at 0x7da204564430> begin[:] if call[name[self].prog_ob.ok, parameter[]] begin[:] call[name[self].prog_ob.config, parameter[]] if call[name[self].pi.ok, parameter[]] begin[:] call[name[self].pi.config, parameter[]] if call[name[self].observers.ok, parameter[]] begin[:] call[name[self].observers.config, parameter[]] return[tuple[[<ast.Name object at 0x7da207f995d0>, <ast.Name object at 0x7da207f98340>]]]
keyword[def] identifier[check] ( identifier[self] ,* identifier[args] ): literal[string] identifier[ok] = keyword[True] identifier[msg] = literal[string] identifier[g] = identifier[get_root] ( identifier[self] ). identifier[globals] identifier[dtype] = identifier[g] . identifier[observe] . identifier[rtype] () identifier[expert] = identifier[g] . identifier[cpars] [ literal[string] ]> literal[int] keyword[if] identifier[dtype] == literal[string] keyword[or] identifier[dtype] == literal[string] keyword[or] identifier[dtype] == literal[string] : identifier[self] . identifier[pi] . identifier[configure] ( identifier[state] = literal[string] ) identifier[self] . identifier[prog_ob] . identifier[configure] ( identifier[state] = literal[string] ) identifier[self] . identifier[target] . identifier[disable] () keyword[else] : keyword[if] identifier[expert] : identifier[self] . identifier[pi] . identifier[configure] ( identifier[state] = literal[string] ) identifier[self] . identifier[prog_ob] . identifier[configure] ( identifier[state] = literal[string] ) identifier[self] . identifier[prog_ob] . identifier[enable] () keyword[else] : identifier[self] . identifier[prog_ob] . identifier[configure] ( identifier[state] = literal[string] ) identifier[self] . identifier[pi] . identifier[configure] ( identifier[state] = literal[string] ) identifier[self] . identifier[prog_ob] . identifier[disable] () identifier[self] . identifier[target] . identifier[enable] () keyword[if] identifier[g] . identifier[cpars] [ literal[string] ]: keyword[if] identifier[self] . identifier[target] . identifier[ok] (): identifier[self] . identifier[target] . identifier[entry] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) keyword[else] : identifier[self] . identifier[target] . identifier[entry] . identifier[config] ( identifier[bg] = identifier[g] . 
identifier[COL] [ literal[string] ]) identifier[ok] = keyword[False] identifier[msg] += literal[string] keyword[if] identifier[dtype] == literal[string] keyword[or] identifier[dtype] == literal[string] keyword[or] identifier[dtype] == literal[string] : keyword[if] identifier[self] . identifier[prog_ob] . identifier[ok] (): identifier[self] . identifier[prog_ob] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) keyword[else] : identifier[self] . identifier[prog_ob] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) identifier[ok] = keyword[False] identifier[msg] += literal[string] keyword[if] identifier[self] . identifier[pi] . identifier[ok] (): identifier[self] . identifier[pi] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) keyword[else] : identifier[self] . identifier[pi] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) identifier[ok] = keyword[False] identifier[msg] += literal[string] keyword[if] identifier[self] . identifier[observers] . identifier[ok] (): identifier[self] . identifier[observers] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) keyword[else] : identifier[self] . identifier[observers] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) identifier[ok] = keyword[False] identifier[msg] += literal[string] keyword[return] ( identifier[ok] , identifier[msg] )
def check(self, *args): """ Checks the validity of the run parameters. Returns flag (True = OK), and a message which indicates the nature of the problem if the flag is False. """ ok = True msg = '' g = get_root(self).globals dtype = g.observe.rtype() expert = g.cpars['expert_level'] > 0 if dtype == 'bias' or dtype == 'flat' or dtype == 'dark': self.pi.configure(state='disable') self.prog_ob.configure(state='disable') self.target.disable() # depends on [control=['if'], data=[]] else: if expert: self.pi.configure(state='normal') self.prog_ob.configure(state='normal') self.prog_ob.enable() # depends on [control=['if'], data=[]] else: self.prog_ob.configure(state='disable') self.pi.configure(state='disable') self.prog_ob.disable() self.target.enable() if g.cpars['require_run_params']: if self.target.ok(): self.target.entry.config(bg=g.COL['main']) # depends on [control=['if'], data=[]] else: self.target.entry.config(bg=g.COL['error']) ok = False msg += 'Target name field cannot be blank\n' if dtype == 'data caution' or dtype == 'data' or dtype == 'technical': if self.prog_ob.ok(): self.prog_ob.config(bg=g.COL['main']) # depends on [control=['if'], data=[]] else: self.prog_ob.config(bg=g.COL['error']) ok = False msg += 'Programme or OB ID field cannot be blank\n' if self.pi.ok(): self.pi.config(bg=g.COL['main']) # depends on [control=['if'], data=[]] else: self.pi.config(bg=g.COL['error']) ok = False msg += 'Principal Investigator field cannot be blank\n' # depends on [control=['if'], data=[]] if self.observers.ok(): self.observers.config(bg=g.COL['main']) # depends on [control=['if'], data=[]] else: self.observers.config(bg=g.COL['error']) ok = False msg += 'Observers field cannot be blank' # depends on [control=['if'], data=[]] return (ok, msg)
def convert_to_bool(x: Any, default: bool = None) -> bool:
    """
    Transforms its input to a ``bool`` (or returns ``default`` if ``x`` is
    falsy but not itself a boolean).

    Accepts various common string versions ("yes"/"no", "t"/"f", numeric
    strings, ...); raises for anything it cannot interpret.
    """
    if isinstance(x, bool):
        return x
    if not x:
        # None, zero, blank string...
        return default
    # Numeric interpretation first: any nonzero number is True.
    for numeric in (int, float):
        try:
            return numeric(x) != 0
        except (TypeError, ValueError):
            pass
    if not isinstance(x, str):
        raise Exception("Unknown thing being converted to bool: {!r}".format(x))
    upper = x.upper()
    if upper in ("Y", "YES", "T", "TRUE"):
        return True
    if upper in ("N", "NO", "F", "FALSE"):
        return False
    raise Exception("Unknown thing being converted to bool: {!r}".format(x))
def function[convert_to_bool, parameter[x, default]]: constant[ Transforms its input to a ``bool`` (or returns ``default`` if ``x`` is falsy but not itself a boolean). Accepts various common string versions. ] if call[name[isinstance], parameter[name[x], name[bool]]] begin[:] return[name[x]] if <ast.UnaryOp object at 0x7da1b1836e00> begin[:] return[name[default]] <ast.Try object at 0x7da1b1835540> <ast.Try object at 0x7da1b1836710> if <ast.UnaryOp object at 0x7da1b18359f0> begin[:] <ast.Raise object at 0x7da1b18369b0> variable[x] assign[=] call[name[x].upper, parameter[]] if compare[name[x] in list[[<ast.Constant object at 0x7da1b1835ea0>, <ast.Constant object at 0x7da1b1837d60>, <ast.Constant object at 0x7da1b1836f80>, <ast.Constant object at 0x7da1b1834580>]]] begin[:] return[constant[True]] if compare[name[x] in list[[<ast.Constant object at 0x7da1b1834640>, <ast.Constant object at 0x7da1b18372e0>, <ast.Constant object at 0x7da1b1837040>, <ast.Constant object at 0x7da1b18352d0>]]] begin[:] return[constant[False]] <ast.Raise object at 0x7da1b1835c90>
keyword[def] identifier[convert_to_bool] ( identifier[x] : identifier[Any] , identifier[default] : identifier[bool] = keyword[None] )-> identifier[bool] : literal[string] keyword[if] identifier[isinstance] ( identifier[x] , identifier[bool] ): keyword[return] identifier[x] keyword[if] keyword[not] identifier[x] : keyword[return] identifier[default] keyword[try] : keyword[return] identifier[int] ( identifier[x] )!= literal[int] keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[pass] keyword[try] : keyword[return] identifier[float] ( identifier[x] )!= literal[int] keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[pass] keyword[if] keyword[not] identifier[isinstance] ( identifier[x] , identifier[str] ): keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[x] )) identifier[x] = identifier[x] . identifier[upper] () keyword[if] identifier[x] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[return] keyword[True] keyword[if] identifier[x] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[return] keyword[False] keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[x] ))
def convert_to_bool(x: Any, default: bool=None) -> bool: """ Transforms its input to a ``bool`` (or returns ``default`` if ``x`` is falsy but not itself a boolean). Accepts various common string versions. """ if isinstance(x, bool): return x # depends on [control=['if'], data=[]] if not x: # None, zero, blank string... return default # depends on [control=['if'], data=[]] try: return int(x) != 0 # depends on [control=['try'], data=[]] except (TypeError, ValueError): pass # depends on [control=['except'], data=[]] try: return float(x) != 0 # depends on [control=['try'], data=[]] except (TypeError, ValueError): pass # depends on [control=['except'], data=[]] if not isinstance(x, str): raise Exception('Unknown thing being converted to bool: {!r}'.format(x)) # depends on [control=['if'], data=[]] x = x.upper() if x in ['Y', 'YES', 'T', 'TRUE']: return True # depends on [control=['if'], data=[]] if x in ['N', 'NO', 'F', 'FALSE']: return False # depends on [control=['if'], data=[]] raise Exception('Unknown thing being converted to bool: {!r}'.format(x))
def _fmt(self, tag, msg): """Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars. """ msg = msg or '<unset>' msg = str(msg) msg = msg.strip() if not msg: return if len(msg) > 2048: msg = msg[:1024] + '...' if msg.count('\n') <= 1: return '{}: {}\n'.format(tag, msg.strip()) else: return '{}:\n {}\n'.format(tag, msg.replace('\n', '\n ').strip())
def function[_fmt, parameter[self, tag, msg]]: constant[Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars. ] variable[msg] assign[=] <ast.BoolOp object at 0x7da1b1ad6080> variable[msg] assign[=] call[name[str], parameter[name[msg]]] variable[msg] assign[=] call[name[msg].strip, parameter[]] if <ast.UnaryOp object at 0x7da20e9b20e0> begin[:] return[None] if compare[call[name[len], parameter[name[msg]]] greater[>] constant[2048]] begin[:] variable[msg] assign[=] binary_operation[call[name[msg]][<ast.Slice object at 0x7da1b1ad40a0>] + constant[...]] if compare[call[name[msg].count, parameter[constant[ ]]] less_or_equal[<=] constant[1]] begin[:] return[call[constant[{}: {} ].format, parameter[name[tag], call[name[msg].strip, parameter[]]]]]
keyword[def] identifier[_fmt] ( identifier[self] , identifier[tag] , identifier[msg] ): literal[string] identifier[msg] = identifier[msg] keyword[or] literal[string] identifier[msg] = identifier[str] ( identifier[msg] ) identifier[msg] = identifier[msg] . identifier[strip] () keyword[if] keyword[not] identifier[msg] : keyword[return] keyword[if] identifier[len] ( identifier[msg] )> literal[int] : identifier[msg] = identifier[msg] [: literal[int] ]+ literal[string] keyword[if] identifier[msg] . identifier[count] ( literal[string] )<= literal[int] : keyword[return] literal[string] . identifier[format] ( identifier[tag] , identifier[msg] . identifier[strip] ()) keyword[else] : keyword[return] literal[string] . identifier[format] ( identifier[tag] , identifier[msg] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ())
def _fmt(self, tag, msg): """Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars. """ msg = msg or '<unset>' msg = str(msg) msg = msg.strip() if not msg: return # depends on [control=['if'], data=[]] if len(msg) > 2048: msg = msg[:1024] + '...' # depends on [control=['if'], data=[]] if msg.count('\n') <= 1: return '{}: {}\n'.format(tag, msg.strip()) # depends on [control=['if'], data=[]] else: return '{}:\n {}\n'.format(tag, msg.replace('\n', '\n ').strip())
def authorize(self, email, permission_type='read', cloud=None, api_key=None,
              version=None, **kwargs):
    """
    Authorize another user to access your model in a read or write
    capacity. The model must already be registered before authorize is
    called.

    Inputs:
      email - String: The email of the user you would like to share
        access with.
      permission_type (optional) - String: One of ['read', 'write'].
        Users with read permissions can only call `predict`; users with
        write permissions can add new input examples and train models.
      api_key (optional) - String: Your API key, required only if the key
        has not been declared elsewhere.
      cloud (optional) - String: Your private cloud domain, required only
        if it has not been declared elsewhere.
    """
    # Fold the grant details into the payload alongside any extra options.
    kwargs.update(permission_type=permission_type, email=email)
    url_params = {
        "batch": False,
        "api_key": api_key,
        "version": version,
        "method": "authorize",
    }
    return self._api_handler(None, cloud=cloud, api="custom",
                             url_params=url_params, **kwargs)
def function[authorize, parameter[self, email, permission_type, cloud, api_key, version]]: constant[ This API endpoint allows you to authorize another user to access your model in a read or write capacity. Before calling authorize, you must first make sure your model has been registered. Inputs: email - String: The email of the user you would like to share access with. permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`. Users with `write` permissions can add new input examples and train models. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. ] call[name[kwargs]][constant[permission_type]] assign[=] name[permission_type] call[name[kwargs]][constant[email]] assign[=] name[email] variable[url_params] assign[=] dictionary[[<ast.Constant object at 0x7da2041d9960>, <ast.Constant object at 0x7da2041d82e0>, <ast.Constant object at 0x7da2041d9240>, <ast.Constant object at 0x7da2041d9540>], [<ast.Constant object at 0x7da2041d8670>, <ast.Name object at 0x7da2041dbac0>, <ast.Name object at 0x7da2041d90f0>, <ast.Constant object at 0x7da2041db2e0>]] return[call[name[self]._api_handler, parameter[constant[None]]]]
keyword[def] identifier[authorize] ( identifier[self] , identifier[email] , identifier[permission_type] = literal[string] , identifier[cloud] = keyword[None] , identifier[api_key] = keyword[None] , identifier[version] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= identifier[permission_type] identifier[kwargs] [ literal[string] ]= identifier[email] identifier[url_params] ={ literal[string] : keyword[False] , literal[string] : identifier[api_key] , literal[string] : identifier[version] , literal[string] : literal[string] } keyword[return] identifier[self] . identifier[_api_handler] ( keyword[None] , identifier[cloud] = identifier[cloud] , identifier[api] = literal[string] , identifier[url_params] = identifier[url_params] ,** identifier[kwargs] )
def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs): """ This API endpoint allows you to authorize another user to access your model in a read or write capacity. Before calling authorize, you must first make sure your model has been registered. Inputs: email - String: The email of the user you would like to share access with. permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`. Users with `write` permissions can add new input examples and train models. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ kwargs['permission_type'] = permission_type kwargs['email'] = email url_params = {'batch': False, 'api_key': api_key, 'version': version, 'method': 'authorize'} return self._api_handler(None, cloud=cloud, api='custom', url_params=url_params, **kwargs)
def _handle_double_click(self, event): """ Double click with left mouse button focuses the element""" if event.get_button()[1] == 1: # Left mouse button path_info = self.tree_view.get_path_at_pos(int(event.x), int(event.y)) if path_info: # Valid entry was clicked on path = path_info[0] iter = self.list_store.get_iter(path) model = self.list_store.get_value(iter, self.MODEL_STORAGE_ID) selection = self.model.get_state_machine_m().selection selection.focus = model
def function[_handle_double_click, parameter[self, event]]: constant[ Double click with left mouse button focuses the element] if compare[call[call[name[event].get_button, parameter[]]][constant[1]] equal[==] constant[1]] begin[:] variable[path_info] assign[=] call[name[self].tree_view.get_path_at_pos, parameter[call[name[int], parameter[name[event].x]], call[name[int], parameter[name[event].y]]]] if name[path_info] begin[:] variable[path] assign[=] call[name[path_info]][constant[0]] variable[iter] assign[=] call[name[self].list_store.get_iter, parameter[name[path]]] variable[model] assign[=] call[name[self].list_store.get_value, parameter[name[iter], name[self].MODEL_STORAGE_ID]] variable[selection] assign[=] call[name[self].model.get_state_machine_m, parameter[]].selection name[selection].focus assign[=] name[model]
keyword[def] identifier[_handle_double_click] ( identifier[self] , identifier[event] ): literal[string] keyword[if] identifier[event] . identifier[get_button] ()[ literal[int] ]== literal[int] : identifier[path_info] = identifier[self] . identifier[tree_view] . identifier[get_path_at_pos] ( identifier[int] ( identifier[event] . identifier[x] ), identifier[int] ( identifier[event] . identifier[y] )) keyword[if] identifier[path_info] : identifier[path] = identifier[path_info] [ literal[int] ] identifier[iter] = identifier[self] . identifier[list_store] . identifier[get_iter] ( identifier[path] ) identifier[model] = identifier[self] . identifier[list_store] . identifier[get_value] ( identifier[iter] , identifier[self] . identifier[MODEL_STORAGE_ID] ) identifier[selection] = identifier[self] . identifier[model] . identifier[get_state_machine_m] (). identifier[selection] identifier[selection] . identifier[focus] = identifier[model]
def _handle_double_click(self, event): """ Double click with left mouse button focuses the element""" if event.get_button()[1] == 1: # Left mouse button path_info = self.tree_view.get_path_at_pos(int(event.x), int(event.y)) if path_info: # Valid entry was clicked on path = path_info[0] iter = self.list_store.get_iter(path) model = self.list_store.get_value(iter, self.MODEL_STORAGE_ID) selection = self.model.get_state_machine_m().selection selection.focus = model # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def sign_tx(network, tx, wifs=[], **kwargs): """ :param tx: a transaction :param wifs: the list of WIFs required to sign this transaction. :return: :class:`Tx <Tx>` object, modified in place This is a convenience function used to sign a transaction. The transaction must have "unspents" set by, for example, calling tx.unspents_from_db. Returns the signed Tx transaction, or raises an exception. Usage:: >> sign_tx(network, tx, wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"]) """ keychain = network.keychain() keychain.add_secrets((network.parse.wif(_) for _ in wifs)) solver = tx.Solver(tx) solver.sign(keychain, **kwargs)
def function[sign_tx, parameter[network, tx, wifs]]: constant[ :param tx: a transaction :param wifs: the list of WIFs required to sign this transaction. :return: :class:`Tx <Tx>` object, modified in place This is a convenience function used to sign a transaction. The transaction must have "unspents" set by, for example, calling tx.unspents_from_db. Returns the signed Tx transaction, or raises an exception. Usage:: >> sign_tx(network, tx, wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"]) ] variable[keychain] assign[=] call[name[network].keychain, parameter[]] call[name[keychain].add_secrets, parameter[<ast.GeneratorExp object at 0x7da1b1d5d510>]] variable[solver] assign[=] call[name[tx].Solver, parameter[name[tx]]] call[name[solver].sign, parameter[name[keychain]]]
keyword[def] identifier[sign_tx] ( identifier[network] , identifier[tx] , identifier[wifs] =[],** identifier[kwargs] ): literal[string] identifier[keychain] = identifier[network] . identifier[keychain] () identifier[keychain] . identifier[add_secrets] (( identifier[network] . identifier[parse] . identifier[wif] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[wifs] )) identifier[solver] = identifier[tx] . identifier[Solver] ( identifier[tx] ) identifier[solver] . identifier[sign] ( identifier[keychain] ,** identifier[kwargs] )
def sign_tx(network, tx, wifs=[], **kwargs): """ :param tx: a transaction :param wifs: the list of WIFs required to sign this transaction. :return: :class:`Tx <Tx>` object, modified in place This is a convenience function used to sign a transaction. The transaction must have "unspents" set by, for example, calling tx.unspents_from_db. Returns the signed Tx transaction, or raises an exception. Usage:: >> sign_tx(network, tx, wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"]) """ keychain = network.keychain() keychain.add_secrets((network.parse.wif(_) for _ in wifs)) solver = tx.Solver(tx) solver.sign(keychain, **kwargs)
def add_plugin(self, plugin):
    """Register *plugin*, replacing any existing plugin of the same name.

    Plugins loaded via entry points can thereby override builtin plugins:
    every already-registered plugin whose name matches the new plugin's
    is dropped before the new one is appended.
    """
    incoming = self.plugin_name(plugin)
    survivors = [existing for existing in self._plugins
                 if self.plugin_name(existing) != incoming]
    survivors.append(plugin)
    # Slice-assign so the existing list object is mutated in place.
    self._plugins[:] = survivors
def function[add_plugin, parameter[self, plugin]]: constant[Add the given plugin.] variable[new_name] assign[=] call[name[self].plugin_name, parameter[name[plugin]]] call[name[self]._plugins][<ast.Slice object at 0x7da1b1b6baf0>] assign[=] <ast.ListComp object at 0x7da1b1b6b760> call[name[self]._plugins.append, parameter[name[plugin]]]
keyword[def] identifier[add_plugin] ( identifier[self] , identifier[plugin] ): literal[string] identifier[new_name] = identifier[self] . identifier[plugin_name] ( identifier[plugin] ) identifier[self] . identifier[_plugins] [:]=[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[_plugins] keyword[if] identifier[self] . identifier[plugin_name] ( identifier[p] )!= identifier[new_name] ] identifier[self] . identifier[_plugins] . identifier[append] ( identifier[plugin] )
def add_plugin(self, plugin): """Add the given plugin.""" # allow plugins loaded via entry points to override builtin plugins new_name = self.plugin_name(plugin) self._plugins[:] = [p for p in self._plugins if self.plugin_name(p) != new_name] self._plugins.append(plugin)
def apply_transformation(self, structure):
    """
    Returns the structure in a conventional standard setting.

    (The previous docstring claimed the "most primitive cell" was
    returned, but the code calls ``get_conventional_standard_structure``.)

    Args:
        structure: A structure

    Returns:
        The same structure in a conventional standard setting
    """
    # Symmetry tolerances come from this transformation's configuration.
    sga = SpacegroupAnalyzer(structure, symprec=self.symprec,
                             angle_tolerance=self.angle_tolerance)
    return sga.get_conventional_standard_structure(
        international_monoclinic=self.international_monoclinic)
def function[apply_transformation, parameter[self, structure]]: constant[ Returns most primitive cell for structure. Args: structure: A structure Returns: The same structure in a conventional standard setting ] variable[sga] assign[=] call[name[SpacegroupAnalyzer], parameter[name[structure]]] return[call[name[sga].get_conventional_standard_structure, parameter[]]]
keyword[def] identifier[apply_transformation] ( identifier[self] , identifier[structure] ): literal[string] identifier[sga] = identifier[SpacegroupAnalyzer] ( identifier[structure] , identifier[symprec] = identifier[self] . identifier[symprec] , identifier[angle_tolerance] = identifier[self] . identifier[angle_tolerance] ) keyword[return] identifier[sga] . identifier[get_conventional_standard_structure] ( identifier[international_monoclinic] = identifier[self] . identifier[international_monoclinic] )
def apply_transformation(self, structure): """ Returns most primitive cell for structure. Args: structure: A structure Returns: The same structure in a conventional standard setting """ sga = SpacegroupAnalyzer(structure, symprec=self.symprec, angle_tolerance=self.angle_tolerance) return sga.get_conventional_standard_structure(international_monoclinic=self.international_monoclinic)
def _grid_widgets(self):
    """Place the scale and the entry according to ``self.__compound``.

    ``__compound`` holds one of tk.TOP/BOTTOM/LEFT/RIGHT and selects on
    which side of the scale the entry sits; ``__entryscalepad`` is the
    padding inserted between the two widgets.
    """
    orient = str(self._scale.cget('orient'))
    horizontal = orient == tk.HORIZONTAL
    # tk compound constants are plain strings, so compare with ``==``
    # rather than ``is``: identity of equal strings is an interning
    # detail and is not guaranteed (the orient check above already
    # used equality).
    if self.__compound == tk.RIGHT:
        padx = (0, self.__entryscalepad)
    elif self.__compound == tk.LEFT:
        padx = (self.__entryscalepad, 0)
    else:
        padx = 0
    if self.__compound == tk.BOTTOM:
        pady = (0, self.__entryscalepad)
    elif self.__compound == tk.TOP:
        pady = (self.__entryscalepad, 0)
    else:
        pady = 0
    self._scale.grid(row=2, column=2, sticky='ew' if horizontal else 'ns',
                     padx=padx, pady=pady)
    # The entry goes into the grid cell on the requested side of the scale.
    entry_row = 1 if self.__compound == tk.TOP else \
        3 if self.__compound == tk.BOTTOM else 2
    entry_column = 1 if self.__compound == tk.LEFT else \
        3 if self.__compound == tk.RIGHT else 2
    self._entry.grid(row=entry_row, column=entry_column)
    # Along the scale's axis the middle cell absorbs extra space; across
    # it the outer cells stretch so the pair stays centered.
    if horizontal:
        along, across = self.columnconfigure, self.rowconfigure
    else:
        along, across = self.rowconfigure, self.columnconfigure
    along(0, weight=0)
    along(2, weight=1)
    along(4, weight=0)
    across(0, weight=1)
    across(2, weight=0)
    across(4, weight=1)
def function[_grid_widgets, parameter[self]]: constant[Put the widgets in the correct position based on self.__compound.] variable[orient] assign[=] call[name[str], parameter[call[name[self]._scale.cget, parameter[constant[orient]]]]] call[name[self]._scale.grid, parameter[]] call[name[self]._entry.grid, parameter[]] if compare[name[orient] equal[==] name[tk].HORIZONTAL] begin[:] call[name[self].columnconfigure, parameter[constant[0]]] call[name[self].columnconfigure, parameter[constant[2]]] call[name[self].columnconfigure, parameter[constant[4]]] call[name[self].rowconfigure, parameter[constant[0]]] call[name[self].rowconfigure, parameter[constant[2]]] call[name[self].rowconfigure, parameter[constant[4]]]
keyword[def] identifier[_grid_widgets] ( identifier[self] ): literal[string] identifier[orient] = identifier[str] ( identifier[self] . identifier[_scale] . identifier[cget] ( literal[string] )) identifier[self] . identifier[_scale] . identifier[grid] ( identifier[row] = literal[int] , identifier[column] = literal[int] , identifier[sticky] = literal[string] keyword[if] identifier[orient] == identifier[tk] . identifier[HORIZONTAL] keyword[else] literal[string] , identifier[padx] =( literal[int] , identifier[self] . identifier[__entryscalepad] ) keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[RIGHT] keyword[else] ( identifier[self] . identifier[__entryscalepad] , literal[int] ) keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[LEFT] keyword[else] literal[int] , identifier[pady] =( literal[int] , identifier[self] . identifier[__entryscalepad] ) keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[BOTTOM] keyword[else] ( identifier[self] . identifier[__entryscalepad] , literal[int] ) keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[TOP] keyword[else] literal[int] ) identifier[self] . identifier[_entry] . identifier[grid] ( identifier[row] = literal[int] keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[TOP] keyword[else] literal[int] keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[BOTTOM] keyword[else] literal[int] , identifier[column] = literal[int] keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[LEFT] keyword[else] literal[int] keyword[if] identifier[self] . identifier[__compound] keyword[is] identifier[tk] . identifier[RIGHT] keyword[else] literal[int] ) keyword[if] identifier[orient] == identifier[tk] . identifier[HORIZONTAL] : identifier[self] . 
identifier[columnconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[columnconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[columnconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[rowconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[rowconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[rowconfigure] ( literal[int] , identifier[weight] = literal[int] ) keyword[else] : identifier[self] . identifier[rowconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[rowconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[rowconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[columnconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[columnconfigure] ( literal[int] , identifier[weight] = literal[int] ) identifier[self] . identifier[columnconfigure] ( literal[int] , identifier[weight] = literal[int] )
def _grid_widgets(self): """Put the widgets in the correct position based on self.__compound.""" orient = str(self._scale.cget('orient')) self._scale.grid(row=2, column=2, sticky='ew' if orient == tk.HORIZONTAL else 'ns', padx=(0, self.__entryscalepad) if self.__compound is tk.RIGHT else (self.__entryscalepad, 0) if self.__compound is tk.LEFT else 0, pady=(0, self.__entryscalepad) if self.__compound is tk.BOTTOM else (self.__entryscalepad, 0) if self.__compound is tk.TOP else 0) self._entry.grid(row=1 if self.__compound is tk.TOP else 3 if self.__compound is tk.BOTTOM else 2, column=1 if self.__compound is tk.LEFT else 3 if self.__compound is tk.RIGHT else 2) if orient == tk.HORIZONTAL: self.columnconfigure(0, weight=0) self.columnconfigure(2, weight=1) self.columnconfigure(4, weight=0) self.rowconfigure(0, weight=1) self.rowconfigure(2, weight=0) self.rowconfigure(4, weight=1) # depends on [control=['if'], data=[]] else: self.rowconfigure(0, weight=0) self.rowconfigure(2, weight=1) self.rowconfigure(4, weight=0) self.columnconfigure(0, weight=1) self.columnconfigure(2, weight=0) self.columnconfigure(4, weight=1)
def gpio_properties(self):
    """Returns the properties of the user-controllable GPIOs.

    Provided the device supports user-controllable GPIOs, they
    will be returned by this method.

    Args:
      self (JLink): the ``JLink`` instance

    Returns:
      A list of ``JLinkGPIODescriptor`` instances totalling the number
      of requested properties.

    Raises:
      JLinkException: on error.
    """
    # First call with null arguments only queries the descriptor count.
    count = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)
    if count < 0:
        raise errors.JLinkException(count)

    # Second call fills a ctypes array sized for all descriptors.
    descriptors = (structs.JLinkGPIODescriptor * count)()
    status = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(descriptors), count)
    if status < 0:
        raise errors.JLinkException(status)

    return list(descriptors)
def function[gpio_properties, parameter[self]]: constant[Returns the properties of the user-controllable GPIOs. Provided the device supports user-controllable GPIOs, they will be returned by this method. Args: self (JLink): the ``JLink`` instance Returns: A list of ``JLinkGPIODescriptor`` instances totalling the number of requested properties. Raises: JLinkException: on error. ] variable[res] assign[=] call[name[self]._dll.JLINK_EMU_GPIO_GetProps, parameter[constant[0], constant[0]]] if compare[name[res] less[<] constant[0]] begin[:] <ast.Raise object at 0x7da204622ce0> variable[num_props] assign[=] name[res] variable[buf] assign[=] call[binary_operation[name[structs].JLinkGPIODescriptor * name[num_props]], parameter[]] variable[res] assign[=] call[name[self]._dll.JLINK_EMU_GPIO_GetProps, parameter[call[name[ctypes].byref, parameter[name[buf]]], name[num_props]]] if compare[name[res] less[<] constant[0]] begin[:] <ast.Raise object at 0x7da2046205b0> return[call[name[list], parameter[name[buf]]]]
keyword[def] identifier[gpio_properties] ( identifier[self] ): literal[string] identifier[res] = identifier[self] . identifier[_dll] . identifier[JLINK_EMU_GPIO_GetProps] ( literal[int] , literal[int] ) keyword[if] identifier[res] < literal[int] : keyword[raise] identifier[errors] . identifier[JLinkException] ( identifier[res] ) identifier[num_props] = identifier[res] identifier[buf] =( identifier[structs] . identifier[JLinkGPIODescriptor] * identifier[num_props] )() identifier[res] = identifier[self] . identifier[_dll] . identifier[JLINK_EMU_GPIO_GetProps] ( identifier[ctypes] . identifier[byref] ( identifier[buf] ), identifier[num_props] ) keyword[if] identifier[res] < literal[int] : keyword[raise] identifier[errors] . identifier[JLinkException] ( identifier[res] ) keyword[return] identifier[list] ( identifier[buf] )
def gpio_properties(self): """Returns the properties of the user-controllable GPIOs. Provided the device supports user-controllable GPIOs, they will be returned by this method. Args: self (JLink): the ``JLink`` instance Returns: A list of ``JLinkGPIODescriptor`` instances totalling the number of requested properties. Raises: JLinkException: on error. """ res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0) if res < 0: raise errors.JLinkException(res) # depends on [control=['if'], data=['res']] num_props = res buf = (structs.JLinkGPIODescriptor * num_props)() res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props) if res < 0: raise errors.JLinkException(res) # depends on [control=['if'], data=['res']] return list(buf)
def _expect_token(self, expected): """ Compares the next token in the stream to the specified token. `expected` Expected token string to match. * Raises a ``ParseError`` exception if token doesn't match `expected`. """ item = self._lexer.get_token() if not item: raise ParseError(u'Unexpected end of file') else: line_no, token = item if token != expected: raise ParseError(u"Unexpected token '{0}', " u"expecting '{1}' on line {2}" .format(common.from_utf8(token.strip()), expected, line_no))
def function[_expect_token, parameter[self, expected]]: constant[ Compares the next token in the stream to the specified token. `expected` Expected token string to match. * Raises a ``ParseError`` exception if token doesn't match `expected`. ] variable[item] assign[=] call[name[self]._lexer.get_token, parameter[]] if <ast.UnaryOp object at 0x7da18f721180> begin[:] <ast.Raise object at 0x7da18f722a70> if compare[name[token] not_equal[!=] name[expected]] begin[:] <ast.Raise object at 0x7da18f723e80>
keyword[def] identifier[_expect_token] ( identifier[self] , identifier[expected] ): literal[string] identifier[item] = identifier[self] . identifier[_lexer] . identifier[get_token] () keyword[if] keyword[not] identifier[item] : keyword[raise] identifier[ParseError] ( literal[string] ) keyword[else] : identifier[line_no] , identifier[token] = identifier[item] keyword[if] identifier[token] != identifier[expected] : keyword[raise] identifier[ParseError] ( literal[string] literal[string] . identifier[format] ( identifier[common] . identifier[from_utf8] ( identifier[token] . identifier[strip] ()), identifier[expected] , identifier[line_no] ))
def _expect_token(self, expected): """ Compares the next token in the stream to the specified token. `expected` Expected token string to match. * Raises a ``ParseError`` exception if token doesn't match `expected`. """ item = self._lexer.get_token() if not item: raise ParseError(u'Unexpected end of file') # depends on [control=['if'], data=[]] else: (line_no, token) = item if token != expected: raise ParseError(u"Unexpected token '{0}', expecting '{1}' on line {2}".format(common.from_utf8(token.strip()), expected, line_no)) # depends on [control=['if'], data=['token', 'expected']]
def getPcn(dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
    """Construct the constraint set projection function for convolutional
    dictionary update problem.

    Parameters
    ----------
    dsz : tuple
      Filter support size(s), specified using the same format as the `dsz`
      parameter of :func:`bcrop`
    Nv : tuple
      Sizes of problem spatial indices
    dimN : int, optional (default 2)
      Number of problem spatial indices
    dimC : int, optional (default 1)
      Number of problem channel indices
    crp : bool, optional (default False)
      Flag indicating whether the result should be cropped to the support
      of the largest filter in the dictionary.
    zm : bool, optional (default False)
      Flag indicating whether the projection function should include
      filter mean subtraction

    Returns
    -------
    fn : function
      Constraint set projection function
    """
    # Dispatch on the (crop, zero-mean) flag pair to one of the four
    # projection variants, then bind the problem geometry.
    variants = {
        (False, False): _Pcn,
        (False, True): _Pcn_zm,
        (True, False): _Pcn_crp,
        (True, True): _Pcn_zm_crp,
    }
    return functools.partial(variants[(crp, zm)],
                             dsz=dsz, Nv=Nv, dimN=dimN, dimC=dimC)
def function[getPcn, parameter[dsz, Nv, dimN, dimC, crp, zm]]: constant[Construct the constraint set projection function for convolutional dictionary update problem. Parameters ---------- dsz : tuple Filter support size(s), specified using the same format as the `dsz` parameter of :func:`bcrop` Nv : tuple Sizes of problem spatial indices dimN : int, optional (default 2) Number of problem spatial indices dimC : int, optional (default 1) Number of problem channel indices crp : bool, optional (default False) Flag indicating whether the result should be cropped to the support of the largest filter in the dictionary. zm : bool, optional (default False) Flag indicating whether the projection function should include filter mean subtraction Returns ------- fn : function Constraint set projection function ] variable[fncdict] assign[=] dictionary[[<ast.Tuple object at 0x7da1b06c5a80>, <ast.Tuple object at 0x7da1b06c52a0>, <ast.Tuple object at 0x7da1b06c5240>, <ast.Tuple object at 0x7da1b06c5b10>], [<ast.Name object at 0x7da1b06c4940>, <ast.Name object at 0x7da1b06c5a20>, <ast.Name object at 0x7da1b06c58d0>, <ast.Name object at 0x7da1b06c5930>]] variable[fnc] assign[=] call[name[fncdict]][tuple[[<ast.Name object at 0x7da1b06c49a0>, <ast.Name object at 0x7da1b06c44c0>]]] return[call[name[functools].partial, parameter[name[fnc]]]]
keyword[def] identifier[getPcn] ( identifier[dsz] , identifier[Nv] , identifier[dimN] = literal[int] , identifier[dimC] = literal[int] , identifier[crp] = keyword[False] , identifier[zm] = keyword[False] ): literal[string] identifier[fncdict] ={( keyword[False] , keyword[False] ): identifier[_Pcn] , ( keyword[False] , keyword[True] ): identifier[_Pcn_zm] , ( keyword[True] , keyword[False] ): identifier[_Pcn_crp] , ( keyword[True] , keyword[True] ): identifier[_Pcn_zm_crp] } identifier[fnc] = identifier[fncdict] [( identifier[crp] , identifier[zm] )] keyword[return] identifier[functools] . identifier[partial] ( identifier[fnc] , identifier[dsz] = identifier[dsz] , identifier[Nv] = identifier[Nv] , identifier[dimN] = identifier[dimN] , identifier[dimC] = identifier[dimC] )
def getPcn(dsz, Nv, dimN=2, dimC=1, crp=False, zm=False): """Construct the constraint set projection function for convolutional dictionary update problem. Parameters ---------- dsz : tuple Filter support size(s), specified using the same format as the `dsz` parameter of :func:`bcrop` Nv : tuple Sizes of problem spatial indices dimN : int, optional (default 2) Number of problem spatial indices dimC : int, optional (default 1) Number of problem channel indices crp : bool, optional (default False) Flag indicating whether the result should be cropped to the support of the largest filter in the dictionary. zm : bool, optional (default False) Flag indicating whether the projection function should include filter mean subtraction Returns ------- fn : function Constraint set projection function """ fncdict = {(False, False): _Pcn, (False, True): _Pcn_zm, (True, False): _Pcn_crp, (True, True): _Pcn_zm_crp} fnc = fncdict[crp, zm] return functools.partial(fnc, dsz=dsz, Nv=Nv, dimN=dimN, dimC=dimC)
def save(self, *args, **kwargs):
    """
    We want to do dimension checks and/or resizing BEFORE the original
    image is saved. Note that if we can't get image dimensions, it's
    considered an invalid image and we return without saving.

    If the image has changed, sets self.thumb to None, triggering
    post_save thumbnailer.
    """
    img = self.image

    # Check if this is an already existing photo
    try:
        old_self = self.__class__.objects.get(id=self.id)
    except ObjectDoesNotExist:
        old_self = None

    # Run on new and changed images. Also guard against old_self being
    # None: a set id with no matching row previously raised
    # AttributeError on ``old_self.image``.
    if self.id is None or self.thumb is None or old_self is None \
            or old_self.image != img:
        try:
            height = img.height
            width = img.width
        except Exception:
            # Not a reliable image; treat it as invalid and skip saving.
            return

        # If image is vertical or square (treated as vertical)...
        if height >= width:
            self.is_vertical = True

        if width > 900 or height > 1200:
            # The image is larger than we want, so downsize it BEFORE it
            # is saved, using PIL on the InMemoryUploadedFile.
            # PIL's resize() returns a *new* image; the previous code
            # discarded that result and re-saved the full-size original.
            # NOTE(review): a fixed (900, 1200) target ignores aspect
            # ratio -- confirm whether Image.thumbnail() was intended.
            image = Image.open(img)
            image = image.resize((900, 1200), Image.ANTIALIAS)
            image.save(img.path)

        try:
            ezthumb_field = get_thumbnailer(self.image)
            self.thumb = ezthumb_field.get_thumbnail({
                'size': (80, 80),
                'crop': ',-10'
            }).url.replace("\\", "/")
        except Exception as error:
            print("Error thumbnailing {}: {}".format(self.id, error))

    super(ContentImage, self).save(*args, **kwargs)
def function[save, parameter[self]]: constant[ We want to do dimension checks and/or resizing BEFORE the original image is saved. Note that if we can't get image dimensions, it's considered an invalid image and we return without saving. If the image has changed, sets self.thumb to None, triggering post_save thumbnailer. ] variable[img] assign[=] name[self].image <ast.Try object at 0x7da1b26aec50> if <ast.BoolOp object at 0x7da1b1403280> begin[:] <ast.Try object at 0x7da1b16e2fb0> if compare[name[height] greater_or_equal[>=] name[width]] begin[:] name[self].is_vertical assign[=] constant[True] if <ast.BoolOp object at 0x7da1b16e2710> begin[:] constant[ The image is larger than we want. We're going to downsize it BEFORE it is saved, using PIL on the InMemoryUploadedFile. ] variable[image] assign[=] call[name[Image].open, parameter[name[img]]] call[name[image].resize, parameter[tuple[[<ast.Constant object at 0x7da1b16e2e30>, <ast.Constant object at 0x7da1b16e3550>]], name[Image].ANTIALIAS]] call[name[image].save, parameter[name[img].path]] <ast.Try object at 0x7da1b16e3be0> call[call[name[super], parameter[name[ContentImage], name[self]]].save, parameter[<ast.Starred object at 0x7da1b16e2c20>]]
keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[img] = identifier[self] . identifier[image] keyword[try] : identifier[old_self] = identifier[self] . identifier[__class__] . identifier[objects] . identifier[get] ( identifier[id] = identifier[self] . identifier[id] ) keyword[except] identifier[ObjectDoesNotExist] : identifier[old_self] = keyword[None] keyword[if] identifier[self] . identifier[id] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[thumb] keyword[is] keyword[None] keyword[or] ( identifier[old_self] . identifier[image] != identifier[img] ): keyword[try] : identifier[height] = identifier[img] . identifier[height] identifier[width] = identifier[img] . identifier[width] keyword[except] identifier[Exception] keyword[as] identifier[error] : keyword[return] keyword[if] identifier[height] >= identifier[width] : identifier[self] . identifier[is_vertical] = keyword[True] keyword[if] identifier[width] > literal[int] keyword[or] identifier[height] > literal[int] : literal[string] identifier[image] = identifier[Image] . identifier[open] ( identifier[img] ) identifier[image] . identifier[resize] (( literal[int] , literal[int] ), identifier[Image] . identifier[ANTIALIAS] ) identifier[image] . identifier[save] ( identifier[img] . identifier[path] ) keyword[try] : identifier[ezthumb_field] = identifier[get_thumbnailer] ( identifier[self] . identifier[image] ) identifier[self] . identifier[thumb] = identifier[ezthumb_field] . identifier[get_thumbnail] ({ literal[string] :( literal[int] , literal[int] ), literal[string] : literal[string] }). identifier[url] . identifier[replace] ( literal[string] , literal[string] ) keyword[except] identifier[Exception] keyword[as] identifier[error] : identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[id] , identifier[error] )) identifier[super] ( identifier[ContentImage] , identifier[self] ). 
identifier[save] (* identifier[args] ,** identifier[kwargs] )
def save(self, *args, **kwargs): """ We want to do dimension checks and/or resizing BEFORE the original image is saved. Note that if we can't get image dimensions, it's considered an invalid image and we return without saving. If the image has changed, sets self.thumb to None, triggering post_save thumbnailer. """ img = self.image # Check if this is an already existing photo try: old_self = self.__class__.objects.get(id=self.id) # depends on [control=['try'], data=[]] except ObjectDoesNotExist: old_self = None # depends on [control=['except'], data=[]] # Run on new and changed images: if self.id is None or self.thumb is None or old_self.image != img: try: height = img.height width = img.width # depends on [control=['try'], data=[]] except Exception as error: # We aren't dealing with a reliable image, so.... #print("Error getting image height or width: {}".format(error)) return # depends on [control=['except'], data=[]] # If image is vertical or square (treated as vertical)... if height >= width: self.is_vertical = True # depends on [control=['if'], data=[]] if width > 900 or height > 1200: "\n The image is larger than we want.\n We're going to downsize it BEFORE it is saved,\n using PIL on the InMemoryUploadedFile.\n " image = Image.open(img) image.resize((900, 1200), Image.ANTIALIAS) image.save(img.path) # depends on [control=['if'], data=[]] try: ezthumb_field = get_thumbnailer(self.image) self.thumb = ezthumb_field.get_thumbnail({'size': (80, 80), 'crop': ',-10'}).url.replace('\\', '/') # depends on [control=['try'], data=[]] except Exception as error: print('Error thumbnailing {}: {}'.format(self.id, error)) # depends on [control=['except'], data=['error']] # depends on [control=['if'], data=[]] super(ContentImage, self).save(*args, **kwargs)
def simple_db_engine(reader=None, srnos=None): """engine that gets values from the simple excel 'db'""" if reader is None: reader = dbreader.Reader() logger.debug("No reader provided. Creating one myself.") info_dict = dict() info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos] info_dict["masses"] = [reader.get_mass(srno) for srno in srnos] info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos] info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos] info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos] info_dict["labels"] = [reader.get_label(srno) for srno in srnos] info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos] info_dict["raw_file_names"] = [] info_dict["cellpy_file_names"] = [] logger.debug("created info-dict") for key in list(info_dict.keys()): logger.debug("%s: %s" % (key, str(info_dict[key]))) _groups = [reader.get_group(srno) for srno in srnos] logger.debug(">\ngroups: %s" % str(_groups)) groups = helper.fix_groups(_groups) info_dict["groups"] = groups my_timer_start = time.time() filename_cache = [] info_dict = helper.find_files(info_dict, filename_cache) my_timer_end = time.time() if (my_timer_end - my_timer_start) > 5.0: logger.info( "The function _find_files was very slow. " "Save your info_df so you don't have to run it again!" ) info_df = pd.DataFrame(info_dict) info_df = info_df.sort_values(["groups", "filenames"]) info_df = helper.make_unique_groups(info_df) info_df["labels"] = info_df["filenames"].apply(helper.create_labels) info_df.set_index("filenames", inplace=True) return info_df
def function[simple_db_engine, parameter[reader, srnos]]: constant[engine that gets values from the simple excel 'db'] if compare[name[reader] is constant[None]] begin[:] variable[reader] assign[=] call[name[dbreader].Reader, parameter[]] call[name[logger].debug, parameter[constant[No reader provided. Creating one myself.]]] variable[info_dict] assign[=] call[name[dict], parameter[]] call[name[info_dict]][constant[filenames]] assign[=] <ast.ListComp object at 0x7da1b1b9d7b0> call[name[info_dict]][constant[masses]] assign[=] <ast.ListComp object at 0x7da1b1b9cfd0> call[name[info_dict]][constant[total_masses]] assign[=] <ast.ListComp object at 0x7da1b1b9eb30> call[name[info_dict]][constant[loadings]] assign[=] <ast.ListComp object at 0x7da1b1b9c310> call[name[info_dict]][constant[fixed]] assign[=] <ast.ListComp object at 0x7da1b1b9ffa0> call[name[info_dict]][constant[labels]] assign[=] <ast.ListComp object at 0x7da1b1b9d390> call[name[info_dict]][constant[cell_type]] assign[=] <ast.ListComp object at 0x7da1b1a1ef80> call[name[info_dict]][constant[raw_file_names]] assign[=] list[[]] call[name[info_dict]][constant[cellpy_file_names]] assign[=] list[[]] call[name[logger].debug, parameter[constant[created info-dict]]] for taget[name[key]] in starred[call[name[list], parameter[call[name[info_dict].keys, parameter[]]]]] begin[:] call[name[logger].debug, parameter[binary_operation[constant[%s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1b9c7c0>, <ast.Call object at 0x7da1b1b9ce80>]]]]] variable[_groups] assign[=] <ast.ListComp object at 0x7da1b1992bc0> call[name[logger].debug, parameter[binary_operation[constant[> groups: %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[_groups]]]]]] variable[groups] assign[=] call[name[helper].fix_groups, parameter[name[_groups]]] call[name[info_dict]][constant[groups]] assign[=] name[groups] variable[my_timer_start] assign[=] call[name[time].time, parameter[]] variable[filename_cache] 
assign[=] list[[]] variable[info_dict] assign[=] call[name[helper].find_files, parameter[name[info_dict], name[filename_cache]]] variable[my_timer_end] assign[=] call[name[time].time, parameter[]] if compare[binary_operation[name[my_timer_end] - name[my_timer_start]] greater[>] constant[5.0]] begin[:] call[name[logger].info, parameter[constant[The function _find_files was very slow. Save your info_df so you don't have to run it again!]]] variable[info_df] assign[=] call[name[pd].DataFrame, parameter[name[info_dict]]] variable[info_df] assign[=] call[name[info_df].sort_values, parameter[list[[<ast.Constant object at 0x7da1b191cc40>, <ast.Constant object at 0x7da1b191e920>]]]] variable[info_df] assign[=] call[name[helper].make_unique_groups, parameter[name[info_df]]] call[name[info_df]][constant[labels]] assign[=] call[call[name[info_df]][constant[filenames]].apply, parameter[name[helper].create_labels]] call[name[info_df].set_index, parameter[constant[filenames]]] return[name[info_df]]
keyword[def] identifier[simple_db_engine] ( identifier[reader] = keyword[None] , identifier[srnos] = keyword[None] ): literal[string] keyword[if] identifier[reader] keyword[is] keyword[None] : identifier[reader] = identifier[dbreader] . identifier[Reader] () identifier[logger] . identifier[debug] ( literal[string] ) identifier[info_dict] = identifier[dict] () identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[get_cell_name] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[get_mass] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[get_total_mass] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[get_loading] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[inspect_hd5f_fixed] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[get_label] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[ identifier[reader] . identifier[get_cell_type] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[info_dict] [ literal[string] ]=[] identifier[info_dict] [ literal[string] ]=[] identifier[logger] . identifier[debug] ( literal[string] ) keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[info_dict] . identifier[keys] ()): identifier[logger] . 
identifier[debug] ( literal[string] %( identifier[key] , identifier[str] ( identifier[info_dict] [ identifier[key] ]))) identifier[_groups] =[ identifier[reader] . identifier[get_group] ( identifier[srno] ) keyword[for] identifier[srno] keyword[in] identifier[srnos] ] identifier[logger] . identifier[debug] ( literal[string] % identifier[str] ( identifier[_groups] )) identifier[groups] = identifier[helper] . identifier[fix_groups] ( identifier[_groups] ) identifier[info_dict] [ literal[string] ]= identifier[groups] identifier[my_timer_start] = identifier[time] . identifier[time] () identifier[filename_cache] =[] identifier[info_dict] = identifier[helper] . identifier[find_files] ( identifier[info_dict] , identifier[filename_cache] ) identifier[my_timer_end] = identifier[time] . identifier[time] () keyword[if] ( identifier[my_timer_end] - identifier[my_timer_start] )> literal[int] : identifier[logger] . identifier[info] ( literal[string] literal[string] ) identifier[info_df] = identifier[pd] . identifier[DataFrame] ( identifier[info_dict] ) identifier[info_df] = identifier[info_df] . identifier[sort_values] ([ literal[string] , literal[string] ]) identifier[info_df] = identifier[helper] . identifier[make_unique_groups] ( identifier[info_df] ) identifier[info_df] [ literal[string] ]= identifier[info_df] [ literal[string] ]. identifier[apply] ( identifier[helper] . identifier[create_labels] ) identifier[info_df] . identifier[set_index] ( literal[string] , identifier[inplace] = keyword[True] ) keyword[return] identifier[info_df]
def simple_db_engine(reader=None, srnos=None): """engine that gets values from the simple excel 'db'""" if reader is None: reader = dbreader.Reader() logger.debug('No reader provided. Creating one myself.') # depends on [control=['if'], data=['reader']] info_dict = dict() info_dict['filenames'] = [reader.get_cell_name(srno) for srno in srnos] info_dict['masses'] = [reader.get_mass(srno) for srno in srnos] info_dict['total_masses'] = [reader.get_total_mass(srno) for srno in srnos] info_dict['loadings'] = [reader.get_loading(srno) for srno in srnos] info_dict['fixed'] = [reader.inspect_hd5f_fixed(srno) for srno in srnos] info_dict['labels'] = [reader.get_label(srno) for srno in srnos] info_dict['cell_type'] = [reader.get_cell_type(srno) for srno in srnos] info_dict['raw_file_names'] = [] info_dict['cellpy_file_names'] = [] logger.debug('created info-dict') for key in list(info_dict.keys()): logger.debug('%s: %s' % (key, str(info_dict[key]))) # depends on [control=['for'], data=['key']] _groups = [reader.get_group(srno) for srno in srnos] logger.debug('>\ngroups: %s' % str(_groups)) groups = helper.fix_groups(_groups) info_dict['groups'] = groups my_timer_start = time.time() filename_cache = [] info_dict = helper.find_files(info_dict, filename_cache) my_timer_end = time.time() if my_timer_end - my_timer_start > 5.0: logger.info("The function _find_files was very slow. Save your info_df so you don't have to run it again!") # depends on [control=['if'], data=[]] info_df = pd.DataFrame(info_dict) info_df = info_df.sort_values(['groups', 'filenames']) info_df = helper.make_unique_groups(info_df) info_df['labels'] = info_df['filenames'].apply(helper.create_labels) info_df.set_index('filenames', inplace=True) return info_df
def is_parsable(url): """Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it. Args: url (str): The URL to check. Returns: bool: True if parsable, False otherwise. """ try: parsed = urlparse(url) URLHelper.__cache[url] = parsed return True except: return False
def function[is_parsable, parameter[url]]: constant[Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it. Args: url (str): The URL to check. Returns: bool: True if parsable, False otherwise. ] <ast.Try object at 0x7da18f09eaa0>
keyword[def] identifier[is_parsable] ( identifier[url] ): literal[string] keyword[try] : identifier[parsed] = identifier[urlparse] ( identifier[url] ) identifier[URLHelper] . identifier[__cache] [ identifier[url] ]= identifier[parsed] keyword[return] keyword[True] keyword[except] : keyword[return] keyword[False]
def is_parsable(url): """Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it. Args: url (str): The URL to check. Returns: bool: True if parsable, False otherwise. """ try: parsed = urlparse(url) URLHelper.__cache[url] = parsed return True # depends on [control=['try'], data=[]] except: return False # depends on [control=['except'], data=[]]
def prune_directory(self): """Delete any objects that can be loaded and are expired according to the current lifetime setting. A file will be deleted if the following conditions are met: - The file extension matches :py:meth:`bucketcache.backends.Backend.file_extension` - The object can be loaded by the configured backend. - The object's expiration date has passed. Returns: File size and number of files deleted. :rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo` .. note:: For any buckets that share directories, ``prune_directory`` will affect files saved with both, if they use the same backend class. This is not destructive, because only files that have expired according to the lifetime of the original bucket are deleted. """ glob = '*.{ext}'.format(ext=self.backend.file_extension) totalsize = 0 totalnum = 0 for f in self._path.glob(glob): filesize = f.stat().st_size key_hash = f.stem in_cache = key_hash in self._cache try: self._get_obj_from_hash(key_hash) except KeyExpirationError: # File has been deleted by `_get_obj_from_hash` totalsize += filesize totalnum += 1 except KeyInvalidError: pass except Exception: raise else: if not in_cache: del self._cache[key_hash] return PrunedFilesInfo(size=totalsize, num=totalnum)
def function[prune_directory, parameter[self]]: constant[Delete any objects that can be loaded and are expired according to the current lifetime setting. A file will be deleted if the following conditions are met: - The file extension matches :py:meth:`bucketcache.backends.Backend.file_extension` - The object can be loaded by the configured backend. - The object's expiration date has passed. Returns: File size and number of files deleted. :rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo` .. note:: For any buckets that share directories, ``prune_directory`` will affect files saved with both, if they use the same backend class. This is not destructive, because only files that have expired according to the lifetime of the original bucket are deleted. ] variable[glob] assign[=] call[constant[*.{ext}].format, parameter[]] variable[totalsize] assign[=] constant[0] variable[totalnum] assign[=] constant[0] for taget[name[f]] in starred[call[name[self]._path.glob, parameter[name[glob]]]] begin[:] variable[filesize] assign[=] call[name[f].stat, parameter[]].st_size variable[key_hash] assign[=] name[f].stem variable[in_cache] assign[=] compare[name[key_hash] in name[self]._cache] <ast.Try object at 0x7da18f58e680> return[call[name[PrunedFilesInfo], parameter[]]]
keyword[def] identifier[prune_directory] ( identifier[self] ): literal[string] identifier[glob] = literal[string] . identifier[format] ( identifier[ext] = identifier[self] . identifier[backend] . identifier[file_extension] ) identifier[totalsize] = literal[int] identifier[totalnum] = literal[int] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[_path] . identifier[glob] ( identifier[glob] ): identifier[filesize] = identifier[f] . identifier[stat] (). identifier[st_size] identifier[key_hash] = identifier[f] . identifier[stem] identifier[in_cache] = identifier[key_hash] keyword[in] identifier[self] . identifier[_cache] keyword[try] : identifier[self] . identifier[_get_obj_from_hash] ( identifier[key_hash] ) keyword[except] identifier[KeyExpirationError] : identifier[totalsize] += identifier[filesize] identifier[totalnum] += literal[int] keyword[except] identifier[KeyInvalidError] : keyword[pass] keyword[except] identifier[Exception] : keyword[raise] keyword[else] : keyword[if] keyword[not] identifier[in_cache] : keyword[del] identifier[self] . identifier[_cache] [ identifier[key_hash] ] keyword[return] identifier[PrunedFilesInfo] ( identifier[size] = identifier[totalsize] , identifier[num] = identifier[totalnum] )
def prune_directory(self): """Delete any objects that can be loaded and are expired according to the current lifetime setting. A file will be deleted if the following conditions are met: - The file extension matches :py:meth:`bucketcache.backends.Backend.file_extension` - The object can be loaded by the configured backend. - The object's expiration date has passed. Returns: File size and number of files deleted. :rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo` .. note:: For any buckets that share directories, ``prune_directory`` will affect files saved with both, if they use the same backend class. This is not destructive, because only files that have expired according to the lifetime of the original bucket are deleted. """ glob = '*.{ext}'.format(ext=self.backend.file_extension) totalsize = 0 totalnum = 0 for f in self._path.glob(glob): filesize = f.stat().st_size key_hash = f.stem in_cache = key_hash in self._cache try: self._get_obj_from_hash(key_hash) # depends on [control=['try'], data=[]] except KeyExpirationError: # File has been deleted by `_get_obj_from_hash` totalsize += filesize totalnum += 1 # depends on [control=['except'], data=[]] except KeyInvalidError: pass # depends on [control=['except'], data=[]] except Exception: raise # depends on [control=['except'], data=[]] else: if not in_cache: del self._cache[key_hash] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] return PrunedFilesInfo(size=totalsize, num=totalnum)
def list_vpnservices(self, retrieve_all=True, **_params): """Fetches a list of all configured VPN services for a project.""" return self.list('vpnservices', self.vpnservices_path, retrieve_all, **_params)
def function[list_vpnservices, parameter[self, retrieve_all]]: constant[Fetches a list of all configured VPN services for a project.] return[call[name[self].list, parameter[constant[vpnservices], name[self].vpnservices_path, name[retrieve_all]]]]
keyword[def] identifier[list_vpnservices] ( identifier[self] , identifier[retrieve_all] = keyword[True] ,** identifier[_params] ): literal[string] keyword[return] identifier[self] . identifier[list] ( literal[string] , identifier[self] . identifier[vpnservices_path] , identifier[retrieve_all] , ** identifier[_params] )
def list_vpnservices(self, retrieve_all=True, **_params): """Fetches a list of all configured VPN services for a project.""" return self.list('vpnservices', self.vpnservices_path, retrieve_all, **_params)
def reboot(vm_name, call=None): ''' Call GCE 'reset' on the instance. CLI Example: .. code-block:: bash salt-cloud -a reboot myinstance ''' if call != 'action': raise SaltCloudSystemExit( 'The reboot action must be called with -a or --action.' ) conn = get_conn() __utils__['cloud.fire_event']( 'event', 'reboot instance', 'salt/cloud/{0}/rebooting'.format(vm_name), args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) result = conn.reboot_node( conn.ex_get_node(vm_name) ) __utils__['cloud.fire_event']( 'event', 'reboot instance', 'salt/cloud/{0}/rebooted'.format(vm_name), args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return result
def function[reboot, parameter[vm_name, call]]: constant[ Call GCE 'reset' on the instance. CLI Example: .. code-block:: bash salt-cloud -a reboot myinstance ] if compare[name[call] not_equal[!=] constant[action]] begin[:] <ast.Raise object at 0x7da1b21845b0> variable[conn] assign[=] call[name[get_conn], parameter[]] call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[reboot instance], call[constant[salt/cloud/{0}/rebooting].format, parameter[name[vm_name]]]]] variable[result] assign[=] call[name[conn].reboot_node, parameter[call[name[conn].ex_get_node, parameter[name[vm_name]]]]] call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[reboot instance], call[constant[salt/cloud/{0}/rebooted].format, parameter[name[vm_name]]]]] return[name[result]]
keyword[def] identifier[reboot] ( identifier[vm_name] , identifier[call] = keyword[None] ): literal[string] keyword[if] identifier[call] != literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] ) identifier[conn] = identifier[get_conn] () identifier[__utils__] [ literal[string] ]( literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[vm_name] ), identifier[args] ={ literal[string] : identifier[vm_name] }, identifier[sock_dir] = identifier[__opts__] [ literal[string] ], identifier[transport] = identifier[__opts__] [ literal[string] ] ) identifier[result] = identifier[conn] . identifier[reboot_node] ( identifier[conn] . identifier[ex_get_node] ( identifier[vm_name] ) ) identifier[__utils__] [ literal[string] ]( literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[vm_name] ), identifier[args] ={ literal[string] : identifier[vm_name] }, identifier[sock_dir] = identifier[__opts__] [ literal[string] ], identifier[transport] = identifier[__opts__] [ literal[string] ] ) keyword[return] identifier[result]
def reboot(vm_name, call=None): """ Call GCE 'reset' on the instance. CLI Example: .. code-block:: bash salt-cloud -a reboot myinstance """ if call != 'action': raise SaltCloudSystemExit('The reboot action must be called with -a or --action.') # depends on [control=['if'], data=[]] conn = get_conn() __utils__['cloud.fire_event']('event', 'reboot instance', 'salt/cloud/{0}/rebooting'.format(vm_name), args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) result = conn.reboot_node(conn.ex_get_node(vm_name)) __utils__['cloud.fire_event']('event', 'reboot instance', 'salt/cloud/{0}/rebooted'.format(vm_name), args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) return result
def kpath_from_seekpath(cls, seekpath, point_coords): r"""Convert seekpath-formatted kpoints path to sumo-preferred format. If 'GAMMA' is used as a label this will be replaced by '\Gamma'. Args: seekpath (list): A :obj:`list` of 2-tuples containing the labels at each side of each segment of the k-point path:: [(A, B), (B, C), (C, D), ...] where a break in the sequence is indicated by a non-repeating label. E.g.:: [(A, B), (B, C), (D, E), ...] for a break between C and D. point_coords (dict): Dict of coordinates corresponding to k-point labels:: {'GAMMA': [0., 0., 0.], ...} Returns: dict: The path and k-points as:: { 'path', [[l1, l2, l3], [l4, l5], ...], 'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...} } """ # convert from seekpath format e.g. [(l1, l2), (l2, l3), (l4, l5)] # to our preferred representation [[l1, l2, l3], [l4, l5]] path = [[seekpath[0][0]]] for (k1, k2) in seekpath: if path[-1] and path[-1][-1] == k1: path[-1].append(k2) else: path.append([k1, k2]) # Rebuild kpoints dictionary skipping any positions not on path # (chain(*list) flattens nested list; set() removes duplicates.) kpoints = {p: point_coords[p] for p in set(chain(*path))} # Every path should include Gamma-point. Change the label to \Gamma assert 'GAMMA' in kpoints kpoints[r'\Gamma'] = kpoints.pop('GAMMA') path = [[label.replace('GAMMA', r'\Gamma') for label in subpath] for subpath in path] return {'kpoints': kpoints, 'path': path}
def function[kpath_from_seekpath, parameter[cls, seekpath, point_coords]]: constant[Convert seekpath-formatted kpoints path to sumo-preferred format. If 'GAMMA' is used as a label this will be replaced by '\Gamma'. Args: seekpath (list): A :obj:`list` of 2-tuples containing the labels at each side of each segment of the k-point path:: [(A, B), (B, C), (C, D), ...] where a break in the sequence is indicated by a non-repeating label. E.g.:: [(A, B), (B, C), (D, E), ...] for a break between C and D. point_coords (dict): Dict of coordinates corresponding to k-point labels:: {'GAMMA': [0., 0., 0.], ...} Returns: dict: The path and k-points as:: { 'path', [[l1, l2, l3], [l4, l5], ...], 'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...} } ] variable[path] assign[=] list[[<ast.List object at 0x7da1b24b2e30>]] for taget[tuple[[<ast.Name object at 0x7da1b24b12a0>, <ast.Name object at 0x7da1b24b1570>]]] in starred[name[seekpath]] begin[:] if <ast.BoolOp object at 0x7da1b24b1840> begin[:] call[call[name[path]][<ast.UnaryOp object at 0x7da1b24b0a90>].append, parameter[name[k2]]] variable[kpoints] assign[=] <ast.DictComp object at 0x7da1b24b11e0> assert[compare[constant[GAMMA] in name[kpoints]]] call[name[kpoints]][constant[\Gamma]] assign[=] call[name[kpoints].pop, parameter[constant[GAMMA]]] variable[path] assign[=] <ast.ListComp object at 0x7da1b24b35e0> return[dictionary[[<ast.Constant object at 0x7da1b24b2260>, <ast.Constant object at 0x7da1b24b2500>], [<ast.Name object at 0x7da1b24b21a0>, <ast.Name object at 0x7da1b24b2200>]]]
keyword[def] identifier[kpath_from_seekpath] ( identifier[cls] , identifier[seekpath] , identifier[point_coords] ): literal[string] identifier[path] =[[ identifier[seekpath] [ literal[int] ][ literal[int] ]]] keyword[for] ( identifier[k1] , identifier[k2] ) keyword[in] identifier[seekpath] : keyword[if] identifier[path] [- literal[int] ] keyword[and] identifier[path] [- literal[int] ][- literal[int] ]== identifier[k1] : identifier[path] [- literal[int] ]. identifier[append] ( identifier[k2] ) keyword[else] : identifier[path] . identifier[append] ([ identifier[k1] , identifier[k2] ]) identifier[kpoints] ={ identifier[p] : identifier[point_coords] [ identifier[p] ] keyword[for] identifier[p] keyword[in] identifier[set] ( identifier[chain] (* identifier[path] ))} keyword[assert] literal[string] keyword[in] identifier[kpoints] identifier[kpoints] [ literal[string] ]= identifier[kpoints] . identifier[pop] ( literal[string] ) identifier[path] =[[ identifier[label] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[label] keyword[in] identifier[subpath] ] keyword[for] identifier[subpath] keyword[in] identifier[path] ] keyword[return] { literal[string] : identifier[kpoints] , literal[string] : identifier[path] }
def kpath_from_seekpath(cls, seekpath, point_coords): """Convert seekpath-formatted kpoints path to sumo-preferred format. If 'GAMMA' is used as a label this will be replaced by '\\Gamma'. Args: seekpath (list): A :obj:`list` of 2-tuples containing the labels at each side of each segment of the k-point path:: [(A, B), (B, C), (C, D), ...] where a break in the sequence is indicated by a non-repeating label. E.g.:: [(A, B), (B, C), (D, E), ...] for a break between C and D. point_coords (dict): Dict of coordinates corresponding to k-point labels:: {'GAMMA': [0., 0., 0.], ...} Returns: dict: The path and k-points as:: { 'path', [[l1, l2, l3], [l4, l5], ...], 'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...} } """ # convert from seekpath format e.g. [(l1, l2), (l2, l3), (l4, l5)] # to our preferred representation [[l1, l2, l3], [l4, l5]] path = [[seekpath[0][0]]] for (k1, k2) in seekpath: if path[-1] and path[-1][-1] == k1: path[-1].append(k2) # depends on [control=['if'], data=[]] else: path.append([k1, k2]) # depends on [control=['for'], data=[]] # Rebuild kpoints dictionary skipping any positions not on path # (chain(*list) flattens nested list; set() removes duplicates.) kpoints = {p: point_coords[p] for p in set(chain(*path))} # Every path should include Gamma-point. Change the label to \Gamma assert 'GAMMA' in kpoints kpoints['\\Gamma'] = kpoints.pop('GAMMA') path = [[label.replace('GAMMA', '\\Gamma') for label in subpath] for subpath in path] return {'kpoints': kpoints, 'path': path}
def _define_array_view(data_type): """Define a new view object for a `Array` type.""" element_type = data_type.element_type element_view = _resolve_view(element_type) if element_view is None: mixins = (_DirectArrayViewMixin,) attributes = _get_mixin_attributes(mixins) elif isinstance(element_type, _ATOMIC): mixins = (_IndirectAtomicArrayViewMixin,) attributes = _get_mixin_attributes(mixins) attributes.update({ '_element_view': element_view, }) else: mixins = (_IndirectCompositeArrayViewMixin,) attributes = _get_mixin_attributes(mixins) attributes.update({ '_element_view': element_view, }) name = data_type.name if data_type.name else 'ArrayView' return type(name, (), attributes)
def function[_define_array_view, parameter[data_type]]: constant[Define a new view object for a `Array` type.] variable[element_type] assign[=] name[data_type].element_type variable[element_view] assign[=] call[name[_resolve_view], parameter[name[element_type]]] if compare[name[element_view] is constant[None]] begin[:] variable[mixins] assign[=] tuple[[<ast.Name object at 0x7da18dc9b550>]] variable[attributes] assign[=] call[name[_get_mixin_attributes], parameter[name[mixins]]] variable[name] assign[=] <ast.IfExp object at 0x7da18dc986d0> return[call[name[type], parameter[name[name], tuple[[]], name[attributes]]]]
keyword[def] identifier[_define_array_view] ( identifier[data_type] ): literal[string] identifier[element_type] = identifier[data_type] . identifier[element_type] identifier[element_view] = identifier[_resolve_view] ( identifier[element_type] ) keyword[if] identifier[element_view] keyword[is] keyword[None] : identifier[mixins] =( identifier[_DirectArrayViewMixin] ,) identifier[attributes] = identifier[_get_mixin_attributes] ( identifier[mixins] ) keyword[elif] identifier[isinstance] ( identifier[element_type] , identifier[_ATOMIC] ): identifier[mixins] =( identifier[_IndirectAtomicArrayViewMixin] ,) identifier[attributes] = identifier[_get_mixin_attributes] ( identifier[mixins] ) identifier[attributes] . identifier[update] ({ literal[string] : identifier[element_view] , }) keyword[else] : identifier[mixins] =( identifier[_IndirectCompositeArrayViewMixin] ,) identifier[attributes] = identifier[_get_mixin_attributes] ( identifier[mixins] ) identifier[attributes] . identifier[update] ({ literal[string] : identifier[element_view] , }) identifier[name] = identifier[data_type] . identifier[name] keyword[if] identifier[data_type] . identifier[name] keyword[else] literal[string] keyword[return] identifier[type] ( identifier[name] ,(), identifier[attributes] )
def _define_array_view(data_type): """Define a new view object for a `Array` type.""" element_type = data_type.element_type element_view = _resolve_view(element_type) if element_view is None: mixins = (_DirectArrayViewMixin,) attributes = _get_mixin_attributes(mixins) # depends on [control=['if'], data=[]] elif isinstance(element_type, _ATOMIC): mixins = (_IndirectAtomicArrayViewMixin,) attributes = _get_mixin_attributes(mixins) attributes.update({'_element_view': element_view}) # depends on [control=['if'], data=[]] else: mixins = (_IndirectCompositeArrayViewMixin,) attributes = _get_mixin_attributes(mixins) attributes.update({'_element_view': element_view}) name = data_type.name if data_type.name else 'ArrayView' return type(name, (), attributes)
def derivatives(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0): """ deflection angles :param x: x coordinate :param y: y coordinate :param Rs: scale radius :param rho0: central core density :param r_core: core radius :param center_x: :param center_y: :return: """ rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core) if Rs < 0.0000001: Rs = 0.0000001 x_ = x - center_x y_ = y - center_y R = np.sqrt(x_ ** 2 + y_ ** 2) dx, dy = self.coreBurkAlpha(R, Rs, rho0, r_core, x_, y_) return dx, dy
def function[derivatives, parameter[self, x, y, Rs, theta_Rs, r_core, center_x, center_y]]: constant[ deflection angles :param x: x coordinate :param y: y coordinate :param Rs: scale radius :param rho0: central core density :param r_core: core radius :param center_x: :param center_y: :return: ] variable[rho0] assign[=] call[name[self]._alpha2rho0, parameter[]] if compare[name[Rs] less[<] constant[1e-07]] begin[:] variable[Rs] assign[=] constant[1e-07] variable[x_] assign[=] binary_operation[name[x] - name[center_x]] variable[y_] assign[=] binary_operation[name[y] - name[center_y]] variable[R] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x_] ** constant[2]] + binary_operation[name[y_] ** constant[2]]]]] <ast.Tuple object at 0x7da204622ce0> assign[=] call[name[self].coreBurkAlpha, parameter[name[R], name[Rs], name[rho0], name[r_core], name[x_], name[y_]]] return[tuple[[<ast.Name object at 0x7da204621ab0>, <ast.Name object at 0x7da204623a00>]]]
keyword[def] identifier[derivatives] ( identifier[self] , identifier[x] , identifier[y] , identifier[Rs] , identifier[theta_Rs] , identifier[r_core] , identifier[center_x] = literal[int] , identifier[center_y] = literal[int] ): literal[string] identifier[rho0] = identifier[self] . identifier[_alpha2rho0] ( identifier[theta_Rs] = identifier[theta_Rs] , identifier[Rs] = identifier[Rs] , identifier[r_core] = identifier[r_core] ) keyword[if] identifier[Rs] < literal[int] : identifier[Rs] = literal[int] identifier[x_] = identifier[x] - identifier[center_x] identifier[y_] = identifier[y] - identifier[center_y] identifier[R] = identifier[np] . identifier[sqrt] ( identifier[x_] ** literal[int] + identifier[y_] ** literal[int] ) identifier[dx] , identifier[dy] = identifier[self] . identifier[coreBurkAlpha] ( identifier[R] , identifier[Rs] , identifier[rho0] , identifier[r_core] , identifier[x_] , identifier[y_] ) keyword[return] identifier[dx] , identifier[dy]
def derivatives(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0): """ deflection angles :param x: x coordinate :param y: y coordinate :param Rs: scale radius :param rho0: central core density :param r_core: core radius :param center_x: :param center_y: :return: """ rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core) if Rs < 1e-07: Rs = 1e-07 # depends on [control=['if'], data=['Rs']] x_ = x - center_x y_ = y - center_y R = np.sqrt(x_ ** 2 + y_ ** 2) (dx, dy) = self.coreBurkAlpha(R, Rs, rho0, r_core, x_, y_) return (dx, dy)
def set_empty_region(self, region_id, type_id, generated_at, error_if_entries_present=True):
        """
        Prepares for the given region+item combo by instantiating a
        :py:class:`HistoryItemsInRegionList` instance, which will track
        region ID, type ID, and generated time. This is mostly used for
        the JSON deserialization process in case there are no orders for
        the given region+item combo.

        :param int region_id: The region ID.
        :param int type_id: The item's type ID.
        :param datetime.datetime generated_at: The time that the order set
            was generated.
        :keyword bool error_if_entries_present: If True, raise an exception
            if an entry already exists for this item+region combo when this
            is called. This failsafe may be disabled by passing False here.
        :raises ItemAlreadyPresentError: If an entry already exists and
            ``error_if_entries_present`` is True.
        """
        key = '%s_%s' % (region_id, type_id)
        # dict.has_key() was removed in Python 3; a plain membership test
        # works on both Python 2 and 3.
        if error_if_entries_present and key in self._history:
            raise ItemAlreadyPresentError(
                "Orders already exist for the given region and type ID. "
                # Fixed: the message previously named a nonexistent
                # 'error_if_orders_present' kwarg.
                "Pass error_if_entries_present=False to disable this failsafe, "
                "if desired."
            )
        self._history[key] = HistoryItemsInRegionList(
            region_id, type_id, generated_at)
def function[set_empty_region, parameter[self, region_id, type_id, generated_at, error_if_entries_present]]: constant[ Prepares for the given region+item combo by instantiating a :py:class:`HistoryItemsInRegionList` instance, which will track region ID, type ID, and generated time. This is mostly used for the JSON deserialization process in case there are no orders for the given region+item combo. :param int region_id: The region ID. :param int type_id: The item's type ID. :param datetime.datetime generated_at: The time that the order set was generated. :keyword bool error_if_entries_present: If True, raise an exception if an entry already exists for this item+region combo when this is called. This failsafe may be disabled by passing False here. ] variable[key] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810c10>, <ast.Name object at 0x7da18f8108b0>]]] if <ast.BoolOp object at 0x7da18f811420> begin[:] <ast.Raise object at 0x7da18f8116c0> call[name[self]._history][name[key]] assign[=] call[name[HistoryItemsInRegionList], parameter[name[region_id], name[type_id], name[generated_at]]]
keyword[def] identifier[set_empty_region] ( identifier[self] , identifier[region_id] , identifier[type_id] , identifier[generated_at] , identifier[error_if_entries_present] = keyword[True] ): literal[string] identifier[key] = literal[string] %( identifier[region_id] , identifier[type_id] ) keyword[if] identifier[error_if_entries_present] keyword[and] identifier[self] . identifier[_history] . identifier[has_key] ( identifier[key] ): keyword[raise] identifier[ItemAlreadyPresentError] ( literal[string] literal[string] literal[string] ) identifier[self] . identifier[_history] [ identifier[key] ]= identifier[HistoryItemsInRegionList] ( identifier[region_id] , identifier[type_id] , identifier[generated_at] )
def set_empty_region(self, region_id, type_id, generated_at, error_if_entries_present=True): """ Prepares for the given region+item combo by instantiating a :py:class:`HistoryItemsInRegionList` instance, which will track region ID, type ID, and generated time. This is mostly used for the JSON deserialization process in case there are no orders for the given region+item combo. :param int region_id: The region ID. :param int type_id: The item's type ID. :param datetime.datetime generated_at: The time that the order set was generated. :keyword bool error_if_entries_present: If True, raise an exception if an entry already exists for this item+region combo when this is called. This failsafe may be disabled by passing False here. """ key = '%s_%s' % (region_id, type_id) if error_if_entries_present and self._history.has_key(key): raise ItemAlreadyPresentError('Orders already exist for the given region and type ID. Pass error_if_orders_present=False to disable this failsafe, if desired.') # depends on [control=['if'], data=[]] self._history[key] = HistoryItemsInRegionList(region_id, type_id, generated_at)
def exhaustive_ontology_ilx_diff_row_only(
            self,
            ontology_row: dict
        ) -> list:
        ''' Compare one external ontology row against every existing-ids row.

            Each existing-ids row is diffed 1-1 against *ontology_row*; only
            diff results flagged as 'same' are returned.

            WARNING: runtime is awful (full scan of ``existing_ids`` per call).

            :param ontology_row: one row of the external ontology, keyed by
                column name.
            :return: list of diff-result dicts whose ``'same'`` flag is True.
        '''
        results = []
        # itertuples yields the index first, so prepend a label for it.
        header = ['Index'] + list(self.existing_ids.columns)
        for row in self.existing_ids.itertuples():
            row = {header[i]: val for i, val in enumerate(row)}
            check_list = [
                {
                    'external_ontology_row': ontology_row,
                    'ilx_rows': [row],
                },
            ]
            # First layer for each external row. Second is for each potential ilx row. It's simple here 1-1.
            result = self.__exhaustive_diff(check_list)[0][0]
            if result['same']:
                results.append(result)
        return results
def function[exhaustive_ontology_ilx_diff_row_only, parameter[self, ontology_row]]: constant[ WARNING RUNTIME IS AWEFUL ] variable[results] assign[=] list[[]] variable[header] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1ad4790>]] + call[name[list], parameter[name[self].existing_ids.columns]]] for taget[name[row]] in starred[call[name[self].existing_ids.itertuples, parameter[]]] begin[:] variable[row] assign[=] <ast.DictComp object at 0x7da1b1ad44f0> variable[check_list] assign[=] list[[<ast.Dict object at 0x7da1b1ad41f0>]] variable[result] assign[=] call[call[call[name[self].__exhaustive_diff, parameter[name[check_list]]]][constant[0]]][constant[0]] if call[name[result]][constant[same]] begin[:] call[name[results].append, parameter[name[result]]] return[name[results]]
keyword[def] identifier[exhaustive_ontology_ilx_diff_row_only] ( identifier[self] , identifier[ontology_row] : identifier[dict] )-> identifier[dict] : literal[string] identifier[results] =[] identifier[header] =[ literal[string] ]+ identifier[list] ( identifier[self] . identifier[existing_ids] . identifier[columns] ) keyword[for] identifier[row] keyword[in] identifier[self] . identifier[existing_ids] . identifier[itertuples] (): identifier[row] ={ identifier[header] [ identifier[i] ]: identifier[val] keyword[for] identifier[i] , identifier[val] keyword[in] identifier[enumerate] ( identifier[row] )} identifier[check_list] =[ { literal[string] : identifier[ontology_row] , literal[string] :[ identifier[row] ], }, ] identifier[result] = identifier[self] . identifier[__exhaustive_diff] ( identifier[check_list] )[ literal[int] ][ literal[int] ] keyword[if] identifier[result] [ literal[string] ]: identifier[results] . identifier[append] ( identifier[result] ) keyword[return] identifier[results]
def exhaustive_ontology_ilx_diff_row_only(self, ontology_row: dict) -> dict: """ WARNING RUNTIME IS AWEFUL """ results = [] header = ['Index'] + list(self.existing_ids.columns) for row in self.existing_ids.itertuples(): row = {header[i]: val for (i, val) in enumerate(row)} check_list = [{'external_ontology_row': ontology_row, 'ilx_rows': [row]}] # First layer for each external row. Second is for each potential ilx row. It's simple here 1-1. result = self.__exhaustive_diff(check_list)[0][0] if result['same']: results.append(result) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] return results
def checksum(data):
        """Compute the one-byte two's-complement checksum of *data*.

        The returned byte is chosen so that ``(sum(data) + checksum) % 256 == 0``.

        :param data: message payload as ``bytes``, excluding the two
            checksum bytes (hence the ``- 2`` in the size bounds).
        :return: a single-byte ``bytes`` object (the original docstring
            claimed ``int``, which was wrong).
        """
        assert isinstance(data, bytes)
        assert len(data) >= MINIMUM_MESSAGE_SIZE - 2
        assert len(data) <= MAXIMUM_MESSAGE_SIZE - 2
        # (-s) % 256 is always in 0..255, so no ValueError fallback is needed.
        # The previous -(s % 256) + 256 produced 256 when the sum was already
        # a multiple of 256 and relied on catching ValueError to emit b'\x00'.
        return bytes([(-sum(data)) % 256])
def function[checksum, parameter[data]]: constant[ :return: int ] assert[call[name[isinstance], parameter[name[data], name[bytes]]]] assert[compare[call[name[len], parameter[name[data]]] greater_or_equal[>=] binary_operation[name[MINIMUM_MESSAGE_SIZE] - constant[2]]]] assert[compare[call[name[len], parameter[name[data]]] less_or_equal[<=] binary_operation[name[MAXIMUM_MESSAGE_SIZE] - constant[2]]]] variable[__checksum] assign[=] constant[0] for taget[name[data_byte]] in starred[name[data]] begin[:] <ast.AugAssign object at 0x7da20c6e63b0> variable[__checksum] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20c6e6e00> + constant[256]] <ast.Try object at 0x7da20c6e4370> return[name[__checksum]]
keyword[def] identifier[checksum] ( identifier[data] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[data] , identifier[bytes] ) keyword[assert] identifier[len] ( identifier[data] )>= identifier[MINIMUM_MESSAGE_SIZE] - literal[int] keyword[assert] identifier[len] ( identifier[data] )<= identifier[MAXIMUM_MESSAGE_SIZE] - literal[int] identifier[__checksum] = literal[int] keyword[for] identifier[data_byte] keyword[in] identifier[data] : identifier[__checksum] += identifier[data_byte] identifier[__checksum] =-( identifier[__checksum] % literal[int] )+ literal[int] keyword[try] : identifier[__checksum] = identifier[bytes] ([ identifier[__checksum] ]) keyword[except] identifier[ValueError] : identifier[__checksum] = identifier[bytes] ([ literal[int] ]) keyword[return] identifier[__checksum]
def checksum(data): """ :return: int """ assert isinstance(data, bytes) assert len(data) >= MINIMUM_MESSAGE_SIZE - 2 assert len(data) <= MAXIMUM_MESSAGE_SIZE - 2 __checksum = 0 for data_byte in data: __checksum += data_byte # depends on [control=['for'], data=['data_byte']] __checksum = -(__checksum % 256) + 256 try: __checksum = bytes([__checksum]) # depends on [control=['try'], data=[]] except ValueError: __checksum = bytes([0]) # depends on [control=['except'], data=[]] return __checksum
def _find_adapter(registry, ob):
        """Return an adapter factory for `ob` from `registry`"""
        # Walk the MRO of ob's class (falling back to type(ob) when the
        # object has no __class__) and take the first registered match.
        mro = inspect.getmro(getattr(ob, '__class__', type(ob)))
        return next(
            (registry[candidate]
             for candidate in _always_object(mro)
             if candidate in registry),
            None,
        )
def function[_find_adapter, parameter[registry, ob]]: constant[Return an adapter factory for `ob` from `registry`] variable[types] assign[=] call[name[_always_object], parameter[call[name[inspect].getmro, parameter[call[name[getattr], parameter[name[ob], constant[__class__], call[name[type], parameter[name[ob]]]]]]]]] for taget[name[t]] in starred[name[types]] begin[:] if compare[name[t] in name[registry]] begin[:] return[call[name[registry]][name[t]]]
keyword[def] identifier[_find_adapter] ( identifier[registry] , identifier[ob] ): literal[string] identifier[types] = identifier[_always_object] ( identifier[inspect] . identifier[getmro] ( identifier[getattr] ( identifier[ob] , literal[string] , identifier[type] ( identifier[ob] )))) keyword[for] identifier[t] keyword[in] identifier[types] : keyword[if] identifier[t] keyword[in] identifier[registry] : keyword[return] identifier[registry] [ identifier[t] ]
def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) for t in types: if t in registry: return registry[t] # depends on [control=['if'], data=['t', 'registry']] # depends on [control=['for'], data=['t']]
def delete_tag(self, tags):
        """Remove one or more tags from this Point, in the language you specify.
        Matching ignores case: any existing tag equal to a given tag after
        lower-casing is deleted.

        Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
        containing the error if the infrastructure detects a problem

        Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
        if there is a communications problem between you and the infrastructure

        `tags` (mandatory) (list) - the tags you want to delete from your Point,
        e.g. ["garden", "soil"]; a single string is also accepted
        """
        # Accept a bare string as a one-element tag list.
        tag_list = [tags] if isinstance(tags, str) else tags
        request = self._client._request_point_tag_update(
            self._type, self.__lid, self.__pid, tag_list, delete=True)
        self._client._wait_and_except_if_failed(request)
def function[delete_tag, parameter[self, tags]]: constant[Delete tags for a Point in the language you specify. Case will be ignored and any tags matching lower-cased will be deleted. Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `tags` (mandatory) (list) - the list of tags you want to delete from your Point, e.g. ["garden", "soil"] ] if call[name[isinstance], parameter[name[tags], name[str]]] begin[:] variable[tags] assign[=] list[[<ast.Name object at 0x7da1b1baaf80>]] variable[evt] assign[=] call[name[self]._client._request_point_tag_update, parameter[name[self]._type, name[self].__lid, name[self].__pid, name[tags]]] call[name[self]._client._wait_and_except_if_failed, parameter[name[evt]]]
keyword[def] identifier[delete_tag] ( identifier[self] , identifier[tags] ): literal[string] keyword[if] identifier[isinstance] ( identifier[tags] , identifier[str] ): identifier[tags] =[ identifier[tags] ] identifier[evt] = identifier[self] . identifier[_client] . identifier[_request_point_tag_update] ( identifier[self] . identifier[_type] , identifier[self] . identifier[__lid] , identifier[self] . identifier[__pid] , identifier[tags] , identifier[delete] = keyword[True] ) identifier[self] . identifier[_client] . identifier[_wait_and_except_if_failed] ( identifier[evt] )
def delete_tag(self, tags): """Delete tags for a Point in the language you specify. Case will be ignored and any tags matching lower-cased will be deleted. Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `tags` (mandatory) (list) - the list of tags you want to delete from your Point, e.g. ["garden", "soil"] """ if isinstance(tags, str): tags = [tags] # depends on [control=['if'], data=[]] evt = self._client._request_point_tag_update(self._type, self.__lid, self.__pid, tags, delete=True) self._client._wait_and_except_if_failed(evt)
def symmetric_elliot_function( signal, derivative=False, steepness=1.0 ):
        """ A fast, element-wise approximation of tanh (Elliott activation).

        f(x)  = s*x / (1 + |s*x|)
        f'(x) = s / (1 + |s*x|)**2

        :param signal: input value(s); any numpy-broadcastable array or scalar
        :param derivative: if True, return the derivative f'(signal) instead
            of the activation itself
        :param steepness: slope parameter ``s``; the default 1.0 reproduces
            the previous hard-coded behaviour
        :return: activation (or derivative) with the same shape as *signal*
        """
        abs_signal = (1 + np.abs(signal * steepness))
        if derivative:
            return steepness / abs_signal**2
        else:
            # Return the activation signal
            return (signal * steepness) / abs_signal
def function[symmetric_elliot_function, parameter[signal, derivative]]: constant[ A fast approximation of tanh ] variable[s] assign[=] constant[1.0] variable[abs_signal] assign[=] binary_operation[constant[1] + call[name[np].abs, parameter[binary_operation[name[signal] * name[s]]]]] if name[derivative] begin[:] return[binary_operation[name[s] / binary_operation[name[abs_signal] ** constant[2]]]]
keyword[def] identifier[symmetric_elliot_function] ( identifier[signal] , identifier[derivative] = keyword[False] ): literal[string] identifier[s] = literal[int] identifier[abs_signal] =( literal[int] + identifier[np] . identifier[abs] ( identifier[signal] * identifier[s] )) keyword[if] identifier[derivative] : keyword[return] identifier[s] / identifier[abs_signal] ** literal[int] keyword[else] : keyword[return] ( identifier[signal] * identifier[s] )/ identifier[abs_signal]
def symmetric_elliot_function(signal, derivative=False): """ A fast approximation of tanh """ s = 1.0 # steepness abs_signal = 1 + np.abs(signal * s) if derivative: return s / abs_signal ** 2 # depends on [control=['if'], data=[]] else: # Return the activation signal return signal * s / abs_signal
def from_object(cls, obj):
        # type: (Any) -> UrlPath
        """
        Attempt to convert any object into a UrlPath.

        Raise a value error if this is not possible.
        """
        # Ordered (type-check, converter) dispatch table; first match wins.
        converters = (
            (UrlPath, lambda value: value),
            (_compat.string_types, UrlPath.parse),
            (PathParam, UrlPath),
            ((tuple, list), lambda value: UrlPath(*value)),
        )
        for kind, convert in converters:
            if isinstance(obj, kind):
                return convert(obj)
        raise ValueError("Unable to convert object to UrlPath `%r`" % obj)
def function[from_object, parameter[cls, obj]]: constant[ Attempt to convert any object into a UrlPath. Raise a value error if this is not possible. ] if call[name[isinstance], parameter[name[obj], name[UrlPath]]] begin[:] return[name[obj]] if call[name[isinstance], parameter[name[obj], name[_compat].string_types]] begin[:] return[call[name[UrlPath].parse, parameter[name[obj]]]] if call[name[isinstance], parameter[name[obj], name[PathParam]]] begin[:] return[call[name[UrlPath], parameter[name[obj]]]] if call[name[isinstance], parameter[name[obj], tuple[[<ast.Name object at 0x7da20c6abc70>, <ast.Name object at 0x7da20c6a8dc0>]]]] begin[:] return[call[name[UrlPath], parameter[<ast.Starred object at 0x7da20c6aa1a0>]]] <ast.Raise object at 0x7da20c7953c0>
keyword[def] identifier[from_object] ( identifier[cls] , identifier[obj] ): literal[string] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[UrlPath] ): keyword[return] identifier[obj] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[_compat] . identifier[string_types] ): keyword[return] identifier[UrlPath] . identifier[parse] ( identifier[obj] ) keyword[if] identifier[isinstance] ( identifier[obj] , identifier[PathParam] ): keyword[return] identifier[UrlPath] ( identifier[obj] ) keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[tuple] , identifier[list] )): keyword[return] identifier[UrlPath] (* identifier[obj] ) keyword[raise] identifier[ValueError] ( literal[string] % identifier[obj] )
def from_object(cls, obj): # type: (Any) -> UrlPath '\n Attempt to convert any object into a UrlPath.\n\n Raise a value error if this is not possible.\n ' if isinstance(obj, UrlPath): return obj # depends on [control=['if'], data=[]] if isinstance(obj, _compat.string_types): return UrlPath.parse(obj) # depends on [control=['if'], data=[]] if isinstance(obj, PathParam): return UrlPath(obj) # depends on [control=['if'], data=[]] if isinstance(obj, (tuple, list)): return UrlPath(*obj) # depends on [control=['if'], data=[]] raise ValueError('Unable to convert object to UrlPath `%r`' % obj)
def _indexMib(self):
        """Rebuild a tree from MIB objects found at currently loaded modules.

        If currently existing tree is out of date, walk over all Managed
        Objects and Instances to structure Management Instrumentation
        objects into a tree of the following layout:

        MibTree
        |
        +----MibScalar
        |    |
        |    +-----MibScalarInstance
        |
        +----MibTable
        |
        +----MibTableRow
           |
           +-------MibTableColumn
                |
                +------MibScalarInstance(s)

        Notes
        -----
        Only Managed Objects (i.e. `OBJECT-TYPE`) get indexed here, various
        MIB definitions and constants can't be SNMP managed so we drop them.
        """
        # Fast path: nothing has been (re)loaded since the last rebuild.
        if self.lastBuildId == self.mibBuilder.lastBuildId:
            return

        # Base classes used below to classify every loaded MIB symbol.
        (MibScalarInstance, MibScalar, MibTableColumn,
         MibTableRow, MibTable) = self.mibBuilder.importSymbols(
            'SNMPv2-SMI', 'MibScalarInstance', 'MibScalar', 'MibTableColumn',
            'MibTableRow', 'MibTable'
        )

        # Root of the Managed Objects tree ('iso').
        mibTree, = self.mibBuilder.importSymbols('SNMPv2-SMI', 'iso')

        # Buckets keyed by OID name, one per symbol category.
        scalars = {}
        instances = {}
        tables = {}
        rows = {}
        cols = {}

        # Sort by module name to give user a chance to slip-in
        # custom MIB modules (that would be sorted out first)
        mibSymbols = list(self.mibBuilder.mibSymbols.items())
        mibSymbols.sort(key=lambda x: x[0], reverse=True)

        for modName, mibMod in mibSymbols:
            for symObj in mibMod.values():
                # NOTE: isinstance order matters -- subclasses (e.g.
                # MibTableColumn is-a MibScalar) must be tested first.
                if isinstance(symObj, MibTable):
                    tables[symObj.name] = symObj

                elif isinstance(symObj, MibTableRow):
                    rows[symObj.name] = symObj

                elif isinstance(symObj, MibTableColumn):
                    cols[symObj.name] = symObj

                elif isinstance(symObj, MibScalarInstance):
                    instances[symObj.name] = symObj

                elif isinstance(symObj, MibScalar):
                    scalars[symObj.name] = symObj

        # Detach items from each other
        for symName, parentName in self.lastBuildSyms.items():
            if parentName in scalars:
                scalars[parentName].unregisterSubtrees(symName)

            elif parentName in cols:
                cols[parentName].unregisterSubtrees(symName)

            elif parentName in rows:
                rows[parentName].unregisterSubtrees(symName)

            else:
                mibTree.unregisterSubtrees(symName)

        # Maps symbol name -> parent name; rebuilt from scratch below and
        # used by the detach pass on the next rebuild.
        lastBuildSyms = {}

        # Attach Managed Objects Instances to Managed Objects
        for inst in instances.values():
            if inst.typeName in scalars:
                scalars[inst.typeName].registerSubtrees(inst)

            elif inst.typeName in cols:
                cols[inst.typeName].registerSubtrees(inst)

            else:
                raise error.SmiError(
                    'Orphan MIB scalar instance %r at '
                    '%r' % (inst, self))

            lastBuildSyms[inst.name] = inst.typeName

        # Attach Table Columns to Table Rows
        for col in cols.values():
            # Parent row OID is the column OID minus its last sub-identifier.
            rowName = col.name[:-1]  # XXX

            if rowName in rows:
                rows[rowName].registerSubtrees(col)

            else:
                raise error.SmiError(
                    'Orphan MIB table column %r at '
                    '%r' % (col, self))

            lastBuildSyms[col.name] = rowName

        # Attach Table Rows to MIB tree
        for row in rows.values():
            mibTree.registerSubtrees(row)
            lastBuildSyms[row.name] = mibTree.name

        # Attach Tables to MIB tree
        for table in tables.values():
            mibTree.registerSubtrees(table)
            lastBuildSyms[table.name] = mibTree.name

        # Attach Scalars to MIB tree
        for scalar in scalars.values():
            mibTree.registerSubtrees(scalar)
            lastBuildSyms[scalar.name] = mibTree.name

        self.lastBuildSyms = lastBuildSyms

        # Record the builder state we just indexed against.
        self.lastBuildId = self.mibBuilder.lastBuildId

        # Emit trace only when instrumentation debugging is enabled.
        debug.logger & debug.FLAG_INS and debug.logger('_indexMib: rebuilt')
def function[_indexMib, parameter[self]]: constant[Rebuild a tree from MIB objects found at currently loaded modules. If currently existing tree is out of date, walk over all Managed Objects and Instances to structure Management Instrumentation objects into a tree of the following layout: MibTree | +----MibScalar | | | +-----MibScalarInstance | +----MibTable | +----MibTableRow | +-------MibTableColumn | +------MibScalarInstance(s) Notes ----- Only Managed Objects (i.e. `OBJECT-TYPE`) get indexed here, various MIB definitions and constants can't be SNMP managed so we drop them. ] if compare[name[self].lastBuildId equal[==] name[self].mibBuilder.lastBuildId] begin[:] return[None] <ast.Tuple object at 0x7da2041da4a0> assign[=] call[name[self].mibBuilder.importSymbols, parameter[constant[SNMPv2-SMI], constant[MibScalarInstance], constant[MibScalar], constant[MibTableColumn], constant[MibTableRow], constant[MibTable]]] <ast.Tuple object at 0x7da2041d97e0> assign[=] call[name[self].mibBuilder.importSymbols, parameter[constant[SNMPv2-SMI], constant[iso]]] variable[scalars] assign[=] dictionary[[], []] variable[instances] assign[=] dictionary[[], []] variable[tables] assign[=] dictionary[[], []] variable[rows] assign[=] dictionary[[], []] variable[cols] assign[=] dictionary[[], []] variable[mibSymbols] assign[=] call[name[list], parameter[call[name[self].mibBuilder.mibSymbols.items, parameter[]]]] call[name[mibSymbols].sort, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b155f700>, <ast.Name object at 0x7da1b155d030>]]] in starred[name[mibSymbols]] begin[:] for taget[name[symObj]] in starred[call[name[mibMod].values, parameter[]]] begin[:] if call[name[isinstance], parameter[name[symObj], name[MibTable]]] begin[:] call[name[tables]][name[symObj].name] assign[=] name[symObj] for taget[tuple[[<ast.Name object at 0x7da1b155c100>, <ast.Name object at 0x7da1b155c1f0>]]] in starred[call[name[self].lastBuildSyms.items, parameter[]]] begin[:] if compare[name[parentName] 
in name[scalars]] begin[:] call[call[name[scalars]][name[parentName]].unregisterSubtrees, parameter[name[symName]]] variable[lastBuildSyms] assign[=] dictionary[[], []] for taget[name[inst]] in starred[call[name[instances].values, parameter[]]] begin[:] if compare[name[inst].typeName in name[scalars]] begin[:] call[call[name[scalars]][name[inst].typeName].registerSubtrees, parameter[name[inst]]] call[name[lastBuildSyms]][name[inst].name] assign[=] name[inst].typeName for taget[name[col]] in starred[call[name[cols].values, parameter[]]] begin[:] variable[rowName] assign[=] call[name[col].name][<ast.Slice object at 0x7da1b17fb340>] if compare[name[rowName] in name[rows]] begin[:] call[call[name[rows]][name[rowName]].registerSubtrees, parameter[name[col]]] call[name[lastBuildSyms]][name[col].name] assign[=] name[rowName] for taget[name[row]] in starred[call[name[rows].values, parameter[]]] begin[:] call[name[mibTree].registerSubtrees, parameter[name[row]]] call[name[lastBuildSyms]][name[row].name] assign[=] name[mibTree].name for taget[name[table]] in starred[call[name[tables].values, parameter[]]] begin[:] call[name[mibTree].registerSubtrees, parameter[name[table]]] call[name[lastBuildSyms]][name[table].name] assign[=] name[mibTree].name for taget[name[scalar]] in starred[call[name[scalars].values, parameter[]]] begin[:] call[name[mibTree].registerSubtrees, parameter[name[scalar]]] call[name[lastBuildSyms]][name[scalar].name] assign[=] name[mibTree].name name[self].lastBuildSyms assign[=] name[lastBuildSyms] name[self].lastBuildId assign[=] name[self].mibBuilder.lastBuildId <ast.BoolOp object at 0x7da1b17fbac0>
keyword[def] identifier[_indexMib] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[lastBuildId] == identifier[self] . identifier[mibBuilder] . identifier[lastBuildId] : keyword[return] ( identifier[MibScalarInstance] , identifier[MibScalar] , identifier[MibTableColumn] , identifier[MibTableRow] , identifier[MibTable] )= identifier[self] . identifier[mibBuilder] . identifier[importSymbols] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) identifier[mibTree] ,= identifier[self] . identifier[mibBuilder] . identifier[importSymbols] ( literal[string] , literal[string] ) identifier[scalars] ={} identifier[instances] ={} identifier[tables] ={} identifier[rows] ={} identifier[cols] ={} identifier[mibSymbols] = identifier[list] ( identifier[self] . identifier[mibBuilder] . identifier[mibSymbols] . identifier[items] ()) identifier[mibSymbols] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] ) keyword[for] identifier[modName] , identifier[mibMod] keyword[in] identifier[mibSymbols] : keyword[for] identifier[symObj] keyword[in] identifier[mibMod] . identifier[values] (): keyword[if] identifier[isinstance] ( identifier[symObj] , identifier[MibTable] ): identifier[tables] [ identifier[symObj] . identifier[name] ]= identifier[symObj] keyword[elif] identifier[isinstance] ( identifier[symObj] , identifier[MibTableRow] ): identifier[rows] [ identifier[symObj] . identifier[name] ]= identifier[symObj] keyword[elif] identifier[isinstance] ( identifier[symObj] , identifier[MibTableColumn] ): identifier[cols] [ identifier[symObj] . identifier[name] ]= identifier[symObj] keyword[elif] identifier[isinstance] ( identifier[symObj] , identifier[MibScalarInstance] ): identifier[instances] [ identifier[symObj] . 
identifier[name] ]= identifier[symObj] keyword[elif] identifier[isinstance] ( identifier[symObj] , identifier[MibScalar] ): identifier[scalars] [ identifier[symObj] . identifier[name] ]= identifier[symObj] keyword[for] identifier[symName] , identifier[parentName] keyword[in] identifier[self] . identifier[lastBuildSyms] . identifier[items] (): keyword[if] identifier[parentName] keyword[in] identifier[scalars] : identifier[scalars] [ identifier[parentName] ]. identifier[unregisterSubtrees] ( identifier[symName] ) keyword[elif] identifier[parentName] keyword[in] identifier[cols] : identifier[cols] [ identifier[parentName] ]. identifier[unregisterSubtrees] ( identifier[symName] ) keyword[elif] identifier[parentName] keyword[in] identifier[rows] : identifier[rows] [ identifier[parentName] ]. identifier[unregisterSubtrees] ( identifier[symName] ) keyword[else] : identifier[mibTree] . identifier[unregisterSubtrees] ( identifier[symName] ) identifier[lastBuildSyms] ={} keyword[for] identifier[inst] keyword[in] identifier[instances] . identifier[values] (): keyword[if] identifier[inst] . identifier[typeName] keyword[in] identifier[scalars] : identifier[scalars] [ identifier[inst] . identifier[typeName] ]. identifier[registerSubtrees] ( identifier[inst] ) keyword[elif] identifier[inst] . identifier[typeName] keyword[in] identifier[cols] : identifier[cols] [ identifier[inst] . identifier[typeName] ]. identifier[registerSubtrees] ( identifier[inst] ) keyword[else] : keyword[raise] identifier[error] . identifier[SmiError] ( literal[string] literal[string] %( identifier[inst] , identifier[self] )) identifier[lastBuildSyms] [ identifier[inst] . identifier[name] ]= identifier[inst] . identifier[typeName] keyword[for] identifier[col] keyword[in] identifier[cols] . identifier[values] (): identifier[rowName] = identifier[col] . identifier[name] [:- literal[int] ] keyword[if] identifier[rowName] keyword[in] identifier[rows] : identifier[rows] [ identifier[rowName] ]. 
identifier[registerSubtrees] ( identifier[col] ) keyword[else] : keyword[raise] identifier[error] . identifier[SmiError] ( literal[string] literal[string] %( identifier[col] , identifier[self] )) identifier[lastBuildSyms] [ identifier[col] . identifier[name] ]= identifier[rowName] keyword[for] identifier[row] keyword[in] identifier[rows] . identifier[values] (): identifier[mibTree] . identifier[registerSubtrees] ( identifier[row] ) identifier[lastBuildSyms] [ identifier[row] . identifier[name] ]= identifier[mibTree] . identifier[name] keyword[for] identifier[table] keyword[in] identifier[tables] . identifier[values] (): identifier[mibTree] . identifier[registerSubtrees] ( identifier[table] ) identifier[lastBuildSyms] [ identifier[table] . identifier[name] ]= identifier[mibTree] . identifier[name] keyword[for] identifier[scalar] keyword[in] identifier[scalars] . identifier[values] (): identifier[mibTree] . identifier[registerSubtrees] ( identifier[scalar] ) identifier[lastBuildSyms] [ identifier[scalar] . identifier[name] ]= identifier[mibTree] . identifier[name] identifier[self] . identifier[lastBuildSyms] = identifier[lastBuildSyms] identifier[self] . identifier[lastBuildId] = identifier[self] . identifier[mibBuilder] . identifier[lastBuildId] identifier[debug] . identifier[logger] & identifier[debug] . identifier[FLAG_INS] keyword[and] identifier[debug] . identifier[logger] ( literal[string] )
def _indexMib(self): """Rebuild a tree from MIB objects found at currently loaded modules. If currently existing tree is out of date, walk over all Managed Objects and Instances to structure Management Instrumentation objects into a tree of the following layout: MibTree | +----MibScalar | | | +-----MibScalarInstance | +----MibTable | +----MibTableRow | +-------MibTableColumn | +------MibScalarInstance(s) Notes ----- Only Managed Objects (i.e. `OBJECT-TYPE`) get indexed here, various MIB definitions and constants can't be SNMP managed so we drop them. """ if self.lastBuildId == self.mibBuilder.lastBuildId: return # depends on [control=['if'], data=[]] (MibScalarInstance, MibScalar, MibTableColumn, MibTableRow, MibTable) = self.mibBuilder.importSymbols('SNMPv2-SMI', 'MibScalarInstance', 'MibScalar', 'MibTableColumn', 'MibTableRow', 'MibTable') (mibTree,) = self.mibBuilder.importSymbols('SNMPv2-SMI', 'iso') scalars = {} instances = {} tables = {} rows = {} cols = {} # Sort by module name to give user a chance to slip-in # custom MIB modules (that would be sorted out first) mibSymbols = list(self.mibBuilder.mibSymbols.items()) mibSymbols.sort(key=lambda x: x[0], reverse=True) for (modName, mibMod) in mibSymbols: for symObj in mibMod.values(): if isinstance(symObj, MibTable): tables[symObj.name] = symObj # depends on [control=['if'], data=[]] elif isinstance(symObj, MibTableRow): rows[symObj.name] = symObj # depends on [control=['if'], data=[]] elif isinstance(symObj, MibTableColumn): cols[symObj.name] = symObj # depends on [control=['if'], data=[]] elif isinstance(symObj, MibScalarInstance): instances[symObj.name] = symObj # depends on [control=['if'], data=[]] elif isinstance(symObj, MibScalar): scalars[symObj.name] = symObj # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['symObj']] # depends on [control=['for'], data=[]] # Detach items from each other for (symName, parentName) in self.lastBuildSyms.items(): if parentName in scalars: 
scalars[parentName].unregisterSubtrees(symName) # depends on [control=['if'], data=['parentName', 'scalars']] elif parentName in cols: cols[parentName].unregisterSubtrees(symName) # depends on [control=['if'], data=['parentName', 'cols']] elif parentName in rows: rows[parentName].unregisterSubtrees(symName) # depends on [control=['if'], data=['parentName', 'rows']] else: mibTree.unregisterSubtrees(symName) # depends on [control=['for'], data=[]] lastBuildSyms = {} # Attach Managed Objects Instances to Managed Objects for inst in instances.values(): if inst.typeName in scalars: scalars[inst.typeName].registerSubtrees(inst) # depends on [control=['if'], data=['scalars']] elif inst.typeName in cols: cols[inst.typeName].registerSubtrees(inst) # depends on [control=['if'], data=['cols']] else: raise error.SmiError('Orphan MIB scalar instance %r at %r' % (inst, self)) lastBuildSyms[inst.name] = inst.typeName # depends on [control=['for'], data=['inst']] # Attach Table Columns to Table Rows for col in cols.values(): rowName = col.name[:-1] # XXX if rowName in rows: rows[rowName].registerSubtrees(col) # depends on [control=['if'], data=['rowName', 'rows']] else: raise error.SmiError('Orphan MIB table column %r at %r' % (col, self)) lastBuildSyms[col.name] = rowName # depends on [control=['for'], data=['col']] # Attach Table Rows to MIB tree for row in rows.values(): mibTree.registerSubtrees(row) lastBuildSyms[row.name] = mibTree.name # depends on [control=['for'], data=['row']] # Attach Tables to MIB tree for table in tables.values(): mibTree.registerSubtrees(table) lastBuildSyms[table.name] = mibTree.name # depends on [control=['for'], data=['table']] # Attach Scalars to MIB tree for scalar in scalars.values(): mibTree.registerSubtrees(scalar) lastBuildSyms[scalar.name] = mibTree.name # depends on [control=['for'], data=['scalar']] self.lastBuildSyms = lastBuildSyms self.lastBuildId = self.mibBuilder.lastBuildId debug.logger & debug.FLAG_INS and debug.logger('_indexMib: 
rebuilt')
def resource(self, uri, methods=frozenset({'GET'}), host=None,
             strict_slashes=None, stream=False, version=None, name=None,
             **kwargs):
    """
    Create a blueprint resource route from a decorated function.

    :param uri: endpoint at which the route will be accessible.
    :param methods: list of acceptable HTTP methods.
    :param host:
    :param strict_slashes:
    :param version:
    :param name: user defined route name for url_for
    :return: function or class instance

    Accepts any keyword argument that will be passed to the app resource.
    """
    # Fall back to the blueprint-wide strict_slashes setting when the
    # caller did not specify one explicitly.
    strict = self.strict_slashes if strict_slashes is None else strict_slashes

    def decorator(handler):
        # Record the route for later registration on the app; extra kwargs
        # ride along untouched.
        route = FutureRoute(handler, uri, methods, host, strict,
                            stream, version, name)
        self.resources.append((route, kwargs))
        return handler

    return decorator
def function[resource, parameter[self, uri, methods, host, strict_slashes, stream, version, name]]: constant[ Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. ] if compare[name[strict_slashes] is constant[None]] begin[:] variable[strict_slashes] assign[=] name[self].strict_slashes def function[decorator, parameter[handler]]: call[name[self].resources.append, parameter[tuple[[<ast.Call object at 0x7da2044c1780>, <ast.Name object at 0x7da2044c00a0>]]]] return[name[handler]] return[name[decorator]]
keyword[def] identifier[resource] ( identifier[self] , identifier[uri] , identifier[methods] = identifier[frozenset] ({ literal[string] }), identifier[host] = keyword[None] , identifier[strict_slashes] = keyword[None] , identifier[stream] = keyword[False] , identifier[version] = keyword[None] , identifier[name] = keyword[None] , ** identifier[kwargs] ): literal[string] keyword[if] identifier[strict_slashes] keyword[is] keyword[None] : identifier[strict_slashes] = identifier[self] . identifier[strict_slashes] keyword[def] identifier[decorator] ( identifier[handler] ): identifier[self] . identifier[resources] . identifier[append] (( identifier[FutureRoute] ( identifier[handler] , identifier[uri] , identifier[methods] , identifier[host] , identifier[strict_slashes] , identifier[stream] , identifier[version] , identifier[name] ), identifier[kwargs] )) keyword[return] identifier[handler] keyword[return] identifier[decorator]
def resource(self, uri, methods=frozenset({'GET'}), host=None, strict_slashes=None, stream=False, version=None, name=None, **kwargs): """ Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. """ if strict_slashes is None: strict_slashes = self.strict_slashes # depends on [control=['if'], data=['strict_slashes']] def decorator(handler): self.resources.append((FutureRoute(handler, uri, methods, host, strict_slashes, stream, version, name), kwargs)) return handler return decorator
def open_fastq(in_file):
    """Open a FASTQ/FASTA file for reading, transparently handling gzip.

    from bcbio package

    Parameters
    ----------
    in_file : str
        Path ending in ``.fastq``/``.fq``/``.fasta``/``.fa``, optionally
        gzip-compressed (``.gz``).

    Returns
    -------
    An open file object (binary mode for gzip input, text mode otherwise).

    Raises
    ------
    ValueError
        If the extension is not a recognized sequence-file format.
    """
    _, ext = os.path.splitext(in_file)
    if ext == ".gz":
        return gzip.open(in_file, 'rb')
    if ext in (".fastq", ".fq", ".fasta", ".fa"):
        return open(in_file, 'r')
    # Bug fix: the original *returned* the ValueError instance instead of
    # raising it, handing callers an exception object in place of a file.
    raise ValueError("File needs to be fastq|fasta|fq|fa [.gz]")
def function[open_fastq, parameter[in_file]]: constant[ open a fastq file, using gzip if it is gzipped from bcbio package ] <ast.Tuple object at 0x7da1b0274eb0> assign[=] call[name[os].path.splitext, parameter[name[in_file]]] if compare[name[ext] equal[==] constant[.gz]] begin[:] return[call[name[gzip].open, parameter[name[in_file], constant[rb]]]] if compare[name[ext] in list[[<ast.Constant object at 0x7da20c992d70>, <ast.Constant object at 0x7da20c993e20>, <ast.Constant object at 0x7da20c992ad0>, <ast.Constant object at 0x7da20c991a80>]]] begin[:] return[call[name[open], parameter[name[in_file], constant[r]]]] return[call[name[ValueError], parameter[constant[File needs to be fastq|fasta|fq|fa [.gz]]]]]
keyword[def] identifier[open_fastq] ( identifier[in_file] ): literal[string] identifier[_] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[in_file] ) keyword[if] identifier[ext] == literal[string] : keyword[return] identifier[gzip] . identifier[open] ( identifier[in_file] , literal[string] ) keyword[if] identifier[ext] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[return] identifier[open] ( identifier[in_file] , literal[string] ) keyword[return] identifier[ValueError] ( literal[string] )
def open_fastq(in_file): """ open a fastq file, using gzip if it is gzipped from bcbio package """ (_, ext) = os.path.splitext(in_file) if ext == '.gz': return gzip.open(in_file, 'rb') # depends on [control=['if'], data=[]] if ext in ['.fastq', '.fq', '.fasta', '.fa']: return open(in_file, 'r') # depends on [control=['if'], data=[]] return ValueError('File needs to be fastq|fasta|fq|fa [.gz]')
def freeze(name=None, force=False, **kwargs):
    '''
    Save the list of package and repos in a freeze file.

    As this module is build on top of the pkg module, the user can send
    extra attributes to the underlying pkg module via kwargs. This
    function will call ``pkg.list_pkgs`` and ``pkg.list_repos``, and any
    additional arguments will be passed through to those functions.

    name
        Name of the frozen state. Optional.

    force
        If true, overwrite the state. Optional.

    CLI Example:

    .. code-block:: bash

        salt '*' freezer.freeze
        salt '*' freezer.freeze pre_install
        salt '*' freezer.freeze force=True root=/chroot

    '''
    states_path = _states_path()

    try:
        # Bug fix: unconditional makedirs raised FileExistsError (an
        # OSError) once the storage directory existed, so every freeze
        # after the first aborted here — even with force=True.
        if not os.path.isdir(states_path):
            os.makedirs(states_path)
    except OSError as e:
        msg = 'Error when trying to create the freezer storage %s: %s'
        log.error(msg, states_path, e)
        raise CommandExecutionError(msg % (states_path, e))

    if status(name) and not force:
        raise CommandExecutionError('The state is already present. Use '
                                    'force parameter to overwrite.')
    safe_kwargs = clean_kwargs(**kwargs)
    pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs)
    repos = __salt__['pkg.list_repos'](**safe_kwargs)
    # Use a distinct loop variable: the original shadowed the ``name``
    # parameter, which made the code harder to follow.
    for state_file, content in zip(_paths(name), (pkgs, repos)):
        with fopen(state_file, 'w') as fp:
            json.dump(content, fp)
    return True
def function[freeze, parameter[name, force]]: constant[ Save the list of package and repos in a freeze file. As this module is build on top of the pkg module, the user can send extra attributes to the underlying pkg module via kwargs. This function will call ``pkg.list_pkgs`` and ``pkg.list_repos``, and any additional arguments will be passed through to those functions. name Name of the frozen state. Optional. force If true, overwrite the state. Optional. CLI Example: .. code-block:: bash salt '*' freezer.freeze salt '*' freezer.freeze pre_install salt '*' freezer.freeze force=True root=/chroot ] variable[states_path] assign[=] call[name[_states_path], parameter[]] <ast.Try object at 0x7da20e962f80> if <ast.BoolOp object at 0x7da20c6a8ee0> begin[:] <ast.Raise object at 0x7da20c6a9300> variable[safe_kwargs] assign[=] call[name[clean_kwargs], parameter[]] variable[pkgs] assign[=] call[call[name[__salt__]][constant[pkg.list_pkgs]], parameter[]] variable[repos] assign[=] call[call[name[__salt__]][constant[pkg.list_repos]], parameter[]] for taget[tuple[[<ast.Name object at 0x7da20e74be50>, <ast.Name object at 0x7da20e748820>]]] in starred[call[name[zip], parameter[call[name[_paths], parameter[name[name]]], tuple[[<ast.Name object at 0x7da20e749b40>, <ast.Name object at 0x7da20e7486d0>]]]]] begin[:] with call[name[fopen], parameter[name[name], constant[w]]] begin[:] call[name[json].dump, parameter[name[content], name[fp]]] return[constant[True]]
keyword[def] identifier[freeze] ( identifier[name] = keyword[None] , identifier[force] = keyword[False] ,** identifier[kwargs] ): literal[string] identifier[states_path] = identifier[_states_path] () keyword[try] : identifier[os] . identifier[makedirs] ( identifier[states_path] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : identifier[msg] = literal[string] identifier[log] . identifier[error] ( identifier[msg] , identifier[states_path] , identifier[e] ) keyword[raise] identifier[CommandExecutionError] ( identifier[msg] %( identifier[states_path] , identifier[e] )) keyword[if] identifier[status] ( identifier[name] ) keyword[and] keyword[not] identifier[force] : keyword[raise] identifier[CommandExecutionError] ( literal[string] literal[string] ) identifier[safe_kwargs] = identifier[clean_kwargs] (** identifier[kwargs] ) identifier[pkgs] = identifier[__salt__] [ literal[string] ](** identifier[safe_kwargs] ) identifier[repos] = identifier[__salt__] [ literal[string] ](** identifier[safe_kwargs] ) keyword[for] identifier[name] , identifier[content] keyword[in] identifier[zip] ( identifier[_paths] ( identifier[name] ),( identifier[pkgs] , identifier[repos] )): keyword[with] identifier[fopen] ( identifier[name] , literal[string] ) keyword[as] identifier[fp] : identifier[json] . identifier[dump] ( identifier[content] , identifier[fp] ) keyword[return] keyword[True]
def freeze(name=None, force=False, **kwargs): """ Save the list of package and repos in a freeze file. As this module is build on top of the pkg module, the user can send extra attributes to the underlying pkg module via kwargs. This function will call ``pkg.list_pkgs`` and ``pkg.list_repos``, and any additional arguments will be passed through to those functions. name Name of the frozen state. Optional. force If true, overwrite the state. Optional. CLI Example: .. code-block:: bash salt '*' freezer.freeze salt '*' freezer.freeze pre_install salt '*' freezer.freeze force=True root=/chroot """ states_path = _states_path() try: os.makedirs(states_path) # depends on [control=['try'], data=[]] except OSError as e: msg = 'Error when trying to create the freezer storage %s: %s' log.error(msg, states_path, e) raise CommandExecutionError(msg % (states_path, e)) # depends on [control=['except'], data=['e']] if status(name) and (not force): raise CommandExecutionError('The state is already present. Use force parameter to overwrite.') # depends on [control=['if'], data=[]] safe_kwargs = clean_kwargs(**kwargs) pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs) repos = __salt__['pkg.list_repos'](**safe_kwargs) for (name, content) in zip(_paths(name), (pkgs, repos)): with fopen(name, 'w') as fp: json.dump(content, fp) # depends on [control=['with'], data=['fp']] # depends on [control=['for'], data=[]] return True
def divide_url(self, url):
    """Divide a URL into its host and path parts.

    Parameters
    ----------
    url : str
        URL with an optional ``http://`` or ``https://`` scheme prefix.

    Returns
    -------
    tuple of (host, path) strings; ``path`` keeps its leading slash and
    may be empty when the URL has no path component.
    """
    # Bug fix: the original used substring tests (``'https://' in url``),
    # which misparsed URLs that merely *contain* a scheme later in the
    # string (e.g. a redirect target in the query). Prefix tests are the
    # correct check.
    if url.startswith('https://'):
        host = url[8:].split('/')[0]
        path = url[8 + len(host):]
    elif url.startswith('http://'):
        host = url[7:].split('/')[0]
        path = url[7 + len(host):]
    else:
        host = url.split('/')[0]
        path = url[len(host):]
    return host, path
def function[divide_url, parameter[self, url]]: constant[ divide url into host and path two parts ] if compare[constant[https://] in name[url]] begin[:] variable[host] assign[=] call[call[call[name[url]][<ast.Slice object at 0x7da1b282b280>].split, parameter[constant[/]]]][constant[0]] variable[path] assign[=] call[name[url]][<ast.Slice object at 0x7da1b282ab60>] return[tuple[[<ast.Name object at 0x7da1b265d7b0>, <ast.Name object at 0x7da1b265d8d0>]]]
keyword[def] identifier[divide_url] ( identifier[self] , identifier[url] ): literal[string] keyword[if] literal[string] keyword[in] identifier[url] : identifier[host] = identifier[url] [ literal[int] :]. identifier[split] ( literal[string] )[ literal[int] ] identifier[path] = identifier[url] [ literal[int] + identifier[len] ( identifier[host] ):] keyword[elif] literal[string] keyword[in] identifier[url] : identifier[host] = identifier[url] [ literal[int] :]. identifier[split] ( literal[string] )[ literal[int] ] identifier[path] = identifier[url] [ literal[int] + identifier[len] ( identifier[host] ):] keyword[else] : identifier[host] = identifier[url] . identifier[split] ( literal[string] )[ literal[int] ] identifier[path] = identifier[url] [ identifier[len] ( identifier[host] ):] keyword[return] identifier[host] , identifier[path]
def divide_url(self, url): """ divide url into host and path two parts """ if 'https://' in url: host = url[8:].split('/')[0] path = url[8 + len(host):] # depends on [control=['if'], data=['url']] elif 'http://' in url: host = url[7:].split('/')[0] path = url[7 + len(host):] # depends on [control=['if'], data=['url']] else: host = url.split('/')[0] path = url[len(host):] return (host, path)
def _connected(self, link_uri):
    """ This callback is called form the Crazyflie API when a Crazyflie
    has been connected and the TOCs have been downloaded."""
    print('Connected to %s' % link_uri)

    # The log configuration itself can be defined before connecting.
    self._lg_stab = LogConfig(name='Stabilizer', period_in_ms=10)
    for variable in ('stabilizer.roll', 'stabilizer.pitch',
                     'stabilizer.yaw'):
        self._lg_stab.add_variable(variable, 'float')

    # Registering the configuration requires a connected Crazyflie: the
    # requested variables are validated against the downloaded TOC.
    try:
        self._cf.log.add_config(self._lg_stab)
        # Wire data and error delivery to our callbacks, then begin logging.
        self._lg_stab.data_received_cb.add_callback(self._stab_log_data)
        self._lg_stab.error_cb.add_callback(self._stab_log_error)
        self._lg_stab.start()
    except KeyError as e:
        print('Could not start log configuration,'
              '{} not found in TOC'.format(str(e)))
    except AttributeError:
        print('Could not add Stabilizer log config, bad configuration.')

    # Schedule an automatic disconnect after 5 seconds.
    t = Timer(5, self._cf.close_link)
    t.start()
def function[_connected, parameter[self, link_uri]]: constant[ This callback is called form the Crazyflie API when a Crazyflie has been connected and the TOCs have been downloaded.] call[name[print], parameter[binary_operation[constant[Connected to %s] <ast.Mod object at 0x7da2590d6920> name[link_uri]]]] name[self]._lg_stab assign[=] call[name[LogConfig], parameter[]] call[name[self]._lg_stab.add_variable, parameter[constant[stabilizer.roll], constant[float]]] call[name[self]._lg_stab.add_variable, parameter[constant[stabilizer.pitch], constant[float]]] call[name[self]._lg_stab.add_variable, parameter[constant[stabilizer.yaw], constant[float]]] <ast.Try object at 0x7da1b16848b0> variable[t] assign[=] call[name[Timer], parameter[constant[5], name[self]._cf.close_link]] call[name[t].start, parameter[]]
keyword[def] identifier[_connected] ( identifier[self] , identifier[link_uri] ): literal[string] identifier[print] ( literal[string] % identifier[link_uri] ) identifier[self] . identifier[_lg_stab] = identifier[LogConfig] ( identifier[name] = literal[string] , identifier[period_in_ms] = literal[int] ) identifier[self] . identifier[_lg_stab] . identifier[add_variable] ( literal[string] , literal[string] ) identifier[self] . identifier[_lg_stab] . identifier[add_variable] ( literal[string] , literal[string] ) identifier[self] . identifier[_lg_stab] . identifier[add_variable] ( literal[string] , literal[string] ) keyword[try] : identifier[self] . identifier[_cf] . identifier[log] . identifier[add_config] ( identifier[self] . identifier[_lg_stab] ) identifier[self] . identifier[_lg_stab] . identifier[data_received_cb] . identifier[add_callback] ( identifier[self] . identifier[_stab_log_data] ) identifier[self] . identifier[_lg_stab] . identifier[error_cb] . identifier[add_callback] ( identifier[self] . identifier[_stab_log_error] ) identifier[self] . identifier[_lg_stab] . identifier[start] () keyword[except] identifier[KeyError] keyword[as] identifier[e] : identifier[print] ( literal[string] literal[string] . identifier[format] ( identifier[str] ( identifier[e] ))) keyword[except] identifier[AttributeError] : identifier[print] ( literal[string] ) identifier[t] = identifier[Timer] ( literal[int] , identifier[self] . identifier[_cf] . identifier[close_link] ) identifier[t] . identifier[start] ()
def _connected(self, link_uri): """ This callback is called form the Crazyflie API when a Crazyflie has been connected and the TOCs have been downloaded.""" print('Connected to %s' % link_uri) # The definition of the logconfig can be made before connecting self._lg_stab = LogConfig(name='Stabilizer', period_in_ms=10) self._lg_stab.add_variable('stabilizer.roll', 'float') self._lg_stab.add_variable('stabilizer.pitch', 'float') self._lg_stab.add_variable('stabilizer.yaw', 'float') # Adding the configuration cannot be done until a Crazyflie is # connected, since we need to check that the variables we # would like to log are in the TOC. try: self._cf.log.add_config(self._lg_stab) # This callback will receive the data self._lg_stab.data_received_cb.add_callback(self._stab_log_data) # This callback will be called on errors self._lg_stab.error_cb.add_callback(self._stab_log_error) # Start the logging self._lg_stab.start() # depends on [control=['try'], data=[]] except KeyError as e: print('Could not start log configuration,{} not found in TOC'.format(str(e))) # depends on [control=['except'], data=['e']] except AttributeError: print('Could not add Stabilizer log config, bad configuration.') # depends on [control=['except'], data=[]] # Start a timer to disconnect in 10s t = Timer(5, self._cf.close_link) t.start()
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]:
    """
    Disambiguates single GPU and multiple GPU settings for cuda_device param.
    """
    def _normalize(devices):
        # Empty -> CPU sentinel; single entry -> bare int; otherwise a list.
        if not devices:
            return -1
        if len(devices) == 1:
            return int(devices[0])
        return [int(d) for d in devices]

    if isinstance(cuda_device, str):
        # Accept comma-separated device ids with optional whitespace.
        return _normalize(re.split(r',\s*', cuda_device))
    if isinstance(cuda_device, int):
        return cuda_device
    if isinstance(cuda_device, list):
        return _normalize(cuda_device)
    # TODO(brendanr): Determine why mypy can't tell that this matches the Union.
    return int(cuda_device)
def function[parse_cuda_device, parameter[cuda_device]]: constant[ Disambiguates single GPU and multiple GPU settings for cuda_device param. ] def function[from_list, parameter[strings]]: if compare[call[name[len], parameter[name[strings]]] greater[>] constant[1]] begin[:] return[<ast.ListComp object at 0x7da2054a5f30>] if call[name[isinstance], parameter[name[cuda_device], name[str]]] begin[:] return[call[name[from_list], parameter[call[name[re].split, parameter[constant[,\s*], name[cuda_device]]]]]]
keyword[def] identifier[parse_cuda_device] ( identifier[cuda_device] : identifier[Union] [ identifier[str] , identifier[int] , identifier[List] [ identifier[int] ]])-> identifier[Union] [ identifier[int] , identifier[List] [ identifier[int] ]]: literal[string] keyword[def] identifier[from_list] ( identifier[strings] ): keyword[if] identifier[len] ( identifier[strings] )> literal[int] : keyword[return] [ identifier[int] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[strings] ] keyword[elif] identifier[len] ( identifier[strings] )== literal[int] : keyword[return] identifier[int] ( identifier[strings] [ literal[int] ]) keyword[else] : keyword[return] - literal[int] keyword[if] identifier[isinstance] ( identifier[cuda_device] , identifier[str] ): keyword[return] identifier[from_list] ( identifier[re] . identifier[split] ( literal[string] , identifier[cuda_device] )) keyword[elif] identifier[isinstance] ( identifier[cuda_device] , identifier[int] ): keyword[return] identifier[cuda_device] keyword[elif] identifier[isinstance] ( identifier[cuda_device] , identifier[list] ): keyword[return] identifier[from_list] ( identifier[cuda_device] ) keyword[else] : keyword[return] identifier[int] ( identifier[cuda_device] )
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]: """ Disambiguates single GPU and multiple GPU settings for cuda_device param. """ def from_list(strings): if len(strings) > 1: return [int(d) for d in strings] # depends on [control=['if'], data=[]] elif len(strings) == 1: return int(strings[0]) # depends on [control=['if'], data=[]] else: return -1 if isinstance(cuda_device, str): return from_list(re.split(',\\s*', cuda_device)) # depends on [control=['if'], data=[]] elif isinstance(cuda_device, int): return cuda_device # depends on [control=['if'], data=[]] elif isinstance(cuda_device, list): return from_list(cuda_device) # depends on [control=['if'], data=[]] else: # TODO(brendanr): Determine why mypy can't tell that this matches the Union. return int(cuda_device)
def _init_nxapi(opts):
    '''
    Open a connection to the NX-OS switch over NX-API.

    As the communication is HTTP(S) based, there is no connection to maintain,
    however, in order to test the connectivity and make sure we are able to
    bring up this Minion, we are executing a very simple command (``show
    clock``) which doesn't come with much overhead and it's sufficient to
    confirm we are indeed able to connect to the NX-API endpoint as configured.
    '''
    proxy_dict = opts.get('proxy', {})
    conn_args = copy.deepcopy(proxy_dict)
    conn_args.pop('proxytype', None)
    # This is not a SSH-based proxy, so it should be safe to enable
    # multiprocessing.
    opts['multiprocessing'] = conn_args.pop('multiprocessing', True)
    try:
        # Execute a very simple command to confirm we are able to connect
        # properly; the reply itself is not needed (the original bound it
        # to an unused local).
        __utils__['nxos.nxapi_request']('show clock', **conn_args)
        DEVICE_DETAILS['conn_args'] = conn_args
        DEVICE_DETAILS['initialized'] = True
        DEVICE_DETAILS['up'] = True
        DEVICE_DETAILS['no_save_config'] = opts['proxy'].get('no_save_config',
                                                             False)
    except Exception as ex:
        log.error('Unable to connect to %s', conn_args['host'])
        log.error('Please check the following:\n')
        log.error('-- Verify that "feature nxapi" is enabled on your NX-OS device: %s', conn_args['host'])
        log.error('-- Verify that nxapi settings on the NX-OS device and proxy minion config file match')
        log.error('-- Exception Generated: %s', ex)
        exit()
    log.info('nxapi DEVICE_DETAILS info: %s', DEVICE_DETAILS)
    return True
def function[_init_nxapi, parameter[opts]]: constant[ Open a connection to the NX-OS switch over NX-API. As the communication is HTTP(S) based, there is no connection to maintain, however, in order to test the connectivity and make sure we are able to bring up this Minion, we are executing a very simple command (``show clock``) which doesn't come with much overhead and it's sufficient to confirm we are indeed able to connect to the NX-API endpoint as configured. ] variable[proxy_dict] assign[=] call[name[opts].get, parameter[constant[proxy], dictionary[[], []]]] variable[conn_args] assign[=] call[name[copy].deepcopy, parameter[name[proxy_dict]]] call[name[conn_args].pop, parameter[constant[proxytype], constant[None]]] call[name[opts]][constant[multiprocessing]] assign[=] call[name[conn_args].pop, parameter[constant[multiprocessing], constant[True]]] <ast.Try object at 0x7da1b1f95a50> call[name[log].info, parameter[constant[nxapi DEVICE_DETAILS info: %s], name[DEVICE_DETAILS]]] return[constant[True]]
keyword[def] identifier[_init_nxapi] ( identifier[opts] ): literal[string] identifier[proxy_dict] = identifier[opts] . identifier[get] ( literal[string] ,{}) identifier[conn_args] = identifier[copy] . identifier[deepcopy] ( identifier[proxy_dict] ) identifier[conn_args] . identifier[pop] ( literal[string] , keyword[None] ) identifier[opts] [ literal[string] ]= identifier[conn_args] . identifier[pop] ( literal[string] , keyword[True] ) keyword[try] : identifier[rpc_reply] = identifier[__utils__] [ literal[string] ]( literal[string] ,** identifier[conn_args] ) identifier[DEVICE_DETAILS] [ literal[string] ]= identifier[conn_args] identifier[DEVICE_DETAILS] [ literal[string] ]= keyword[True] identifier[DEVICE_DETAILS] [ literal[string] ]= keyword[True] identifier[DEVICE_DETAILS] [ literal[string] ]= identifier[opts] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] ) keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[log] . identifier[error] ( literal[string] , identifier[conn_args] [ literal[string] ]) identifier[log] . identifier[error] ( literal[string] ) identifier[log] . identifier[error] ( literal[string] , identifier[conn_args] [ literal[string] ]) identifier[log] . identifier[error] ( literal[string] ) identifier[log] . identifier[error] ( literal[string] , identifier[ex] ) identifier[exit] () identifier[log] . identifier[info] ( literal[string] , identifier[DEVICE_DETAILS] ) keyword[return] keyword[True]
def _init_nxapi(opts): """ Open a connection to the NX-OS switch over NX-API. As the communication is HTTP(S) based, there is no connection to maintain, however, in order to test the connectivity and make sure we are able to bring up this Minion, we are executing a very simple command (``show clock``) which doesn't come with much overhead and it's sufficient to confirm we are indeed able to connect to the NX-API endpoint as configured. """ proxy_dict = opts.get('proxy', {}) conn_args = copy.deepcopy(proxy_dict) conn_args.pop('proxytype', None) opts['multiprocessing'] = conn_args.pop('multiprocessing', True) # This is not a SSH-based proxy, so it should be safe to enable # multiprocessing. try: rpc_reply = __utils__['nxos.nxapi_request']('show clock', **conn_args) # Execute a very simple command to confirm we are able to connect properly DEVICE_DETAILS['conn_args'] = conn_args DEVICE_DETAILS['initialized'] = True DEVICE_DETAILS['up'] = True DEVICE_DETAILS['no_save_config'] = opts['proxy'].get('no_save_config', False) # depends on [control=['try'], data=[]] except Exception as ex: log.error('Unable to connect to %s', conn_args['host']) log.error('Please check the following:\n') log.error('-- Verify that "feature nxapi" is enabled on your NX-OS device: %s', conn_args['host']) log.error('-- Verify that nxapi settings on the NX-OS device and proxy minion config file match') log.error('-- Exception Generated: %s', ex) exit() # depends on [control=['except'], data=['ex']] log.info('nxapi DEVICE_DETAILS info: %s', DEVICE_DETAILS) return True
def get_genus_type(self):
    """Gets the genus type of this object.

    return: (osid.type.Type) - the genus type of this object
    compliance: mandatory - This method must be implemented.

    """
    # Fetch the genus type map lazily and cache it for later calls.
    if self._my_genus_type_map is None:
        genus_id = self._my_map['genusTypeId']
        url_path = '/handcar/services/learning/types/' + genus_id
        self._my_genus_type_map = self._get_request(url_path)
    return Type(self._my_genus_type_map)
def function[get_genus_type, parameter[self]]: constant[Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object compliance: mandatory - This method must be implemented. ] if compare[name[self]._my_genus_type_map is constant[None]] begin[:] variable[url_path] assign[=] binary_operation[constant[/handcar/services/learning/types/] + call[name[self]._my_map][constant[genusTypeId]]] name[self]._my_genus_type_map assign[=] call[name[self]._get_request, parameter[name[url_path]]] return[call[name[Type], parameter[name[self]._my_genus_type_map]]]
keyword[def] identifier[get_genus_type] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_my_genus_type_map] keyword[is] keyword[None] : identifier[url_path] = literal[string] + identifier[self] . identifier[_my_map] [ literal[string] ] identifier[self] . identifier[_my_genus_type_map] = identifier[self] . identifier[_get_request] ( identifier[url_path] ) keyword[return] identifier[Type] ( identifier[self] . identifier[_my_genus_type_map] )
def get_genus_type(self): """Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object compliance: mandatory - This method must be implemented. """ if self._my_genus_type_map is None: url_path = '/handcar/services/learning/types/' + self._my_map['genusTypeId'] # url_str = self._base_url + '/types/' + self._my_map['genusTypeId'] # self._my_genus_type_map = self._load_json(url_str) self._my_genus_type_map = self._get_request(url_path) # depends on [control=['if'], data=[]] return Type(self._my_genus_type_map)
def outputs_of(self, partition_index):
    """The outputs of the partition at ``partition_index``.

    Note that this returns a tuple of element indices, since coarse-
    grained blackboxes may have multiple outputs.
    """
    members = set(self.partition[partition_index])
    # Keep only those elements that are also outputs of the blackbox.
    shared_outputs = members & set(self.output_indices)
    return tuple(sorted(shared_outputs))
def function[outputs_of, parameter[self, partition_index]]: constant[The outputs of the partition at ``partition_index``. Note that this returns a tuple of element indices, since coarse- grained blackboxes may have multiple outputs. ] variable[partition] assign[=] call[name[self].partition][name[partition_index]] variable[outputs] assign[=] call[call[name[set], parameter[name[partition]]].intersection, parameter[name[self].output_indices]] return[call[name[tuple], parameter[call[name[sorted], parameter[name[outputs]]]]]]
keyword[def] identifier[outputs_of] ( identifier[self] , identifier[partition_index] ): literal[string] identifier[partition] = identifier[self] . identifier[partition] [ identifier[partition_index] ] identifier[outputs] = identifier[set] ( identifier[partition] ). identifier[intersection] ( identifier[self] . identifier[output_indices] ) keyword[return] identifier[tuple] ( identifier[sorted] ( identifier[outputs] ))
def outputs_of(self, partition_index): """The outputs of the partition at ``partition_index``. Note that this returns a tuple of element indices, since coarse- grained blackboxes may have multiple outputs. """ partition = self.partition[partition_index] outputs = set(partition).intersection(self.output_indices) return tuple(sorted(outputs))
def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None):
    """Make an HTTP request and retry it until success; return the response.

    Retries up to ``retries`` extra times on connection errors and on 5xx
    status codes, sleeping a quadratically growing backoff between attempts.
    Raises the underlying ``RequestException`` if the connection keeps
    failing, or ``TaskclusterRestFailure`` if the server keeps answering 5xx.
    """
    retry = -1
    while retry < retries:
        retry += 1
        # If this isn't the first attempt, back off before retrying.
        if retry > 0:
            snooze = float(retry * retry) / 10.0
            log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
            time.sleep(snooze)

        # Seek payload back to start, if it is a file-like object, so a
        # retry re-sends the whole body.
        if hasattr(payload, 'seek'):
            payload.seek(0)

        log.debug('Making attempt %d', retry)
        try:
            response = makeSingleHttpRequest(method, url, payload, headers, session)
        except requests.exceptions.RequestException as rerr:
            if retry < retries:
                log.warn('Retrying because of: %s' % rerr)
                continue
            # Out of retries: surface the connection error.
            raise rerr

        # Retry 5xx responses; anything else is returned to the caller.
        # BUGFIX: previously the TaskclusterRestFailure branch was dead code
        # (nested under a condition that already required retry < retries),
        # so a 5xx on the final attempt was silently returned instead of
        # raised. (The old no-op `try: response.raise_for_status()` wrapped
        # in `except RequestException: pass` has been dropped: HTTPError is
        # a RequestException subclass, so it could never escape.)
        status = response.status_code
        if 500 <= status < 600:
            if retry < retries:
                log.warn('Retrying because of: %d status' % status)
                continue
            raise exceptions.TaskclusterRestFailure("Unknown Server Error",
                                                    superExc=None)
        return response

    # This code-path should be unreachable
    assert False, "Error from last retry should have been raised!"
def function[makeHttpRequest, parameter[method, url, payload, headers, retries, session]]: constant[ Make an HTTP request and retry it until success, return request ] variable[retry] assign[=] <ast.UnaryOp object at 0x7da1b0471090> variable[response] assign[=] constant[None] while compare[name[retry] less[<] name[retries]] begin[:] <ast.AugAssign object at 0x7da1b0472230> if compare[name[retry] greater[>] constant[0]] begin[:] variable[snooze] assign[=] binary_operation[call[name[float], parameter[binary_operation[name[retry] * name[retry]]]] / constant[10.0]] call[name[log].info, parameter[constant[Sleeping %0.2f seconds for exponential backoff], name[snooze]]] call[name[time].sleep, parameter[name[snooze]]] if call[name[hasattr], parameter[name[payload], constant[seek]]] begin[:] call[name[payload].seek, parameter[constant[0]]] call[name[log].debug, parameter[constant[Making attempt %d], name[retry]]] <ast.Try object at 0x7da20c6a95a0> <ast.Try object at 0x7da1b04734f0> variable[status] assign[=] name[response].status_code if <ast.BoolOp object at 0x7da1b0472c20> begin[:] if compare[name[retry] less[<] name[retries]] begin[:] call[name[log].warn, parameter[binary_operation[constant[Retrying because of: %d status] <ast.Mod object at 0x7da2590d6920> name[status]]]] continue return[name[response]] assert[constant[False]]
keyword[def] identifier[makeHttpRequest] ( identifier[method] , identifier[url] , identifier[payload] , identifier[headers] , identifier[retries] = identifier[MAX_RETRIES] , identifier[session] = keyword[None] ): literal[string] identifier[retry] =- literal[int] identifier[response] = keyword[None] keyword[while] identifier[retry] < identifier[retries] : identifier[retry] += literal[int] keyword[if] identifier[retry] > literal[int] : identifier[snooze] = identifier[float] ( identifier[retry] * identifier[retry] )/ literal[int] identifier[log] . identifier[info] ( literal[string] , identifier[snooze] ) identifier[time] . identifier[sleep] ( identifier[snooze] ) keyword[if] identifier[hasattr] ( identifier[payload] , literal[string] ): identifier[payload] . identifier[seek] ( literal[int] ) identifier[log] . identifier[debug] ( literal[string] , identifier[retry] ) keyword[try] : identifier[response] = identifier[makeSingleHttpRequest] ( identifier[method] , identifier[url] , identifier[payload] , identifier[headers] , identifier[session] ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[rerr] : keyword[if] identifier[retry] < identifier[retries] : identifier[log] . identifier[warn] ( literal[string] % identifier[rerr] ) keyword[continue] keyword[raise] identifier[rerr] keyword[try] : identifier[response] . identifier[raise_for_status] () keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] : keyword[pass] identifier[status] = identifier[response] . identifier[status_code] keyword[if] literal[int] <= identifier[status] keyword[and] identifier[status] < literal[int] keyword[and] identifier[retry] < identifier[retries] : keyword[if] identifier[retry] < identifier[retries] : identifier[log] . identifier[warn] ( literal[string] % identifier[status] ) keyword[continue] keyword[else] : keyword[raise] identifier[exceptions] . 
identifier[TaskclusterRestFailure] ( literal[string] , identifier[superExc] = keyword[None] ) keyword[return] identifier[response] keyword[assert] keyword[False] , literal[string]
def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None): """ Make an HTTP request and retry it until success, return request """ retry = -1 response = None while retry < retries: retry += 1 # if this isn't the first retry then we sleep if retry > 0: snooze = float(retry * retry) / 10.0 log.info('Sleeping %0.2f seconds for exponential backoff', snooze) time.sleep(snooze) # depends on [control=['if'], data=['retry']] # Seek payload to start, if it is a file if hasattr(payload, 'seek'): payload.seek(0) # depends on [control=['if'], data=[]] log.debug('Making attempt %d', retry) try: response = makeSingleHttpRequest(method, url, payload, headers, session) # depends on [control=['try'], data=[]] except requests.exceptions.RequestException as rerr: if retry < retries: log.warn('Retrying because of: %s' % rerr) continue # depends on [control=['if'], data=[]] # raise a connection exception raise rerr # depends on [control=['except'], data=['rerr']] # Handle non 2xx status code and retry if possible try: response.raise_for_status() # depends on [control=['try'], data=[]] except requests.exceptions.RequestException: pass # depends on [control=['except'], data=[]] status = response.status_code if 500 <= status and status < 600 and (retry < retries): if retry < retries: log.warn('Retrying because of: %d status' % status) continue # depends on [control=['if'], data=[]] else: raise exceptions.TaskclusterRestFailure('Unknown Server Error', superExc=None) # depends on [control=['if'], data=[]] return response # depends on [control=['while'], data=['retry', 'retries']] # This code-path should be unreachable assert False, 'Error from last retry should have been raised!'
def copy(self):
    """Return a shallow copy of a RangeSet."""
    # Build an empty instance of the same concrete class, mirror the
    # formatting attributes, then copy the membership across.
    clone = self.__class__()
    clone.padding = self.padding
    clone._autostep = self._autostep
    clone.update(self)
    return clone
def function[copy, parameter[self]]: constant[Return a shallow copy of a RangeSet.] variable[cpy] assign[=] call[name[self].__class__, parameter[]] name[cpy]._autostep assign[=] name[self]._autostep name[cpy].padding assign[=] name[self].padding call[name[cpy].update, parameter[name[self]]] return[name[cpy]]
keyword[def] identifier[copy] ( identifier[self] ): literal[string] identifier[cpy] = identifier[self] . identifier[__class__] () identifier[cpy] . identifier[_autostep] = identifier[self] . identifier[_autostep] identifier[cpy] . identifier[padding] = identifier[self] . identifier[padding] identifier[cpy] . identifier[update] ( identifier[self] ) keyword[return] identifier[cpy]
def copy(self): """Return a shallow copy of a RangeSet.""" cpy = self.__class__() cpy._autostep = self._autostep cpy.padding = self.padding cpy.update(self) return cpy
def get_acronyms(fulltext):
    """Find acronyms and expansions from the fulltext.

    If needed, acronyms can already contain a dictionary of previously
    found acronyms that will be merged with the current results.

    :param fulltext: text to scan for bracketed acronyms such as "(ABC)"
    :return: dict mapping each acronym to its recorded expansions
    """
    acronyms = {}
    for m in ACRONYM_BRACKETS_REGEX.finditer(fulltext):
        acronym = DOTS_REGEX.sub("", m.group(1))

        # The expansion, if present, is expected within the 80 characters
        # that precede the bracketed acronym.
        potential_expansion = fulltext[m.start() - 80:m.start()].replace("\n", " ")
        # Strip
        # BUGFIX: the two replacement strings below used to be the non-raw
        # literals "\1\2" and "\1", which Python reads as the control
        # characters \x01\x02 / \x01 instead of regex backreferences.
        potential_expansion = re.sub(r"(\W).(\W)", r"\1\2", potential_expansion)
        potential_expansion = re.sub(r"(\w)\(s\)\W", r"\1", potential_expansion)
        potential_expansion = re.sub("""[^\w'"]+$""", "", potential_expansion)
        potential_expansion = re.sub(r"[[(].+[\])]", "", potential_expansion)
        potential_expansion = re.sub(" {2,}", " ", potential_expansion)

        # LEVEL 0: expansion between quotes
        # Double quotes
        match = re.search(""""([^"]+)["]$""", potential_expansion)
        if match is None:
            # Single quotes
            match = re.search("""'([^"]+)[']$""", potential_expansion)
        if match is not None:
            if acronym in match.group(1):
                continue
            # Require the quoted text to contain the acronym initials, in
            # order, before accepting it as the expansion.
            pattern = ""
            for char in acronym[:-1]:
                pattern += r"%s\w+\W*" % char
            pattern += r"%s\w+" % acronym[-1]
            if re.search(pattern, match.group(1), re.I) is not None:
                _add_expansion_to_acronym_dict(acronym, match.group(1), 0,
                                               acronyms)
                continue

        # Pattern anchoring the acronym initials at the end of the text.
        pattern = r"\W("
        for char in acronym[:-1]:
            pattern += r"%s\w+\W+" % char
        pattern += r"%s\w+)$" % acronym[-1]

        # LEVEL 1: expansion with uppercase initials
        match = re.search(pattern, potential_expansion)
        if match is not None:
            _add_expansion_to_acronym_dict(
                acronym, match.group(1), 1, acronyms)
            continue

        # LEVEL 2: expansion with initials
        match = re.search(pattern, potential_expansion, re.I)
        if match is not None:
            _add_expansion_to_acronym_dict(
                acronym, match.group(1), 2, acronyms)
            continue

        # LEVEL 3: expansion with initials and STOPLIST
        potential_expansion_stripped = " ".join([word for word in
                                                 _words(potential_expansion)
                                                 if word not in STOPLIST])
        match = re.search(pattern, potential_expansion_stripped, re.I)
        if match is not None:
            first_expansion_word = re.search(r"\w+", match.group(1)).group()
            start = potential_expansion.lower().rfind(first_expansion_word)
            _add_expansion_to_acronym_dict(
                acronym, potential_expansion[start:], 3, acronyms
            )
            continue

        # LEVEL 4: expansion with fuzzy initials and stoplist
        reversed_words = _words(potential_expansion_stripped)
        reversed_words.reverse()
        reversed_acronym = list(acronym.lower())
        reversed_acronym.reverse()
        index0 = 0
        index1 = 0
        word = ""
        try:
            while index0 < len(reversed_acronym) and index1 < len(
                    reversed_words):
                word = reversed_words[index1]
                # NOTE(review): next_word is looked up with index0 rather
                # than index1 -- looks suspicious, but the same lookup is
                # used in LEVEL 5; confirm intent before changing.
                if index0 + 1 < len(reversed_words):
                    next_word = reversed_words[index0 + 1]
                else:
                    next_word = "_"
                char = reversed_acronym[index0]
                if index0 + 1 < len(reversed_acronym):
                    next_char = reversed_acronym[index0 + 1]
                else:
                    next_char = "_"
                # Doubled initial (e.g. "WWW"): consume two acronym letters
                # for a single word.
                if char == next_char and \
                        word.startswith(char) and \
                        word.count(char) > 1 and \
                        not next_word.startswith(char):
                    index0 += 2
                    index1 += 1
                if word.startswith(char):
                    index0 += 1
                    index1 += 1
                elif char in word and \
                        not word.endswith(char) and \
                        word.startswith(next_char):
                    index0 += 2
                    index1 += 1
                else:
                    word = ""
                    break
        except IndexError:
            word = ""
        if not word.startswith(char):
            word = ""
        if word:
            start = potential_expansion.lower().rfind(word)
            _add_expansion_to_acronym_dict(acronym,
                                           potential_expansion[start:],
                                           4,
                                           acronyms)
            continue

        # LEVEL 5: expansion with fuzzy initials
        reversed_words = _words(potential_expansion.lower())
        reversed_words.reverse()
        reversed_acronym = list(acronym.lower())
        reversed_acronym.reverse()
        index0 = 0
        index1 = 0
        word = ""
        try:
            while index0 < len(reversed_acronym) and index1 < len(
                    reversed_words):
                word = reversed_words[index1]
                if index0 + 1 < len(reversed_words):
                    next_word = reversed_words[index0 + 1]
                else:
                    next_word = ""
                char = reversed_acronym[index0]
                if index0 + 1 < len(reversed_acronym):
                    next_char = reversed_acronym[index0 + 1]
                else:
                    next_char = ""
                if char == next_char and word.startswith(char) and \
                        word.count(char) > 1 and \
                        not next_word.startswith(char):
                    index0 += 2
                    index1 += 1
                if word.startswith(char):
                    index0 += 1
                    index1 += 1
                elif char in word and \
                        not word.endswith(char) and \
                        word.startswith(next_char):
                    index0 += 2
                    index1 += 1
                else:
                    word = ""
                    break
        except IndexError:
            word = ""
        if not word.startswith(char):
            word = ""
        if word:
            start = potential_expansion.lower().rfind(word)
            _add_expansion_to_acronym_dict(acronym,
                                           potential_expansion[start:],
                                           5,
                                           acronyms)
            continue

    return acronyms
def function[get_acronyms, parameter[fulltext]]: constant[Find acronyms and expansions from the fulltext. If needed, acronyms can already contain a dictionary of previously found acronyms that will be merged with the current results. ] variable[acronyms] assign[=] dictionary[[], []] for taget[name[m]] in starred[call[name[ACRONYM_BRACKETS_REGEX].finditer, parameter[name[fulltext]]]] begin[:] variable[acronym] assign[=] call[name[DOTS_REGEX].sub, parameter[constant[], call[name[m].group, parameter[constant[1]]]]] variable[potential_expansion] assign[=] call[call[name[fulltext]][<ast.Slice object at 0x7da1b0ae28f0>].replace, parameter[constant[ ], constant[ ]]] variable[potential_expansion] assign[=] call[name[re].sub, parameter[constant[(\W).(\W)], constant[], name[potential_expansion]]] variable[potential_expansion] assign[=] call[name[re].sub, parameter[constant[(\w)\(s\)\W], constant[], name[potential_expansion]]] variable[potential_expansion] assign[=] call[name[re].sub, parameter[constant[[^\w'"]+$], constant[], name[potential_expansion]]] variable[potential_expansion] assign[=] call[name[re].sub, parameter[constant[[[(].+[\])]], constant[], name[potential_expansion]]] variable[potential_expansion] assign[=] call[name[re].sub, parameter[constant[ {2,}], constant[ ], name[potential_expansion]]] variable[match] assign[=] call[name[re].search, parameter[constant["([^"]+)["]$], name[potential_expansion]]] if compare[name[match] is constant[None]] begin[:] variable[match] assign[=] call[name[re].search, parameter[constant['([^"]+)[']$], name[potential_expansion]]] if compare[name[match] is_not constant[None]] begin[:] if compare[name[acronym] in call[name[match].group, parameter[constant[1]]]] begin[:] continue variable[pattern] assign[=] constant[] for taget[name[char]] in starred[call[name[acronym]][<ast.Slice object at 0x7da1b09b78e0>]] begin[:] <ast.AugAssign object at 0x7da1b09b7700> <ast.AugAssign object at 0x7da1b09b6b30> if compare[call[name[re].search, 
parameter[name[pattern], call[name[match].group, parameter[constant[1]]], name[re].I]] is_not constant[None]] begin[:] call[name[_add_expansion_to_acronym_dict], parameter[name[acronym], call[name[match].group, parameter[constant[1]]], constant[0], name[acronyms]]] continue variable[pattern] assign[=] constant[\W(] for taget[name[char]] in starred[call[name[acronym]][<ast.Slice object at 0x7da1b09b7ac0>]] begin[:] <ast.AugAssign object at 0x7da1b09b7a00> <ast.AugAssign object at 0x7da1b09b7190> variable[match] assign[=] call[name[re].search, parameter[name[pattern], name[potential_expansion]]] if compare[name[match] is_not constant[None]] begin[:] call[name[_add_expansion_to_acronym_dict], parameter[name[acronym], call[name[match].group, parameter[constant[1]]], constant[1], name[acronyms]]] continue variable[match] assign[=] call[name[re].search, parameter[name[pattern], name[potential_expansion], name[re].I]] if compare[name[match] is_not constant[None]] begin[:] call[name[_add_expansion_to_acronym_dict], parameter[name[acronym], call[name[match].group, parameter[constant[1]]], constant[2], name[acronyms]]] continue variable[potential_expansion_stripped] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b09b5900>]] variable[match] assign[=] call[name[re].search, parameter[name[pattern], name[potential_expansion_stripped], name[re].I]] if compare[name[match] is_not constant[None]] begin[:] variable[first_expansion_word] assign[=] call[call[name[re].search, parameter[constant[\w+], call[name[match].group, parameter[constant[1]]]]].group, parameter[]] variable[start] assign[=] call[call[name[potential_expansion].lower, parameter[]].rfind, parameter[name[first_expansion_word]]] call[name[_add_expansion_to_acronym_dict], parameter[name[acronym], call[name[potential_expansion]][<ast.Slice object at 0x7da1b09b4fa0>], constant[3], name[acronyms]]] continue variable[reversed_words] assign[=] call[name[_words], 
parameter[name[potential_expansion_stripped]]] call[name[reversed_words].reverse, parameter[]] variable[reversed_acronym] assign[=] call[name[list], parameter[call[name[acronym].lower, parameter[]]]] call[name[reversed_acronym].reverse, parameter[]] variable[index0] assign[=] constant[0] variable[index1] assign[=] constant[0] variable[word] assign[=] constant[] <ast.Try object at 0x7da1b09b4a30> if <ast.UnaryOp object at 0x7da18f09d3c0> begin[:] variable[word] assign[=] constant[] if name[word] begin[:] variable[start] assign[=] call[call[name[potential_expansion].lower, parameter[]].rfind, parameter[name[word]]] call[name[_add_expansion_to_acronym_dict], parameter[name[acronym], call[name[potential_expansion]][<ast.Slice object at 0x7da18f09df00>], constant[4], name[acronyms]]] continue variable[reversed_words] assign[=] call[name[_words], parameter[call[name[potential_expansion].lower, parameter[]]]] call[name[reversed_words].reverse, parameter[]] variable[reversed_acronym] assign[=] call[name[list], parameter[call[name[acronym].lower, parameter[]]]] call[name[reversed_acronym].reverse, parameter[]] variable[index0] assign[=] constant[0] variable[index1] assign[=] constant[0] variable[word] assign[=] constant[] <ast.Try object at 0x7da18f09e8c0> if <ast.UnaryOp object at 0x7da18f09ef80> begin[:] variable[word] assign[=] constant[] if name[word] begin[:] variable[start] assign[=] call[call[name[potential_expansion].lower, parameter[]].rfind, parameter[name[word]]] call[name[_add_expansion_to_acronym_dict], parameter[name[acronym], call[name[potential_expansion]][<ast.Slice object at 0x7da18f09d210>], constant[5], name[acronyms]]] continue return[name[acronyms]]
keyword[def] identifier[get_acronyms] ( identifier[fulltext] ): literal[string] identifier[acronyms] ={} keyword[for] identifier[m] keyword[in] identifier[ACRONYM_BRACKETS_REGEX] . identifier[finditer] ( identifier[fulltext] ): identifier[acronym] = identifier[DOTS_REGEX] . identifier[sub] ( literal[string] , identifier[m] . identifier[group] ( literal[int] )) identifier[potential_expansion] = identifier[fulltext] [ identifier[m] . identifier[start] ()- literal[int] : identifier[m] . identifier[start] ()]. identifier[replace] ( literal[string] , literal[string] ) identifier[potential_expansion] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[potential_expansion] ) identifier[potential_expansion] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[potential_expansion] ) identifier[potential_expansion] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[potential_expansion] ) identifier[potential_expansion] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[potential_expansion] ) identifier[potential_expansion] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[potential_expansion] ) identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[potential_expansion] ) keyword[if] identifier[match] keyword[is] keyword[None] : identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[potential_expansion] ) keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[acronym] keyword[in] identifier[match] . 
identifier[group] ( literal[int] ): keyword[continue] identifier[pattern] = literal[string] keyword[for] identifier[char] keyword[in] identifier[acronym] [:- literal[int] ]: identifier[pattern] += literal[string] % identifier[char] identifier[pattern] += literal[string] % identifier[acronym] [- literal[int] ] keyword[if] identifier[re] . identifier[search] ( identifier[pattern] , identifier[match] . identifier[group] ( literal[int] ), identifier[re] . identifier[I] ) keyword[is] keyword[not] keyword[None] : identifier[_add_expansion_to_acronym_dict] ( identifier[acronym] , identifier[match] . identifier[group] ( literal[int] ), literal[int] , identifier[acronyms] ) keyword[continue] identifier[pattern] = literal[string] keyword[for] identifier[char] keyword[in] identifier[acronym] [:- literal[int] ]: identifier[pattern] += literal[string] % identifier[char] identifier[pattern] += literal[string] % identifier[acronym] [- literal[int] ] identifier[match] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[potential_expansion] ) keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] : identifier[_add_expansion_to_acronym_dict] ( identifier[acronym] , identifier[match] . identifier[group] ( literal[int] ), literal[int] , identifier[acronyms] ) keyword[continue] identifier[match] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[potential_expansion] , identifier[re] . identifier[I] ) keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] : identifier[_add_expansion_to_acronym_dict] ( identifier[acronym] , identifier[match] . identifier[group] ( literal[int] ), literal[int] , identifier[acronyms] ) keyword[continue] identifier[potential_expansion_stripped] = literal[string] . 
identifier[join] ([ identifier[word] keyword[for] identifier[word] keyword[in] identifier[_words] ( identifier[potential_expansion] ) keyword[if] identifier[word] keyword[not] keyword[in] identifier[STOPLIST] ]) identifier[match] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[potential_expansion_stripped] , identifier[re] . identifier[I] ) keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] : identifier[first_expansion_word] = identifier[re] . identifier[search] ( literal[string] , identifier[match] . identifier[group] ( literal[int] )). identifier[group] () identifier[start] = identifier[potential_expansion] . identifier[lower] (). identifier[rfind] ( identifier[first_expansion_word] ) identifier[_add_expansion_to_acronym_dict] ( identifier[acronym] , identifier[potential_expansion] [ identifier[start] :], literal[int] , identifier[acronyms] ) keyword[continue] identifier[reversed_words] = identifier[_words] ( identifier[potential_expansion_stripped] ) identifier[reversed_words] . identifier[reverse] () identifier[reversed_acronym] = identifier[list] ( identifier[acronym] . identifier[lower] ()) identifier[reversed_acronym] . 
identifier[reverse] () identifier[index0] = literal[int] identifier[index1] = literal[int] identifier[word] = literal[string] keyword[try] : keyword[while] identifier[index0] < identifier[len] ( identifier[reversed_acronym] ) keyword[and] identifier[index1] < identifier[len] ( identifier[reversed_words] ): identifier[word] = identifier[reversed_words] [ identifier[index1] ] keyword[if] identifier[index0] + literal[int] < identifier[len] ( identifier[reversed_words] ): identifier[next_word] = identifier[reversed_words] [ identifier[index0] + literal[int] ] keyword[else] : identifier[next_word] = literal[string] identifier[char] = identifier[reversed_acronym] [ identifier[index0] ] keyword[if] identifier[index0] + literal[int] < identifier[len] ( identifier[reversed_acronym] ): identifier[next_char] = identifier[reversed_acronym] [ identifier[index0] + literal[int] ] keyword[else] : identifier[next_char] = literal[string] keyword[if] identifier[char] == identifier[next_char] keyword[and] identifier[word] . identifier[startswith] ( identifier[char] ) keyword[and] identifier[word] . identifier[count] ( identifier[char] )> literal[int] keyword[and] keyword[not] identifier[next_word] . identifier[startswith] ( identifier[char] ): identifier[index0] += literal[int] identifier[index1] += literal[int] keyword[if] identifier[word] . identifier[startswith] ( identifier[char] ): identifier[index0] += literal[int] identifier[index1] += literal[int] keyword[elif] identifier[char] keyword[in] identifier[word] keyword[and] keyword[not] identifier[word] . identifier[endswith] ( identifier[char] ) keyword[and] identifier[word] . identifier[startswith] ( identifier[next_char] ): identifier[index0] += literal[int] identifier[index1] += literal[int] keyword[else] : identifier[word] = literal[string] keyword[break] keyword[except] identifier[IndexError] : identifier[word] = literal[string] keyword[if] keyword[not] identifier[word] . 
identifier[startswith] ( identifier[char] ): identifier[word] = literal[string] keyword[if] identifier[word] : identifier[start] = identifier[potential_expansion] . identifier[lower] (). identifier[rfind] ( identifier[word] ) identifier[_add_expansion_to_acronym_dict] ( identifier[acronym] , identifier[potential_expansion] [ identifier[start] :], literal[int] , identifier[acronyms] ) keyword[continue] identifier[reversed_words] = identifier[_words] ( identifier[potential_expansion] . identifier[lower] ()) identifier[reversed_words] . identifier[reverse] () identifier[reversed_acronym] = identifier[list] ( identifier[acronym] . identifier[lower] ()) identifier[reversed_acronym] . identifier[reverse] () identifier[index0] = literal[int] identifier[index1] = literal[int] identifier[word] = literal[string] keyword[try] : keyword[while] identifier[index0] < identifier[len] ( identifier[reversed_acronym] ) keyword[and] identifier[index1] < identifier[len] ( identifier[reversed_words] ): identifier[word] = identifier[reversed_words] [ identifier[index1] ] keyword[if] identifier[index0] + literal[int] < identifier[len] ( identifier[reversed_words] ): identifier[next_word] = identifier[reversed_words] [ identifier[index0] + literal[int] ] keyword[else] : identifier[next_word] = literal[string] identifier[char] = identifier[reversed_acronym] [ identifier[index0] ] keyword[if] identifier[index0] + literal[int] < identifier[len] ( identifier[reversed_acronym] ): identifier[next_char] = identifier[reversed_acronym] [ identifier[index0] + literal[int] ] keyword[else] : identifier[next_char] = literal[string] keyword[if] identifier[char] == identifier[next_char] keyword[and] identifier[word] . identifier[startswith] ( identifier[char] ) keyword[and] identifier[word] . identifier[count] ( identifier[char] )> literal[int] keyword[and] keyword[not] identifier[next_word] . 
identifier[startswith] ( identifier[char] ): identifier[index0] += literal[int] identifier[index1] += literal[int] keyword[if] identifier[word] . identifier[startswith] ( identifier[char] ): identifier[index0] += literal[int] identifier[index1] += literal[int] keyword[elif] identifier[char] keyword[in] identifier[word] keyword[and] keyword[not] identifier[word] . identifier[endswith] ( identifier[char] ) keyword[and] identifier[word] . identifier[startswith] ( identifier[next_char] ): identifier[index0] += literal[int] identifier[index1] += literal[int] keyword[else] : identifier[word] = literal[string] keyword[break] keyword[except] identifier[IndexError] : identifier[word] = literal[string] keyword[if] keyword[not] identifier[word] . identifier[startswith] ( identifier[char] ): identifier[word] = literal[string] keyword[if] identifier[word] : identifier[start] = identifier[potential_expansion] . identifier[lower] (). identifier[rfind] ( identifier[word] ) identifier[_add_expansion_to_acronym_dict] ( identifier[acronym] , identifier[potential_expansion] [ identifier[start] :], literal[int] , identifier[acronyms] ) keyword[continue] keyword[return] identifier[acronyms]
def get_acronyms(fulltext): """Find acronyms and expansions from the fulltext. If needed, acronyms can already contain a dictionary of previously found acronyms that will be merged with the current results. """ acronyms = {} for m in ACRONYM_BRACKETS_REGEX.finditer(fulltext): acronym = DOTS_REGEX.sub('', m.group(1)) potential_expansion = fulltext[m.start() - 80:m.start()].replace('\n', ' ') # Strip potential_expansion = re.sub('(\\W).(\\W)', '\x01\x02', potential_expansion) potential_expansion = re.sub('(\\w)\\(s\\)\\W', '\x01', potential_expansion) potential_expansion = re.sub('[^\\w\'"]+$', '', potential_expansion) potential_expansion = re.sub('[[(].+[\\])]', '', potential_expansion) potential_expansion = re.sub(' {2,}', ' ', potential_expansion) # LEVEL 0: expansion between quotes # Double quotes match = re.search('"([^"]+)["]$', potential_expansion) if match is None: # Single quotes match = re.search('\'([^"]+)[\']$', potential_expansion) # depends on [control=['if'], data=['match']] if match is not None: if acronym in match.group(1): continue # depends on [control=['if'], data=[]] pattern = '' for char in acronym[:-1]: pattern += '%s\\w+\\W*' % char # depends on [control=['for'], data=['char']] pattern += '%s\\w+' % acronym[-1] if re.search(pattern, match.group(1), re.I) is not None: _add_expansion_to_acronym_dict(acronym, match.group(1), 0, acronyms) # depends on [control=['if'], data=[]] continue # depends on [control=['if'], data=['match']] pattern = '\\W(' for char in acronym[:-1]: pattern += '%s\\w+\\W+' % char # depends on [control=['for'], data=['char']] pattern += '%s\\w+)$' % acronym[-1] # LEVEL 1: expansion with uppercase initials match = re.search(pattern, potential_expansion) if match is not None: _add_expansion_to_acronym_dict(acronym, match.group(1), 1, acronyms) continue # depends on [control=['if'], data=['match']] # LEVEL 2: expansion with initials match = re.search(pattern, potential_expansion, re.I) if match is not None: 
_add_expansion_to_acronym_dict(acronym, match.group(1), 2, acronyms) continue # depends on [control=['if'], data=['match']] # LEVEL 3: expansion with initials and STOPLIST potential_expansion_stripped = ' '.join([word for word in _words(potential_expansion) if word not in STOPLIST]) match = re.search(pattern, potential_expansion_stripped, re.I) if match is not None: first_expansion_word = re.search('\\w+', match.group(1)).group() start = potential_expansion.lower().rfind(first_expansion_word) _add_expansion_to_acronym_dict(acronym, potential_expansion[start:], 3, acronyms) continue # depends on [control=['if'], data=['match']] # LEVEL 4: expansion with fuzzy initials and stoplist reversed_words = _words(potential_expansion_stripped) reversed_words.reverse() reversed_acronym = list(acronym.lower()) reversed_acronym.reverse() index0 = 0 index1 = 0 word = '' try: while index0 < len(reversed_acronym) and index1 < len(reversed_words): word = reversed_words[index1] if index0 + 1 < len(reversed_words): next_word = reversed_words[index0 + 1] # depends on [control=['if'], data=[]] else: next_word = '_' char = reversed_acronym[index0] if index0 + 1 < len(reversed_acronym): next_char = reversed_acronym[index0 + 1] # depends on [control=['if'], data=[]] else: next_char = '_' if char == next_char and word.startswith(char) and (word.count(char) > 1) and (not next_word.startswith(char)): index0 += 2 index1 += 1 # depends on [control=['if'], data=[]] if word.startswith(char): index0 += 1 index1 += 1 # depends on [control=['if'], data=[]] elif char in word and (not word.endswith(char)) and word.startswith(next_char): index0 += 2 index1 += 1 # depends on [control=['if'], data=[]] else: word = '' break # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] except IndexError: word = '' # depends on [control=['except'], data=[]] if not word.startswith(char): word = '' # depends on [control=['if'], data=[]] if word: start = 
potential_expansion.lower().rfind(word) _add_expansion_to_acronym_dict(acronym, potential_expansion[start:], 4, acronyms) continue # depends on [control=['if'], data=[]] # LEVEL 5: expansion with fuzzy initials reversed_words = _words(potential_expansion.lower()) reversed_words.reverse() reversed_acronym = list(acronym.lower()) reversed_acronym.reverse() index0 = 0 index1 = 0 word = '' try: while index0 < len(reversed_acronym) and index1 < len(reversed_words): word = reversed_words[index1] if index0 + 1 < len(reversed_words): next_word = reversed_words[index0 + 1] # depends on [control=['if'], data=[]] else: next_word = '' char = reversed_acronym[index0] if index0 + 1 < len(reversed_acronym): next_char = reversed_acronym[index0 + 1] # depends on [control=['if'], data=[]] else: next_char = '' if char == next_char and word.startswith(char) and (word.count(char) > 1) and (not next_word.startswith(char)): index0 += 2 index1 += 1 # depends on [control=['if'], data=[]] if word.startswith(char): index0 += 1 index1 += 1 # depends on [control=['if'], data=[]] elif char in word and (not word.endswith(char)) and word.startswith(next_char): index0 += 2 index1 += 1 # depends on [control=['if'], data=[]] else: word = '' break # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] except IndexError: word = '' # depends on [control=['except'], data=[]] if not word.startswith(char): word = '' # depends on [control=['if'], data=[]] if word: start = potential_expansion.lower().rfind(word) _add_expansion_to_acronym_dict(acronym, potential_expansion[start:], 5, acronyms) continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']] return acronyms
def add_to_starred(self, starred): """ :calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_ :param starred: :class:`github.Repository.Repository` :rtype: None """ assert isinstance(starred, github.Repository.Repository), starred headers, data = self._requester.requestJsonAndCheck( "PUT", "/user/starred/" + starred._identity )
def function[add_to_starred, parameter[self, starred]]: constant[ :calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_ :param starred: :class:`github.Repository.Repository` :rtype: None ] assert[call[name[isinstance], parameter[name[starred], name[github].Repository.Repository]]] <ast.Tuple object at 0x7da1b1f49660> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[PUT], binary_operation[constant[/user/starred/] + name[starred]._identity]]]
keyword[def] identifier[add_to_starred] ( identifier[self] , identifier[starred] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[starred] , identifier[github] . identifier[Repository] . identifier[Repository] ), identifier[starred] identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] ( literal[string] , literal[string] + identifier[starred] . identifier[_identity] )
def add_to_starred(self, starred): """ :calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_ :param starred: :class:`github.Repository.Repository` :rtype: None """ assert isinstance(starred, github.Repository.Repository), starred (headers, data) = self._requester.requestJsonAndCheck('PUT', '/user/starred/' + starred._identity)
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') subscription_id = azurerm.get_subscription_from_cli() # authenticate access_token = azurerm.get_access_token_from_cli() # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) else: if operation == 'detach': new_model = detach_model(vmssvm_model, lun) # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201: sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
def function[main, parameter[]]: constant[Main routine.] variable[arg_parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[arg_parser].add_argument, parameter[constant[--vmssname], constant[-n]]] call[name[arg_parser].add_argument, parameter[constant[--rgname], constant[-g]]] call[name[arg_parser].add_argument, parameter[constant[--operation], constant[-o]]] call[name[arg_parser].add_argument, parameter[constant[--vmid], constant[-i]]] call[name[arg_parser].add_argument, parameter[constant[--lun], constant[-l]]] call[name[arg_parser].add_argument, parameter[constant[--diskname], constant[-d]]] variable[args] assign[=] call[name[arg_parser].parse_args, parameter[]] variable[vmssname] assign[=] name[args].vmssname variable[rgname] assign[=] name[args].rgname variable[operation] assign[=] name[args].operation variable[vmid] assign[=] name[args].vmid variable[lun] assign[=] call[name[int], parameter[name[args].lun]] variable[diskname] assign[=] name[args].diskname if <ast.BoolOp object at 0x7da1b04668c0> begin[:] call[name[sys].exit, parameter[constant[--operation must be attach or detach]]] if <ast.BoolOp object at 0x7da1b0465930> begin[:] call[name[sys].exit, parameter[constant[--diskname is required for attach operation.]]] variable[subscription_id] assign[=] call[name[azurerm].get_subscription_from_cli, parameter[]] variable[access_token] assign[=] call[name[azurerm].get_access_token_from_cli, parameter[]] variable[vmssvm_model] assign[=] call[name[azurerm].get_vmss_vm, parameter[name[access_token], name[subscription_id], name[rgname], name[vmssname], name[vmid]]] if compare[name[operation] equal[==] constant[attach]] begin[:] variable[new_model] assign[=] call[name[attach_model], parameter[name[subscription_id], name[rgname], name[vmssvm_model], name[diskname], name[lun]]] variable[rmreturn] assign[=] call[name[azurerm].put_vmss_vm, parameter[name[access_token], name[subscription_id], name[rgname], name[vmssname], name[vmid], 
name[new_model]]] if compare[name[rmreturn].status_code not_equal[!=] constant[201]] begin[:] call[name[sys].exit, parameter[binary_operation[binary_operation[binary_operation[constant[Error ] + call[name[str], parameter[name[rmreturn].status_code]]] + constant[ creating VM. ]] + name[rmreturn].text]]] call[name[print], parameter[call[name[json].dumps, parameter[name[rmreturn]]]]]
keyword[def] identifier[main] (): literal[string] identifier[arg_parser] = identifier[argparse] . identifier[ArgumentParser] () identifier[arg_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[arg_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[arg_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[arg_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[arg_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[arg_parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[required] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[args] = identifier[arg_parser] . identifier[parse_args] () identifier[vmssname] = identifier[args] . identifier[vmssname] identifier[rgname] = identifier[args] . identifier[rgname] identifier[operation] = identifier[args] . identifier[operation] identifier[vmid] = identifier[args] . identifier[vmid] identifier[lun] = identifier[int] ( identifier[args] . identifier[lun] ) identifier[diskname] = identifier[args] . identifier[diskname] keyword[if] identifier[operation] != literal[string] keyword[and] identifier[operation] != literal[string] : identifier[sys] . 
identifier[exit] ( literal[string] ) keyword[if] identifier[diskname] keyword[is] keyword[None] keyword[and] identifier[operation] == literal[string] : identifier[sys] . identifier[exit] ( literal[string] ) identifier[subscription_id] = identifier[azurerm] . identifier[get_subscription_from_cli] () identifier[access_token] = identifier[azurerm] . identifier[get_access_token_from_cli] () identifier[vmssvm_model] = identifier[azurerm] . identifier[get_vmss_vm] ( identifier[access_token] , identifier[subscription_id] , identifier[rgname] , identifier[vmssname] , identifier[vmid] ) keyword[if] identifier[operation] == literal[string] : identifier[new_model] = identifier[attach_model] ( identifier[subscription_id] , identifier[rgname] , identifier[vmssvm_model] , identifier[diskname] , identifier[lun] ) keyword[else] : keyword[if] identifier[operation] == literal[string] : identifier[new_model] = identifier[detach_model] ( identifier[vmssvm_model] , identifier[lun] ) identifier[rmreturn] = identifier[azurerm] . identifier[put_vmss_vm] ( identifier[access_token] , identifier[subscription_id] , identifier[rgname] , identifier[vmssname] , identifier[vmid] , identifier[new_model] ) keyword[if] identifier[rmreturn] . identifier[status_code] != literal[int] : identifier[sys] . identifier[exit] ( literal[string] + identifier[str] ( identifier[rmreturn] . identifier[status_code] )+ literal[string] + identifier[rmreturn] . identifier[text] ) identifier[print] ( identifier[json] . identifier[dumps] ( identifier[rmreturn] , identifier[sort_keys] = keyword[False] , identifier[indent] = literal[int] , identifier[separators] =( literal[string] , literal[string] )))
def main(): """Main routine.""" # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') # depends on [control=['if'], data=[]] if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') # depends on [control=['if'], data=[]] subscription_id = azurerm.get_subscription_from_cli() # authenticate access_token = azurerm.get_access_token_from_cli() # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) # depends on [control=['if'], data=[]] elif operation == 'detach': new_model = detach_model(vmssvm_model, lun) # depends on [control=['if'], data=[]] # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201: sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) # depends on [control=['if'], data=[]] print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
def trace_symlink_target(link): """ Given a file that is known to be a symlink, trace it to its ultimate target. Raises TargetNotPresent when the target cannot be determined. Raises ValueError when the specified link is not a symlink. """ if not is_symlink(link): raise ValueError("link must point to a symlink on the system") while is_symlink(link): orig = os.path.dirname(link) link = readlink(link) link = resolve_path(link, orig) return link
def function[trace_symlink_target, parameter[link]]: constant[ Given a file that is known to be a symlink, trace it to its ultimate target. Raises TargetNotPresent when the target cannot be determined. Raises ValueError when the specified link is not a symlink. ] if <ast.UnaryOp object at 0x7da1b25345e0> begin[:] <ast.Raise object at 0x7da1b2535270> while call[name[is_symlink], parameter[name[link]]] begin[:] variable[orig] assign[=] call[name[os].path.dirname, parameter[name[link]]] variable[link] assign[=] call[name[readlink], parameter[name[link]]] variable[link] assign[=] call[name[resolve_path], parameter[name[link], name[orig]]] return[name[link]]
keyword[def] identifier[trace_symlink_target] ( identifier[link] ): literal[string] keyword[if] keyword[not] identifier[is_symlink] ( identifier[link] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[while] identifier[is_symlink] ( identifier[link] ): identifier[orig] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[link] ) identifier[link] = identifier[readlink] ( identifier[link] ) identifier[link] = identifier[resolve_path] ( identifier[link] , identifier[orig] ) keyword[return] identifier[link]
def trace_symlink_target(link): """ Given a file that is known to be a symlink, trace it to its ultimate target. Raises TargetNotPresent when the target cannot be determined. Raises ValueError when the specified link is not a symlink. """ if not is_symlink(link): raise ValueError('link must point to a symlink on the system') # depends on [control=['if'], data=[]] while is_symlink(link): orig = os.path.dirname(link) link = readlink(link) link = resolve_path(link, orig) # depends on [control=['while'], data=[]] return link
def iter_links_element_text(cls, element): '''Get the element text as a link.''' if element.text: link_type = identify_link_type(element.text) yield LinkInfo( element=element, tag=element.tag, attrib=None, link=element.text, inline=False, linked=True, base_link=None, value_type='plain', link_type=link_type )
def function[iter_links_element_text, parameter[cls, element]]: constant[Get the element text as a link.] if name[element].text begin[:] variable[link_type] assign[=] call[name[identify_link_type], parameter[name[element].text]] <ast.Yield object at 0x7da1b26af760>
keyword[def] identifier[iter_links_element_text] ( identifier[cls] , identifier[element] ): literal[string] keyword[if] identifier[element] . identifier[text] : identifier[link_type] = identifier[identify_link_type] ( identifier[element] . identifier[text] ) keyword[yield] identifier[LinkInfo] ( identifier[element] = identifier[element] , identifier[tag] = identifier[element] . identifier[tag] , identifier[attrib] = keyword[None] , identifier[link] = identifier[element] . identifier[text] , identifier[inline] = keyword[False] , identifier[linked] = keyword[True] , identifier[base_link] = keyword[None] , identifier[value_type] = literal[string] , identifier[link_type] = identifier[link_type] )
def iter_links_element_text(cls, element): """Get the element text as a link.""" if element.text: link_type = identify_link_type(element.text) yield LinkInfo(element=element, tag=element.tag, attrib=None, link=element.text, inline=False, linked=True, base_link=None, value_type='plain', link_type=link_type) # depends on [control=['if'], data=[]]
def resize_avatar(self, img, base_width): """Resize an avatar. :param img: The image that needs to be resize. :param base_width: The width of output image. """ w_percent = (base_width / float(img.size[0])) h_size = int((float(img.size[1]) * float(w_percent))) img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS) return img
def function[resize_avatar, parameter[self, img, base_width]]: constant[Resize an avatar. :param img: The image that needs to be resize. :param base_width: The width of output image. ] variable[w_percent] assign[=] binary_operation[name[base_width] / call[name[float], parameter[call[name[img].size][constant[0]]]]] variable[h_size] assign[=] call[name[int], parameter[binary_operation[call[name[float], parameter[call[name[img].size][constant[1]]]] * call[name[float], parameter[name[w_percent]]]]]] variable[img] assign[=] call[name[img].resize, parameter[tuple[[<ast.Name object at 0x7da18c4ce170>, <ast.Name object at 0x7da18c4cf850>]], name[PIL].Image.ANTIALIAS]] return[name[img]]
keyword[def] identifier[resize_avatar] ( identifier[self] , identifier[img] , identifier[base_width] ): literal[string] identifier[w_percent] =( identifier[base_width] / identifier[float] ( identifier[img] . identifier[size] [ literal[int] ])) identifier[h_size] = identifier[int] (( identifier[float] ( identifier[img] . identifier[size] [ literal[int] ])* identifier[float] ( identifier[w_percent] ))) identifier[img] = identifier[img] . identifier[resize] (( identifier[base_width] , identifier[h_size] ), identifier[PIL] . identifier[Image] . identifier[ANTIALIAS] ) keyword[return] identifier[img]
def resize_avatar(self, img, base_width): """Resize an avatar. :param img: The image that needs to be resize. :param base_width: The width of output image. """ w_percent = base_width / float(img.size[0]) h_size = int(float(img.size[1]) * float(w_percent)) img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS) return img
def extracted_array_2d_from_array_2d_and_coordinates(array_2d, y0, y1, x0, x1): """Resize an array to a new size by extracting a sub-set of the array. The extracted input coordinates use NumPy convention, such that the upper values should be specified as +1 the \ dimensions of the extracted array. In the example below, an array of size (5,5) is extracted using the coordinates y0=1, y1=4, x0=1, x1=4. This extracts an array of dimensions (2,2) and is equivalent to array_2d[1:4, 1:4] Parameters ---------- array_2d : ndarray The 2D array that is an array is extracted from. y0 : int The lower row number (e.g. the higher y-coodinate) of the array that is extracted for the resize. y1 : int The upper row number (e.g. the lower y-coodinate) of the array that is extracted for the resize. x0 : int The lower column number (e.g. the lower x-coodinate) of the array that is extracted for the resize. x1 : int The upper column number (e.g. the higher x-coodinate) of the array that is extracted for the resize. Returns ------- ndarray The extracted 2D array from the input 2D array. Examples -------- array_2d = np.ones((5,5)) extracted_array = extract_array_2d(array_2d=array_2d, y0=1, y1=4, x0=1, x1=4) """ new_shape = (y1-y0, x1-x0) resized_array = np.zeros(shape=new_shape) for y_resized, y in enumerate(range(y0, y1)): for x_resized, x in enumerate(range(x0, x1)): resized_array[y_resized, x_resized] = array_2d[y, x] return resized_array
def function[extracted_array_2d_from_array_2d_and_coordinates, parameter[array_2d, y0, y1, x0, x1]]: constant[Resize an array to a new size by extracting a sub-set of the array. The extracted input coordinates use NumPy convention, such that the upper values should be specified as +1 the dimensions of the extracted array. In the example below, an array of size (5,5) is extracted using the coordinates y0=1, y1=4, x0=1, x1=4. This extracts an array of dimensions (2,2) and is equivalent to array_2d[1:4, 1:4] Parameters ---------- array_2d : ndarray The 2D array that is an array is extracted from. y0 : int The lower row number (e.g. the higher y-coodinate) of the array that is extracted for the resize. y1 : int The upper row number (e.g. the lower y-coodinate) of the array that is extracted for the resize. x0 : int The lower column number (e.g. the lower x-coodinate) of the array that is extracted for the resize. x1 : int The upper column number (e.g. the higher x-coodinate) of the array that is extracted for the resize. Returns ------- ndarray The extracted 2D array from the input 2D array. 
Examples -------- array_2d = np.ones((5,5)) extracted_array = extract_array_2d(array_2d=array_2d, y0=1, y1=4, x0=1, x1=4) ] variable[new_shape] assign[=] tuple[[<ast.BinOp object at 0x7da204620490>, <ast.BinOp object at 0x7da18dc9b460>]] variable[resized_array] assign[=] call[name[np].zeros, parameter[]] for taget[tuple[[<ast.Name object at 0x7da18dc99ed0>, <ast.Name object at 0x7da18dc98580>]]] in starred[call[name[enumerate], parameter[call[name[range], parameter[name[y0], name[y1]]]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18dc983d0>, <ast.Name object at 0x7da18dc98700>]]] in starred[call[name[enumerate], parameter[call[name[range], parameter[name[x0], name[x1]]]]]] begin[:] call[name[resized_array]][tuple[[<ast.Name object at 0x7da18dc98880>, <ast.Name object at 0x7da18dc9a410>]]] assign[=] call[name[array_2d]][tuple[[<ast.Name object at 0x7da18dc99c90>, <ast.Name object at 0x7da18dc9a860>]]] return[name[resized_array]]
keyword[def] identifier[extracted_array_2d_from_array_2d_and_coordinates] ( identifier[array_2d] , identifier[y0] , identifier[y1] , identifier[x0] , identifier[x1] ): literal[string] identifier[new_shape] =( identifier[y1] - identifier[y0] , identifier[x1] - identifier[x0] ) identifier[resized_array] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[new_shape] ) keyword[for] identifier[y_resized] , identifier[y] keyword[in] identifier[enumerate] ( identifier[range] ( identifier[y0] , identifier[y1] )): keyword[for] identifier[x_resized] , identifier[x] keyword[in] identifier[enumerate] ( identifier[range] ( identifier[x0] , identifier[x1] )): identifier[resized_array] [ identifier[y_resized] , identifier[x_resized] ]= identifier[array_2d] [ identifier[y] , identifier[x] ] keyword[return] identifier[resized_array]
def extracted_array_2d_from_array_2d_and_coordinates(array_2d, y0, y1, x0, x1): """Resize an array to a new size by extracting a sub-set of the array. The extracted input coordinates use NumPy convention, such that the upper values should be specified as +1 the dimensions of the extracted array. In the example below, an array of size (5,5) is extracted using the coordinates y0=1, y1=4, x0=1, x1=4. This extracts an array of dimensions (2,2) and is equivalent to array_2d[1:4, 1:4] Parameters ---------- array_2d : ndarray The 2D array that is an array is extracted from. y0 : int The lower row number (e.g. the higher y-coodinate) of the array that is extracted for the resize. y1 : int The upper row number (e.g. the lower y-coodinate) of the array that is extracted for the resize. x0 : int The lower column number (e.g. the lower x-coodinate) of the array that is extracted for the resize. x1 : int The upper column number (e.g. the higher x-coodinate) of the array that is extracted for the resize. Returns ------- ndarray The extracted 2D array from the input 2D array. Examples -------- array_2d = np.ones((5,5)) extracted_array = extract_array_2d(array_2d=array_2d, y0=1, y1=4, x0=1, x1=4) """ new_shape = (y1 - y0, x1 - x0) resized_array = np.zeros(shape=new_shape) for (y_resized, y) in enumerate(range(y0, y1)): for (x_resized, x) in enumerate(range(x0, x1)): resized_array[y_resized, x_resized] = array_2d[y, x] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] return resized_array
def json(self): """ Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`. """ return { "observed_length": _json_safe_float(self.observed_length), "predicted_length": _json_safe_float(self.predicted_length), "merged_length": _json_safe_float(self.merged_length), "num_parameters": _json_safe_float(self.num_parameters), "observed_mean": _json_safe_float(self.observed_mean), "predicted_mean": _json_safe_float(self.predicted_mean), "observed_variance": _json_safe_float(self.observed_variance), "predicted_variance": _json_safe_float(self.predicted_variance), "observed_skew": _json_safe_float(self.observed_skew), "predicted_skew": _json_safe_float(self.predicted_skew), "observed_kurtosis": _json_safe_float(self.observed_kurtosis), "predicted_kurtosis": _json_safe_float(self.predicted_kurtosis), "observed_cvstd": _json_safe_float(self.observed_cvstd), "predicted_cvstd": _json_safe_float(self.predicted_cvstd), "r_squared": _json_safe_float(self.r_squared), "r_squared_adj": _json_safe_float(self.r_squared_adj), "rmse": _json_safe_float(self.rmse), "rmse_adj": _json_safe_float(self.rmse_adj), "cvrmse": _json_safe_float(self.cvrmse), "cvrmse_adj": _json_safe_float(self.cvrmse_adj), "mape": _json_safe_float(self.mape), "mape_no_zeros": _json_safe_float(self.mape_no_zeros), "num_meter_zeros": _json_safe_float(self.num_meter_zeros), "nmae": _json_safe_float(self.nmae), "nmbe": _json_safe_float(self.nmbe), "autocorr_resid": _json_safe_float(self.autocorr_resid), }
def function[json, parameter[self]]: constant[ Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`. ] return[dictionary[[<ast.Constant object at 0x7da20cabc550>, <ast.Constant object at 0x7da20cabd6f0>, <ast.Constant object at 0x7da20cabc520>, <ast.Constant object at 0x7da20cabef50>, <ast.Constant object at 0x7da20cabe560>, <ast.Constant object at 0x7da20cabdf00>, <ast.Constant object at 0x7da20cabf640>, <ast.Constant object at 0x7da20cabf490>, <ast.Constant object at 0x7da20cabe6e0>, <ast.Constant object at 0x7da20cabee00>, <ast.Constant object at 0x7da20cabe380>, <ast.Constant object at 0x7da20cabecb0>, <ast.Constant object at 0x7da20cabfd00>, <ast.Constant object at 0x7da20cabe4d0>, <ast.Constant object at 0x7da20cabda50>, <ast.Constant object at 0x7da20cabe650>, <ast.Constant object at 0x7da20cabfbe0>, <ast.Constant object at 0x7da20cabf0d0>, <ast.Constant object at 0x7da20cabd990>, <ast.Constant object at 0x7da20cabf940>, <ast.Constant object at 0x7da20cabd570>, <ast.Constant object at 0x7da20cabf4c0>, <ast.Constant object at 0x7da20cabd930>, <ast.Constant object at 0x7da20cabe920>, <ast.Constant object at 0x7da20cabc7f0>, <ast.Constant object at 0x7da20cabf430>], [<ast.Call object at 0x7da20cabeb30>, <ast.Call object at 0x7da20cabdf90>, <ast.Call object at 0x7da20cabeec0>, <ast.Call object at 0x7da20cabf5e0>, <ast.Call object at 0x7da20cabcbe0>, <ast.Call object at 0x7da20cabce20>, <ast.Call object at 0x7da20cabc2b0>, <ast.Call object at 0x7da20cabfa90>, <ast.Call object at 0x7da20cabe9b0>, <ast.Call object at 0x7da20cabd750>, <ast.Call object at 0x7da20cabf6a0>, <ast.Call object at 0x7da20cabe080>, <ast.Call object at 0x7da20cabf460>, <ast.Call object at 0x7da20cabf670>, <ast.Call object at 0x7da20cabee60>, <ast.Call object at 0x7da20cabc250>, <ast.Call object at 0x7da20cabd8d0>, <ast.Call object at 0x7da20cabc5e0>, <ast.Call object at 0x7da2047ea890>, <ast.Call 
object at 0x7da2047e8d60>, <ast.Call object at 0x7da2047e9120>, <ast.Call object at 0x7da2047eb2e0>, <ast.Call object at 0x7da2047ea140>, <ast.Call object at 0x7da2047ebe50>, <ast.Call object at 0x7da2047ea7a0>, <ast.Call object at 0x7da2047e9210>]]]
keyword[def] identifier[json] ( identifier[self] ): literal[string] keyword[return] { literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[observed_length] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[predicted_length] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[merged_length] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[num_parameters] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[observed_mean] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[predicted_mean] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[observed_variance] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[predicted_variance] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[observed_skew] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[predicted_skew] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[observed_kurtosis] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[predicted_kurtosis] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[observed_cvstd] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[predicted_cvstd] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[r_squared] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[r_squared_adj] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[rmse] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[rmse_adj] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . 
identifier[cvrmse] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[cvrmse_adj] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[mape] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[mape_no_zeros] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[num_meter_zeros] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[nmae] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[nmbe] ), literal[string] : identifier[_json_safe_float] ( identifier[self] . identifier[autocorr_resid] ), }
def json(self): """ Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`. """ return {'observed_length': _json_safe_float(self.observed_length), 'predicted_length': _json_safe_float(self.predicted_length), 'merged_length': _json_safe_float(self.merged_length), 'num_parameters': _json_safe_float(self.num_parameters), 'observed_mean': _json_safe_float(self.observed_mean), 'predicted_mean': _json_safe_float(self.predicted_mean), 'observed_variance': _json_safe_float(self.observed_variance), 'predicted_variance': _json_safe_float(self.predicted_variance), 'observed_skew': _json_safe_float(self.observed_skew), 'predicted_skew': _json_safe_float(self.predicted_skew), 'observed_kurtosis': _json_safe_float(self.observed_kurtosis), 'predicted_kurtosis': _json_safe_float(self.predicted_kurtosis), 'observed_cvstd': _json_safe_float(self.observed_cvstd), 'predicted_cvstd': _json_safe_float(self.predicted_cvstd), 'r_squared': _json_safe_float(self.r_squared), 'r_squared_adj': _json_safe_float(self.r_squared_adj), 'rmse': _json_safe_float(self.rmse), 'rmse_adj': _json_safe_float(self.rmse_adj), 'cvrmse': _json_safe_float(self.cvrmse), 'cvrmse_adj': _json_safe_float(self.cvrmse_adj), 'mape': _json_safe_float(self.mape), 'mape_no_zeros': _json_safe_float(self.mape_no_zeros), 'num_meter_zeros': _json_safe_float(self.num_meter_zeros), 'nmae': _json_safe_float(self.nmae), 'nmbe': _json_safe_float(self.nmbe), 'autocorr_resid': _json_safe_float(self.autocorr_resid)}
def api_exception(e): """ Returns the proper Exception class for the given kubernetes.client.rest.ApiException object https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#success-codes """ _, _, exc_traceback = sys.exc_info() tb = '\n'.join(traceback.format_tb(exc_traceback)) return { 400: BadRequestError, 401: UnauthorizedError, 403: ForbiddenError, 404: NotFoundError, 405: MethodNotAllowedError, 409: ConflictError, 410: GoneError, 422: UnprocessibleEntityError, 429: TooManyRequestsError, 500: InternalServerError, 503: ServiceUnavailableError, 504: ServerTimeoutError, }.get(e.status, DynamicApiError)(e, tb)
def function[api_exception, parameter[e]]: constant[ Returns the proper Exception class for the given kubernetes.client.rest.ApiException object https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#success-codes ] <ast.Tuple object at 0x7da18dc98f40> assign[=] call[name[sys].exc_info, parameter[]] variable[tb] assign[=] call[constant[ ].join, parameter[call[name[traceback].format_tb, parameter[name[exc_traceback]]]]] return[call[call[dictionary[[<ast.Constant object at 0x7da18dc9ada0>, <ast.Constant object at 0x7da18dc9b9a0>, <ast.Constant object at 0x7da18dc9bb80>, <ast.Constant object at 0x7da18dc9a9e0>, <ast.Constant object at 0x7da18dc980a0>, <ast.Constant object at 0x7da18dc99f90>, <ast.Constant object at 0x7da18dc9aad0>, <ast.Constant object at 0x7da18dc98970>, <ast.Constant object at 0x7da18dc9a8f0>, <ast.Constant object at 0x7da18dc98fa0>, <ast.Constant object at 0x7da18dc98820>, <ast.Constant object at 0x7da18dc9ae00>], [<ast.Name object at 0x7da18dc9bfa0>, <ast.Name object at 0x7da18dc99ae0>, <ast.Name object at 0x7da18dc9a740>, <ast.Name object at 0x7da18dc99b40>, <ast.Name object at 0x7da18dc994b0>, <ast.Name object at 0x7da18dc99b10>, <ast.Name object at 0x7da18dc99840>, <ast.Name object at 0x7da18dc992d0>, <ast.Name object at 0x7da18dc9b130>, <ast.Name object at 0x7da18dc9bc10>, <ast.Name object at 0x7da18dc9afb0>, <ast.Name object at 0x7da18dc9b1c0>]].get, parameter[name[e].status, name[DynamicApiError]]], parameter[name[e], name[tb]]]]
keyword[def] identifier[api_exception] ( identifier[e] ): literal[string] identifier[_] , identifier[_] , identifier[exc_traceback] = identifier[sys] . identifier[exc_info] () identifier[tb] = literal[string] . identifier[join] ( identifier[traceback] . identifier[format_tb] ( identifier[exc_traceback] )) keyword[return] { literal[int] : identifier[BadRequestError] , literal[int] : identifier[UnauthorizedError] , literal[int] : identifier[ForbiddenError] , literal[int] : identifier[NotFoundError] , literal[int] : identifier[MethodNotAllowedError] , literal[int] : identifier[ConflictError] , literal[int] : identifier[GoneError] , literal[int] : identifier[UnprocessibleEntityError] , literal[int] : identifier[TooManyRequestsError] , literal[int] : identifier[InternalServerError] , literal[int] : identifier[ServiceUnavailableError] , literal[int] : identifier[ServerTimeoutError] , }. identifier[get] ( identifier[e] . identifier[status] , identifier[DynamicApiError] )( identifier[e] , identifier[tb] )
def api_exception(e): """ Returns the proper Exception class for the given kubernetes.client.rest.ApiException object https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#success-codes """ (_, _, exc_traceback) = sys.exc_info() tb = '\n'.join(traceback.format_tb(exc_traceback)) return {400: BadRequestError, 401: UnauthorizedError, 403: ForbiddenError, 404: NotFoundError, 405: MethodNotAllowedError, 409: ConflictError, 410: GoneError, 422: UnprocessibleEntityError, 429: TooManyRequestsError, 500: InternalServerError, 503: ServiceUnavailableError, 504: ServerTimeoutError}.get(e.status, DynamicApiError)(e, tb)
def solveAgents(self):
    '''
    Solves the microeconomic problem for all AgentTypes in this market.

    Attempts the parallel implementation first; on any failure it prints a
    one-time warning and falls back to the serial implementation.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    try:
        # Preferred path: solve every agent type in parallel.
        multiThreadCommands(self.agents, ['solve()'])
    except Exception as err:
        if self.print_parallel_error_once:
            # Set flag to False so this warning is only printed once.
            self.print_parallel_error_once = False
            # Fixed typo in the original message ("multiTreadCommands").
            print("**** WARNING: could not execute multiThreadCommands in "
                  "HARK.core.Market.solveAgents(), so using the serial version "
                  "instead. This will likely be slower. The "
                  "multiThreadCommands() function failed with the following "
                  "error:", '\n    ', sys.exc_info()[0], ':', err)
        # Serial fallback: runs the same commands one agent type at a time.
        multiThreadCommandsFake(self.agents, ['solve()'])
def function[solveAgents, parameter[self]]: constant[ Solves the microeconomic problem for all AgentTypes in this market. Parameters ---------- None Returns ------- None ] <ast.Try object at 0x7da204566620>
keyword[def] identifier[solveAgents] ( identifier[self] ): literal[string] keyword[try] : identifier[multiThreadCommands] ( identifier[self] . identifier[agents] ,[ literal[string] ]) keyword[except] identifier[Exception] keyword[as] identifier[err] : keyword[if] identifier[self] . identifier[print_parallel_error_once] : identifier[self] . identifier[print_parallel_error_once] = keyword[False] identifier[print] ( literal[string] , literal[string] , identifier[sys] . identifier[exc_info] ()[ literal[int] ], literal[string] , identifier[err] ) identifier[multiThreadCommandsFake] ( identifier[self] . identifier[agents] ,[ literal[string] ])
def solveAgents(self): """ Solves the microeconomic problem for all AgentTypes in this market. Parameters ---------- None Returns ------- None """ #for this_type in self.agents: # this_type.solve() try: multiThreadCommands(self.agents, ['solve()']) # depends on [control=['try'], data=[]] except Exception as err: if self.print_parallel_error_once: # Set flag to False so this is only printed once. self.print_parallel_error_once = False print('**** WARNING: could not execute multiThreadCommands in HARK.core.Market.solveAgents(), so using the serial version instead. This will likely be slower. The multiTreadCommands() functions failed with the following error:', '\n ', sys.exc_info()[0], ':', err) #sys.exc_info()[0]) # depends on [control=['if'], data=[]] multiThreadCommandsFake(self.agents, ['solve()']) # depends on [control=['except'], data=['err']]
def add_nodes(self, node_name_list, dataframe=False):
    """
    Add new nodes to the network

    :param node_name_list: list of node names, e.g. ['a', 'b', 'c']
    :param dataframe: If True, return a pandas dataframe instead of a dict.
    :return: A dict mapping names to SUIDs for the newly-created nodes.
    """
    # POST the names to the REST endpoint; the service replies with the
    # created nodes, each carrying its assigned SUID.
    payload = json.dumps(node_name_list)
    response = self.session.post(self.__url + 'nodes', data=payload,
                                 headers=HEADERS)
    check_response(response)
    created = response.json()

    if not dataframe:
        # Default: plain name -> SUID lookup table.
        return {entry['name']: entry['SUID'] for entry in created}
    # Tabular form, indexed by the SUID column.
    return pd.DataFrame(created).set_index(['SUID'])
def function[add_nodes, parameter[self, node_name_list, dataframe]]: constant[ Add new nodes to the network :param node_name_list: list of node names, e.g. ['a', 'b', 'c'] :param dataframe: If True, return a pandas dataframe instead of a dict. :return: A dict mapping names to SUIDs for the newly-created nodes. ] variable[res] assign[=] call[name[self].session.post, parameter[binary_operation[name[self].__url + constant[nodes]]]] call[name[check_response], parameter[name[res]]] variable[nodes] assign[=] call[name[res].json, parameter[]] if name[dataframe] begin[:] return[call[call[name[pd].DataFrame, parameter[name[nodes]]].set_index, parameter[list[[<ast.Constant object at 0x7da18f8131f0>]]]]]
keyword[def] identifier[add_nodes] ( identifier[self] , identifier[node_name_list] , identifier[dataframe] = keyword[False] ): literal[string] identifier[res] = identifier[self] . identifier[session] . identifier[post] ( identifier[self] . identifier[__url] + literal[string] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[node_name_list] ), identifier[headers] = identifier[HEADERS] ) identifier[check_response] ( identifier[res] ) identifier[nodes] = identifier[res] . identifier[json] () keyword[if] identifier[dataframe] : keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[nodes] ). identifier[set_index] ([ literal[string] ]) keyword[else] : keyword[return] { identifier[node] [ literal[string] ]: identifier[node] [ literal[string] ] keyword[for] identifier[node] keyword[in] identifier[nodes] }
def add_nodes(self, node_name_list, dataframe=False): """ Add new nodes to the network :param node_name_list: list of node names, e.g. ['a', 'b', 'c'] :param dataframe: If True, return a pandas dataframe instead of a dict. :return: A dict mapping names to SUIDs for the newly-created nodes. """ res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS) check_response(res) nodes = res.json() if dataframe: return pd.DataFrame(nodes).set_index(['SUID']) # depends on [control=['if'], data=[]] else: return {node['name']: node['SUID'] for node in nodes}