code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _addLoggingLevel(levelName, levelNum, methodName=None):
    """
    Comprehensively adds a new logging level to the `logging` module and the
    currently configured logging class.
    `levelName` becomes an attribute of the `logging` module with the value
    `levelNum`. `methodName` becomes a convenience method for both `logging`
    itself and the class returned by `logging.getLoggerClass()` (usually just
    `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
    used.
    To avoid accidental clobberings of existing attributes, this method will
    raise an `AttributeError` if the level name is already an attribute of the
    `logging` module or if the method name is already present
    Example
    -------
    >>> addLoggingLevel('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5
    """
    # Default the convenience-method name to the lowercased level name,
    # e.g. 'TRACE' -> 'trace'.
    if not methodName:
        methodName = levelName.lower()
    # Refuse to clobber anything that already exists on the logging module
    # or on the currently configured logger class.
    if hasattr(logging, levelName):
        raise AttributeError(
            '{} already defined in logging module'.format(levelName))
    if hasattr(logging, methodName):
        raise AttributeError(
            '{} already defined in logging module'.format(methodName))
    if hasattr(logging.getLoggerClass(), methodName):
        raise AttributeError(
            '{} already defined in logger class'.format(methodName))
    # This method was inspired by the answers to Stack Overflow post
    # http://stackoverflow.com/q/2183233/2988730, especially
    # http://stackoverflow.com/a/13638084/2988730
    def logForLevel(self, message, *args, **kwargs):
        # Instance method installed on the logger class below; mirrors how
        # Logger.debug/info/... guard with isEnabledFor before calling _log.
        if self.isEnabledFor(levelNum):
            self._log(levelNum, message, args, **kwargs)
    def logToRoot(message, *args, **kwargs):
        # Module-level convenience that logs through the root logger,
        # analogous to logging.debug/info/....
        logging.log(levelNum, message, *args, **kwargs)
    # Register the level name and install the two convenience entry points.
    logging.addLevelName(levelNum, levelName)
    setattr(logging, levelName, levelNum)
    setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot) | def function[_addLoggingLevel, parameter[levelName, levelNum, methodName]]:
constant[
Comprehensively adds a new logging level to the `logging` module and the
currently configured logging class.
`levelName` becomes an attribute of the `logging` module with the value
`levelNum`. `methodName` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
used.
To avoid accidental clobberings of existing attributes, this method will
raise an `AttributeError` if the level name is already an attribute of the
`logging` module or if the method name is already present
Example
-------
>>> addLoggingLevel('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5
]
if <ast.UnaryOp object at 0x7da1b010b5b0> begin[:]
variable[methodName] assign[=] call[name[levelName].lower, parameter[]]
if call[name[hasattr], parameter[name[logging], name[levelName]]] begin[:]
<ast.Raise object at 0x7da1b01095d0>
if call[name[hasattr], parameter[name[logging], name[methodName]]] begin[:]
<ast.Raise object at 0x7da1b0108d30>
if call[name[hasattr], parameter[call[name[logging].getLoggerClass, parameter[]], name[methodName]]] begin[:]
<ast.Raise object at 0x7da1b010b250>
def function[logForLevel, parameter[self, message]]:
if call[name[self].isEnabledFor, parameter[name[levelNum]]] begin[:]
call[name[self]._log, parameter[name[levelNum], name[message], name[args]]]
def function[logToRoot, parameter[message]]:
call[name[logging].log, parameter[name[levelNum], name[message], <ast.Starred object at 0x7da1b0108640>]]
call[name[logging].addLevelName, parameter[name[levelNum], name[levelName]]]
call[name[setattr], parameter[name[logging], name[levelName], name[levelNum]]]
call[name[setattr], parameter[call[name[logging].getLoggerClass, parameter[]], name[methodName], name[logForLevel]]]
call[name[setattr], parameter[name[logging], name[methodName], name[logToRoot]]] | keyword[def] identifier[_addLoggingLevel] ( identifier[levelName] , identifier[levelNum] , identifier[methodName] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[methodName] :
identifier[methodName] = identifier[levelName] . identifier[lower] ()
keyword[if] identifier[hasattr] ( identifier[logging] , identifier[levelName] ):
keyword[raise] identifier[AttributeError] (
literal[string] . identifier[format] ( identifier[levelName] ))
keyword[if] identifier[hasattr] ( identifier[logging] , identifier[methodName] ):
keyword[raise] identifier[AttributeError] (
literal[string] . identifier[format] ( identifier[methodName] ))
keyword[if] identifier[hasattr] ( identifier[logging] . identifier[getLoggerClass] (), identifier[methodName] ):
keyword[raise] identifier[AttributeError] (
literal[string] . identifier[format] ( identifier[methodName] ))
keyword[def] identifier[logForLevel] ( identifier[self] , identifier[message] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[self] . identifier[isEnabledFor] ( identifier[levelNum] ):
identifier[self] . identifier[_log] ( identifier[levelNum] , identifier[message] , identifier[args] ,** identifier[kwargs] )
keyword[def] identifier[logToRoot] ( identifier[message] ,* identifier[args] ,** identifier[kwargs] ):
identifier[logging] . identifier[log] ( identifier[levelNum] , identifier[message] ,* identifier[args] ,** identifier[kwargs] )
identifier[logging] . identifier[addLevelName] ( identifier[levelNum] , identifier[levelName] )
identifier[setattr] ( identifier[logging] , identifier[levelName] , identifier[levelNum] )
identifier[setattr] ( identifier[logging] . identifier[getLoggerClass] (), identifier[methodName] , identifier[logForLevel] )
identifier[setattr] ( identifier[logging] , identifier[methodName] , identifier[logToRoot] ) | def _addLoggingLevel(levelName, levelNum, methodName=None):
"""
Comprehensively adds a new logging level to the `logging` module and the
currently configured logging class.
`levelName` becomes an attribute of the `logging` module with the value
`levelNum`. `methodName` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
used.
To avoid accidental clobberings of existing attributes, this method will
raise an `AttributeError` if the level name is already an attribute of the
`logging` module or if the method name is already present
Example
-------
>>> addLoggingLevel('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5
"""
if not methodName:
methodName = levelName.lower() # depends on [control=['if'], data=[]]
if hasattr(logging, levelName):
raise AttributeError('{} already defined in logging module'.format(levelName)) # depends on [control=['if'], data=[]]
if hasattr(logging, methodName):
raise AttributeError('{} already defined in logging module'.format(methodName)) # depends on [control=['if'], data=[]]
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError('{} already defined in logger class'.format(methodName)) # depends on [control=['if'], data=[]]
# This method was inspired by the answers to Stack Overflow post
# http://stackoverflow.com/q/2183233/2988730, especially
# http://stackoverflow.com/a/13638084/2988730
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs) # depends on [control=['if'], data=[]]
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot) |
def _must_decode(value):
    """Copied from pkginfo 1.4.1, _compat module."""
    # Decode bytes to text; any non-bytes value falls through unchanged.
    if type(value) is bytes:
        try:
            return value.decode('utf-8')
        except UnicodeDecodeError:
            # latin1 maps every possible byte value, so this fallback
            # always succeeds (though it may mis-render non-latin1 text).
            return value.decode('latin1')
return value | def function[_must_decode, parameter[value]]:
constant[Copied from pkginfo 1.4.1, _compat module.]
if compare[call[name[type], parameter[name[value]]] is name[bytes]] begin[:]
<ast.Try object at 0x7da20c6aa890>
return[name[value]] | keyword[def] identifier[_must_decode] ( identifier[value] ):
literal[string]
keyword[if] identifier[type] ( identifier[value] ) keyword[is] identifier[bytes] :
keyword[try] :
keyword[return] identifier[value] . identifier[decode] ( literal[string] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[return] identifier[value] . identifier[decode] ( literal[string] )
keyword[return] identifier[value] | def _must_decode(value):
"""Copied from pkginfo 1.4.1, _compat module."""
if type(value) is bytes:
try:
return value.decode('utf-8') # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
return value.decode('latin1') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return value |
def mission_item_reached_send(self, seq, force_mavlink1=False):
        '''
        A certain mission item has been reached. The system will either hold
        this position (or circle on the orbit) or (if the
        autocontinue on the WP was set) continue to the next
        MISSION.
        seq : Sequence (uint16_t)
        '''
        # Encode the MISSION_ITEM_REACHED payload and hand it to send();
        # force_mavlink1 is passed through (presumably forcing MAVLink v1
        # framing -- confirm against the send() implementation).
return self.send(self.mission_item_reached_encode(seq), force_mavlink1=force_mavlink1) | def function[mission_item_reached_send, parameter[self, seq, force_mavlink1]]:
constant[
A certain mission item has been reached. The system will either hold
this position (or circle on the orbit) or (if the
autocontinue on the WP was set) continue to the next
MISSION.
seq : Sequence (uint16_t)
]
return[call[name[self].send, parameter[call[name[self].mission_item_reached_encode, parameter[name[seq]]]]]] | keyword[def] identifier[mission_item_reached_send] ( identifier[self] , identifier[seq] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[mission_item_reached_encode] ( identifier[seq] ), identifier[force_mavlink1] = identifier[force_mavlink1] ) | def mission_item_reached_send(self, seq, force_mavlink1=False):
"""
A certain mission item has been reached. The system will either hold
this position (or circle on the orbit) or (if the
autocontinue on the WP was set) continue to the next
MISSION.
seq : Sequence (uint16_t)
"""
return self.send(self.mission_item_reached_encode(seq), force_mavlink1=force_mavlink1) |
def subscribe(obj, event, callback, event_state=None):
    """Subscribe an event from a class.
    Subclasses of the class/object will also fire events for this class,
    unless a more specific event exists.
    """
    # Accept either a class or an instance; events are keyed by class name.
    if inspect.isclass(obj):
        cls = obj.__name__
    else:
        cls = obj.__class__.__name__
    # No explicit state means the handler fires for any event state.
    if event_state is None:
        event_state = states.ANY
    # Handlers live in the module-level EVENT_HANDLERS registry under a
    # dotted 'ClassName.event.state' key.
    event_key = '.'.join([cls, event, event_state])
    if event_key not in EVENT_HANDLERS:
        EVENT_HANDLERS[event_key] = []
    EVENT_HANDLERS[event_key].append(callback)
return | def function[subscribe, parameter[obj, event, callback, event_state]]:
constant[Subscribe an event from an class.
Subclasses of the class/object will also fire events for this class,
unless a more specific event exists.
]
if call[name[inspect].isclass, parameter[name[obj]]] begin[:]
variable[cls] assign[=] name[obj].__name__
if compare[name[event_state] is constant[None]] begin[:]
variable[event_state] assign[=] name[states].ANY
variable[event_key] assign[=] call[constant[.].join, parameter[list[[<ast.Name object at 0x7da20c76f7f0>, <ast.Name object at 0x7da20c76f010>, <ast.Name object at 0x7da20c76e410>]]]]
if compare[name[event_key] <ast.NotIn object at 0x7da2590d7190> name[EVENT_HANDLERS]] begin[:]
call[name[EVENT_HANDLERS]][name[event_key]] assign[=] list[[]]
call[call[name[EVENT_HANDLERS]][name[event_key]].append, parameter[name[callback]]]
return[None] | keyword[def] identifier[subscribe] ( identifier[obj] , identifier[event] , identifier[callback] , identifier[event_state] = keyword[None] ):
literal[string]
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[obj] ):
identifier[cls] = identifier[obj] . identifier[__name__]
keyword[else] :
identifier[cls] = identifier[obj] . identifier[__class__] . identifier[__name__]
keyword[if] identifier[event_state] keyword[is] keyword[None] :
identifier[event_state] = identifier[states] . identifier[ANY]
identifier[event_key] = literal[string] . identifier[join] ([ identifier[cls] , identifier[event] , identifier[event_state] ])
keyword[if] identifier[event_key] keyword[not] keyword[in] identifier[EVENT_HANDLERS] :
identifier[EVENT_HANDLERS] [ identifier[event_key] ]=[]
identifier[EVENT_HANDLERS] [ identifier[event_key] ]. identifier[append] ( identifier[callback] )
keyword[return] | def subscribe(obj, event, callback, event_state=None):
"""Subscribe an event from an class.
Subclasses of the class/object will also fire events for this class,
unless a more specific event exists.
"""
if inspect.isclass(obj):
cls = obj.__name__ # depends on [control=['if'], data=[]]
else:
cls = obj.__class__.__name__
if event_state is None:
event_state = states.ANY # depends on [control=['if'], data=['event_state']]
event_key = '.'.join([cls, event, event_state])
if event_key not in EVENT_HANDLERS:
EVENT_HANDLERS[event_key] = [] # depends on [control=['if'], data=['event_key', 'EVENT_HANDLERS']]
EVENT_HANDLERS[event_key].append(callback)
return |
def environment_args(subparsers):
    """Add command line options for the environment operation.

    Registers an 'environment' sub-command on the given argparse
    sub-parsers object, with a positional 'vault_paths' argument and an
    optional '--prefix' flag, plus the shared export/mapping options.
    """
    env_parser = subparsers.add_parser('environment')
    env_parser.add_argument('vault_paths',
                            help='Full path(s) to secret',
                            nargs='+')
    env_parser.add_argument('--prefix',
                            dest='prefix',
                            # Implicit string concatenation: the trailing
                            # space is required so the help text reads
                            # 'when generating', not 'whengenerating'.
                            help='Old style prefix to use when '
                            'generating secret key names')
    # Shared option groups common to several sub-commands.
    export_arg(env_parser)
    mapping_args(env_parser)
mapping_args(env_parser)
base_args(env_parser) | def function[environment_args, parameter[subparsers]]:
constant[Add command line options for the environment operation]
variable[env_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[environment]]]
call[name[env_parser].add_argument, parameter[constant[vault_paths]]]
call[name[env_parser].add_argument, parameter[constant[--prefix]]]
call[name[export_arg], parameter[name[env_parser]]]
call[name[mapping_args], parameter[name[env_parser]]]
call[name[base_args], parameter[name[env_parser]]] | keyword[def] identifier[environment_args] ( identifier[subparsers] ):
literal[string]
identifier[env_parser] = identifier[subparsers] . identifier[add_parser] ( literal[string] )
identifier[env_parser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[nargs] = literal[string] )
identifier[env_parser] . identifier[add_argument] ( literal[string] ,
identifier[dest] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[export_arg] ( identifier[env_parser] )
identifier[mapping_args] ( identifier[env_parser] )
identifier[base_args] ( identifier[env_parser] ) | def environment_args(subparsers):
"""Add command line options for the environment operation"""
env_parser = subparsers.add_parser('environment')
env_parser.add_argument('vault_paths', help='Full path(s) to secret', nargs='+')
env_parser.add_argument('--prefix', dest='prefix', help='Old style prefix to use whengenerating secret key names')
export_arg(env_parser)
mapping_args(env_parser)
base_args(env_parser) |
def _initApplicationList(self):
"""Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
"""
if self.checkVersion('1.4'):
cmd = "core show applications"
else:
cmd = "show applications"
cmdresp = self.executeCommand(cmd)
self._applications = set()
for line in cmdresp.splitlines()[1:-1]:
mobj = re.match('\s*(\S+):', line)
if mobj:
self._applications.add(mobj.group(1).lower()) | def function[_initApplicationList, parameter[self]]:
constant[Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
]
if call[name[self].checkVersion, parameter[constant[1.4]]] begin[:]
variable[cmd] assign[=] constant[core show applications]
variable[cmdresp] assign[=] call[name[self].executeCommand, parameter[name[cmd]]]
name[self]._applications assign[=] call[name[set], parameter[]]
for taget[name[line]] in starred[call[call[name[cmdresp].splitlines, parameter[]]][<ast.Slice object at 0x7da1b0fc2e60>]] begin[:]
variable[mobj] assign[=] call[name[re].match, parameter[constant[\s*(\S+):], name[line]]]
if name[mobj] begin[:]
call[name[self]._applications.add, parameter[call[call[name[mobj].group, parameter[constant[1]]].lower, parameter[]]]] | keyword[def] identifier[_initApplicationList] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[checkVersion] ( literal[string] ):
identifier[cmd] = literal[string]
keyword[else] :
identifier[cmd] = literal[string]
identifier[cmdresp] = identifier[self] . identifier[executeCommand] ( identifier[cmd] )
identifier[self] . identifier[_applications] = identifier[set] ()
keyword[for] identifier[line] keyword[in] identifier[cmdresp] . identifier[splitlines] ()[ literal[int] :- literal[int] ]:
identifier[mobj] = identifier[re] . identifier[match] ( literal[string] , identifier[line] )
keyword[if] identifier[mobj] :
identifier[self] . identifier[_applications] . identifier[add] ( identifier[mobj] . identifier[group] ( literal[int] ). identifier[lower] ()) | def _initApplicationList(self):
"""Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
"""
if self.checkVersion('1.4'):
cmd = 'core show applications' # depends on [control=['if'], data=[]]
else:
cmd = 'show applications'
cmdresp = self.executeCommand(cmd)
self._applications = set()
for line in cmdresp.splitlines()[1:-1]:
mobj = re.match('\\s*(\\S+):', line)
if mobj:
self._applications.add(mobj.group(1).lower()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] |
def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
Q(um_from_user=um_to_user, um_to_user=um_from_user))
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user,
um_to_user=um_to_user,
latest_message=message)
return (contact, created) | def function[get_or_create, parameter[self, um_from_user, um_to_user, message]]:
constant[
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
]
variable[created] assign[=] constant[False]
<ast.Try object at 0x7da2046237f0>
return[tuple[[<ast.Name object at 0x7da204621c00>, <ast.Name object at 0x7da204621390>]]] | keyword[def] identifier[get_or_create] ( identifier[self] , identifier[um_from_user] , identifier[um_to_user] , identifier[message] ):
literal[string]
identifier[created] = keyword[False]
keyword[try] :
identifier[contact] = identifier[self] . identifier[get] ( identifier[Q] ( identifier[um_from_user] = identifier[um_from_user] , identifier[um_to_user] = identifier[um_to_user] )|
identifier[Q] ( identifier[um_from_user] = identifier[um_to_user] , identifier[um_to_user] = identifier[um_from_user] ))
keyword[except] identifier[self] . identifier[model] . identifier[DoesNotExist] :
identifier[created] = keyword[True]
identifier[contact] = identifier[self] . identifier[create] ( identifier[um_from_user] = identifier[um_from_user] ,
identifier[um_to_user] = identifier[um_to_user] ,
identifier[latest_message] = identifier[message] )
keyword[return] ( identifier[contact] , identifier[created] ) | def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) | Q(um_from_user=um_to_user, um_to_user=um_from_user)) # depends on [control=['try'], data=[]]
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user, um_to_user=um_to_user, latest_message=message) # depends on [control=['except'], data=[]]
return (contact, created) |
def export_profile(self):
        """ Export minimum needs to a json file.
        This method will save the current state of the minimum needs setup.
        Then open a dialog allowing the user to browse to the desired
        destination location and allow the user to save the needs as a json
        file.
        """
        # Configure a save-mode file dialog restricted to .json files,
        # appending the suffix automatically if the user omits it.
        file_name_dialog = QFileDialog(self)
        file_name_dialog.setAcceptMode(QFileDialog.AcceptSave)
        file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
        file_name_dialog.setDefaultSuffix('json')
        file_name = None
        # exec_() is truthy only when the user accepted the dialog.
        if file_name_dialog.exec_():
            file_name = file_name_dialog.selectedFiles()[0]
        # Only write when the user actually chose a destination.
        if file_name != '' and file_name is not None:
self.minimum_needs.write_to_file(file_name) | def function[export_profile, parameter[self]]:
constant[ Export minimum needs to a json file.
This method will save the current state of the minimum needs setup.
Then open a dialog allowing the user to browse to the desired
destination location and allow the user to save the needs as a json
file.
]
variable[file_name_dialog] assign[=] call[name[QFileDialog], parameter[name[self]]]
call[name[file_name_dialog].setAcceptMode, parameter[name[QFileDialog].AcceptSave]]
call[name[file_name_dialog].setNameFilter, parameter[call[name[self].tr, parameter[constant[JSON files (*.json *.JSON)]]]]]
call[name[file_name_dialog].setDefaultSuffix, parameter[constant[json]]]
variable[file_name] assign[=] constant[None]
if call[name[file_name_dialog].exec_, parameter[]] begin[:]
variable[file_name] assign[=] call[call[name[file_name_dialog].selectedFiles, parameter[]]][constant[0]]
if <ast.BoolOp object at 0x7da18ede45e0> begin[:]
call[name[self].minimum_needs.write_to_file, parameter[name[file_name]]] | keyword[def] identifier[export_profile] ( identifier[self] ):
literal[string]
identifier[file_name_dialog] = identifier[QFileDialog] ( identifier[self] )
identifier[file_name_dialog] . identifier[setAcceptMode] ( identifier[QFileDialog] . identifier[AcceptSave] )
identifier[file_name_dialog] . identifier[setNameFilter] ( identifier[self] . identifier[tr] ( literal[string] ))
identifier[file_name_dialog] . identifier[setDefaultSuffix] ( literal[string] )
identifier[file_name] = keyword[None]
keyword[if] identifier[file_name_dialog] . identifier[exec_] ():
identifier[file_name] = identifier[file_name_dialog] . identifier[selectedFiles] ()[ literal[int] ]
keyword[if] identifier[file_name] != literal[string] keyword[and] identifier[file_name] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[minimum_needs] . identifier[write_to_file] ( identifier[file_name] ) | def export_profile(self):
""" Export minimum needs to a json file.
This method will save the current state of the minimum needs setup.
Then open a dialog allowing the user to browse to the desired
destination location and allow the user to save the needs as a json
file.
"""
file_name_dialog = QFileDialog(self)
file_name_dialog.setAcceptMode(QFileDialog.AcceptSave)
file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
file_name_dialog.setDefaultSuffix('json')
file_name = None
if file_name_dialog.exec_():
file_name = file_name_dialog.selectedFiles()[0] # depends on [control=['if'], data=[]]
if file_name != '' and file_name is not None:
self.minimum_needs.write_to_file(file_name) # depends on [control=['if'], data=[]] |
def can_create_repository_with_record_types(self, repository_record_types):
        """Tests if this user can create a single ``Repository`` using the desired record types.
        While ``RepositoryManager.getRepositoryRecordTypes()`` can be
        used to examine which records are supported, this method tests
        which record(s) are required for creating a specific
        ``Repository``. Providing an empty array tests if a
        ``Repository`` can be created with no records.
        arg: repository_record_types (osid.type.Type[]): array of
                repository record types
        return: (boolean) - ``true`` if ``Repository`` creation using
                the specified ``Types`` is supported, ``false``
                otherwise
        raise: NullArgument - ``repository_record_types`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        # Implemented from template for
        # osid.resource.BinAdminSession.can_create_bin_with_record_types
        # NOTE: It is expected that real authentication hints will be
        # handled in a service adapter above the pay grade of this impl.
        # Delegate to the underlying catalog session when one is configured;
        # otherwise the method falls through to an unconditional True below.
        if self._catalog_session is not None:
            return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=repository_record_types)
return True | def function[can_create_repository_with_record_types, parameter[self, repository_record_types]]:
constant[Tests if this user can create a single ``Repository`` using the desired record types.
While ``RepositoryManager.getRepositoryRecordTypes()`` can be
used to examine which records are supported, this method tests
which record(s) are required for creating a specific
``Repository``. Providing an empty array tests if a
``Repository`` can be created with no records.
arg: repository_record_types (osid.type.Type[]): array of
repository record types
return: (boolean) - ``true`` if ``Repository`` creation using
the specified ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``repository_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.can_create_catalog_with_record_types, parameter[]]]
return[constant[True]] | keyword[def] identifier[can_create_repository_with_record_types] ( identifier[self] , identifier[repository_record_types] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[can_create_catalog_with_record_types] ( identifier[catalog_record_types] = identifier[repository_record_types] )
keyword[return] keyword[True] | def can_create_repository_with_record_types(self, repository_record_types):
"""Tests if this user can create a single ``Repository`` using the desired record types.
While ``RepositoryManager.getRepositoryRecordTypes()`` can be
used to examine which records are supported, this method tests
which record(s) are required for creating a specific
``Repository``. Providing an empty array tests if a
``Repository`` can be created with no records.
arg: repository_record_types (osid.type.Type[]): array of
repository record types
return: (boolean) - ``true`` if ``Repository`` creation using
the specified ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``repository_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_create_bin_with_record_types
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=repository_record_types) # depends on [control=['if'], data=[]]
return True |
def add_editor(name, userid):
    """
    :param name: a string representing the user's name
    :param userid: a string representing the user's UW NetID
    :return: True if request is successful, False otherwise.
    raise DataFailureException or a corresponding TrumbaException
    if the request failed or an error code has been returned.
    """
    # Build the 'add account' request URL, issue it against the Seattle
    # calendar resource, and let _process_resp validate the response with
    # the _is_editor_added predicate.
    url = _make_add_account_url(name, userid)
    return _process_resp(url,
                         get_sea_resource(url),
                         _is_editor_added
) | def function[add_editor, parameter[name, userid]]:
constant[
:param name: a string representing the user's name
:param userid: a string representing the user's UW NetID
:return: True if request is successful, False otherwise.
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned.
]
variable[url] assign[=] call[name[_make_add_account_url], parameter[name[name], name[userid]]]
return[call[name[_process_resp], parameter[name[url], call[name[get_sea_resource], parameter[name[url]]], name[_is_editor_added]]]] | keyword[def] identifier[add_editor] ( identifier[name] , identifier[userid] ):
literal[string]
identifier[url] = identifier[_make_add_account_url] ( identifier[name] , identifier[userid] )
keyword[return] identifier[_process_resp] ( identifier[url] ,
identifier[get_sea_resource] ( identifier[url] ),
identifier[_is_editor_added]
) | def add_editor(name, userid):
"""
:param name: a string representing the user's name
:param userid: a string representing the user's UW NetID
:return: True if request is successful, False otherwise.
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned.
"""
url = _make_add_account_url(name, userid)
return _process_resp(url, get_sea_resource(url), _is_editor_added) |
def bind(function, *args, **kwargs):
    """
    Wraps the given function such that when it is called, the given arguments
    are passed in addition to the connection argument.
    :type function: function
    :param function: The function that's ought to be wrapped.
    :type args: list
    :param args: Passed on to the called function.
    :type kwargs: dict
    :param kwargs: Passed on to the called function.
    :rtype: function
    :return: The wrapped function.
    """
    def decorated(*inner_args, **inner_kwargs):
        # NOTE(review): update() mutates the closed-over kwargs dict, so
        # keyword overrides persist across later calls of the wrapper --
        # confirm this accumulation is intended.
        kwargs.update(inner_kwargs)
        # Bound positional args are appended after the call-time ones.
        return function(*(inner_args + args), **kwargs)
    # Copy labels/metadata from the wrapped function onto the wrapper.
    copy_labels(function, decorated)
return decorated | def function[bind, parameter[function]]:
constant[
Wraps the given function such that when it is called, the given arguments
are passed in addition to the connection argument.
:type function: function
:param function: The function that's ought to be wrapped.
:type args: list
:param args: Passed on to the called function.
:type kwargs: dict
:param kwargs: Passed on to the called function.
:rtype: function
:return: The wrapped function.
]
def function[decorated, parameter[]]:
call[name[kwargs].update, parameter[name[inner_kwargs]]]
return[call[name[function], parameter[<ast.Starred object at 0x7da1b07ac550>]]]
call[name[copy_labels], parameter[name[function], name[decorated]]]
return[name[decorated]] | keyword[def] identifier[bind] ( identifier[function] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[decorated] (* identifier[inner_args] ,** identifier[inner_kwargs] ):
identifier[kwargs] . identifier[update] ( identifier[inner_kwargs] )
keyword[return] identifier[function] (*( identifier[inner_args] + identifier[args] ),** identifier[kwargs] )
identifier[copy_labels] ( identifier[function] , identifier[decorated] )
keyword[return] identifier[decorated] | def bind(function, *args, **kwargs):
"""
Wraps the given function such that when it is called, the given arguments
are passed in addition to the connection argument.
:type function: function
:param function: The function that's ought to be wrapped.
:type args: list
:param args: Passed on to the called function.
:type kwargs: dict
:param kwargs: Passed on to the called function.
:rtype: function
:return: The wrapped function.
"""
def decorated(*inner_args, **inner_kwargs):
kwargs.update(inner_kwargs)
return function(*inner_args + args, **kwargs)
copy_labels(function, decorated)
return decorated |
def find_package(import_name):
    """Locate a package and return ``(prefix, package_path)``.

    ``package_path`` is the directory that would have to be added to the
    Python path so that ``import_name`` becomes importable.  ``prefix`` is
    the path below which a UNIX-like folder structure (lib, share, ...)
    exists, or ``None`` when no such prefix can be determined.
    """
    top_level = import_name.split('.')[0]
    module_loader = pkgutil.get_loader(top_level)
    if module_loader is None or import_name == '__main__':
        # Unknown module, or the interactive/main module: fall back to
        # the current working directory.
        package_path = os.getcwd()
    else:
        if hasattr(module_loader, 'get_filename'):
            # For .egg, zipimporter gained get_filename only in 2.7.
            source_file = module_loader.get_filename(top_level)
        elif hasattr(module_loader, 'archive'):
            # zipimporter's loader.archive points at the .egg/.zip; the
            # archive filename is dropped by the dirname call below.
            source_file = module_loader.archive
        else:
            # Some loaders (e.g. Google App Engine's HardenedModulesHook)
            # have neither get_filename nor archive -- import and inspect.
            __import__(import_name)
            source_file = sys.modules[import_name].__file__
        package_path = os.path.abspath(os.path.dirname(source_file))
        if module_loader.is_package(top_level):
            # package_path currently ends at the package folder itself.
            package_path = os.path.dirname(package_path)
    parent_dir, leaf = os.path.split(package_path)
    interpreter_prefix = os.path.abspath(sys.prefix)
    if package_path.startswith(interpreter_prefix):
        return interpreter_prefix, package_path
    if leaf.lower() == 'site-packages':
        grandparent, parent_name = os.path.split(parent_dir)
        if parent_name.lower() == 'lib':
            # Windows-like installations: <base>/lib/site-packages
            base_dir = grandparent
        elif os.path.basename(grandparent).lower() == 'lib':
            # UNIX-like installations: <base>/lib/pythonX.Y/site-packages
            base_dir = os.path.dirname(grandparent)
        else:
            base_dir = parent_dir
        return base_dir, package_path
    return None, package_path
constant[Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
]
variable[root_mod_name] assign[=] call[call[name[import_name].split, parameter[constant[.]]]][constant[0]]
variable[loader] assign[=] call[name[pkgutil].get_loader, parameter[name[root_mod_name]]]
if <ast.BoolOp object at 0x7da1b03b85b0> begin[:]
variable[package_path] assign[=] call[name[os].getcwd, parameter[]]
<ast.Tuple object at 0x7da204564700> assign[=] call[name[os].path.split, parameter[name[package_path]]]
variable[py_prefix] assign[=] call[name[os].path.abspath, parameter[name[sys].prefix]]
if call[name[package_path].startswith, parameter[name[py_prefix]]] begin[:]
return[tuple[[<ast.Name object at 0x7da2045673a0>, <ast.Name object at 0x7da204564e20>]]]
return[tuple[[<ast.Constant object at 0x7da204565ff0>, <ast.Name object at 0x7da2045650f0>]]] | keyword[def] identifier[find_package] ( identifier[import_name] ):
literal[string]
identifier[root_mod_name] = identifier[import_name] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[loader] = identifier[pkgutil] . identifier[get_loader] ( identifier[root_mod_name] )
keyword[if] identifier[loader] keyword[is] keyword[None] keyword[or] identifier[import_name] == literal[string] :
identifier[package_path] = identifier[os] . identifier[getcwd] ()
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[loader] , literal[string] ):
identifier[filename] = identifier[loader] . identifier[get_filename] ( identifier[root_mod_name] )
keyword[elif] identifier[hasattr] ( identifier[loader] , literal[string] ):
identifier[filename] = identifier[loader] . identifier[archive]
keyword[else] :
identifier[__import__] ( identifier[import_name] )
identifier[filename] = identifier[sys] . identifier[modules] [ identifier[import_name] ]. identifier[__file__]
identifier[package_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] ))
keyword[if] identifier[loader] . identifier[is_package] ( identifier[root_mod_name] ):
identifier[package_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[package_path] )
identifier[site_parent] , identifier[site_folder] = identifier[os] . identifier[path] . identifier[split] ( identifier[package_path] )
identifier[py_prefix] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[sys] . identifier[prefix] )
keyword[if] identifier[package_path] . identifier[startswith] ( identifier[py_prefix] ):
keyword[return] identifier[py_prefix] , identifier[package_path]
keyword[elif] identifier[site_folder] . identifier[lower] ()== literal[string] :
identifier[parent] , identifier[folder] = identifier[os] . identifier[path] . identifier[split] ( identifier[site_parent] )
keyword[if] identifier[folder] . identifier[lower] ()== literal[string] :
identifier[base_dir] = identifier[parent]
keyword[elif] identifier[os] . identifier[path] . identifier[basename] ( identifier[parent] ). identifier[lower] ()== literal[string] :
identifier[base_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[parent] )
keyword[else] :
identifier[base_dir] = identifier[site_parent]
keyword[return] identifier[base_dir] , identifier[package_path]
keyword[return] keyword[None] , identifier[package_path] | def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd() # depends on [control=['if'], data=[]]
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name) # depends on [control=['if'], data=[]]
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive # depends on [control=['if'], data=[]]
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# package_path ends with __init__.py for a package
if loader.is_package(root_mod_name):
package_path = os.path.dirname(package_path) # depends on [control=['if'], data=[]]
(site_parent, site_folder) = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return (py_prefix, package_path) # depends on [control=['if'], data=[]]
elif site_folder.lower() == 'site-packages':
(parent, folder) = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent # depends on [control=['if'], data=[]]
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent) # depends on [control=['if'], data=[]]
else:
base_dir = site_parent
return (base_dir, package_path) # depends on [control=['if'], data=[]]
return (None, package_path) |
def kill_node(config_file, yes, override_cluster_name):
    """Kills a random Raylet worker.

    Args:
        config_file: Path to the cluster YAML configuration file.
        yes: When truthy, skip the interactive confirmation prompt.
        override_cluster_name: Optional name replacing the config's
            ``cluster_name``.

    Returns:
        The IP address (internal or external, depending on the provider's
        ``use_internal_ips`` setting) of the worker that was stopped.
    """
    # Use a context manager so the config file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open(config_file) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; prefer yaml.safe_load if configs are untrusted.
        config = yaml.load(f.read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)

    confirm("This will kill a node in your cluster", yes)

    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: "worker"})
        node = random.choice(nodes)
        logger.info("kill_node: Terminating worker {}".format(node))

        updater = NodeUpdaterThread(
            node_id=node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            runtime_hash="")

        _exec(updater, "ray stop", False, False)

        # Give the raylet time to shut down before querying its address.
        time.sleep(5)

        if config.get("provider", {}).get("use_internal_ips", False) is True:
            node_ip = provider.internal_ip(node)
        else:
            node_ip = provider.external_ip(node)
    finally:
        # Always release provider resources, even if termination failed.
        provider.cleanup()

    return node_ip
constant[Kills a random Raylet worker.]
variable[config] assign[=] call[name[yaml].load, parameter[call[call[name[open], parameter[name[config_file]]].read, parameter[]]]]
if compare[name[override_cluster_name] is_not constant[None]] begin[:]
call[name[config]][constant[cluster_name]] assign[=] name[override_cluster_name]
variable[config] assign[=] call[name[_bootstrap_config], parameter[name[config]]]
call[name[confirm], parameter[constant[This will kill a node in your cluster], name[yes]]]
variable[provider] assign[=] call[name[get_node_provider], parameter[call[name[config]][constant[provider]], call[name[config]][constant[cluster_name]]]]
<ast.Try object at 0x7da1b23479d0>
return[name[node_ip]] | keyword[def] identifier[kill_node] ( identifier[config_file] , identifier[yes] , identifier[override_cluster_name] ):
literal[string]
identifier[config] = identifier[yaml] . identifier[load] ( identifier[open] ( identifier[config_file] ). identifier[read] ())
keyword[if] identifier[override_cluster_name] keyword[is] keyword[not] keyword[None] :
identifier[config] [ literal[string] ]= identifier[override_cluster_name]
identifier[config] = identifier[_bootstrap_config] ( identifier[config] )
identifier[confirm] ( literal[string] , identifier[yes] )
identifier[provider] = identifier[get_node_provider] ( identifier[config] [ literal[string] ], identifier[config] [ literal[string] ])
keyword[try] :
identifier[nodes] = identifier[provider] . identifier[non_terminated_nodes] ({ identifier[TAG_RAY_NODE_TYPE] : literal[string] })
identifier[node] = identifier[random] . identifier[choice] ( identifier[nodes] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[node] ))
identifier[updater] = identifier[NodeUpdaterThread] (
identifier[node_id] = identifier[node] ,
identifier[provider_config] = identifier[config] [ literal[string] ],
identifier[provider] = identifier[provider] ,
identifier[auth_config] = identifier[config] [ literal[string] ],
identifier[cluster_name] = identifier[config] [ literal[string] ],
identifier[file_mounts] = identifier[config] [ literal[string] ],
identifier[initialization_commands] =[],
identifier[setup_commands] =[],
identifier[runtime_hash] = literal[string] )
identifier[_exec] ( identifier[updater] , literal[string] , keyword[False] , keyword[False] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] identifier[config] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[False] ) keyword[is] keyword[True] :
identifier[node_ip] = identifier[provider] . identifier[internal_ip] ( identifier[node] )
keyword[else] :
identifier[node_ip] = identifier[provider] . identifier[external_ip] ( identifier[node] )
keyword[finally] :
identifier[provider] . identifier[cleanup] ()
keyword[return] identifier[node_ip] | def kill_node(config_file, yes, override_cluster_name):
"""Kills a random Raylet worker."""
config = yaml.load(open(config_file).read())
if override_cluster_name is not None:
config['cluster_name'] = override_cluster_name # depends on [control=['if'], data=['override_cluster_name']]
config = _bootstrap_config(config)
confirm('This will kill a node in your cluster', yes)
provider = get_node_provider(config['provider'], config['cluster_name'])
try:
nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: 'worker'})
node = random.choice(nodes)
logger.info('kill_node: Terminating worker {}'.format(node))
updater = NodeUpdaterThread(node_id=node, provider_config=config['provider'], provider=provider, auth_config=config['auth'], cluster_name=config['cluster_name'], file_mounts=config['file_mounts'], initialization_commands=[], setup_commands=[], runtime_hash='')
_exec(updater, 'ray stop', False, False)
time.sleep(5)
if config.get('provider', {}).get('use_internal_ips', False) is True:
node_ip = provider.internal_ip(node) # depends on [control=['if'], data=[]]
else:
node_ip = provider.external_ip(node) # depends on [control=['try'], data=[]]
finally:
provider.cleanup()
return node_ip |
def cyl_to_rect(R,phi,Z):
    """Convert cylindrical coordinates to rectangular coordinates.

    Parameters
    ----------
    R, phi, Z : cylindrical coordinates

    Returns
    -------
    (X, Y, Z) : rectangular coordinates

    History
    -------
    2011-02-23 - Written - Bovy (NYU)
    """
    x = R * sc.cos(phi)
    y = R * sc.sin(phi)
    return (x, y, Z)
constant[
NAME:
cyl_to_rect
PURPOSE:
convert from cylindrical to rectangular coordinates
INPUT:
R, phi, Z - cylindrical coordinates
OUTPUT:
X,Y,Z
HISTORY:
2011-02-23 - Written - Bovy (NYU)
]
return[tuple[[<ast.BinOp object at 0x7da1b0e8b130>, <ast.BinOp object at 0x7da1b0e8bcd0>, <ast.Name object at 0x7da1b0e8b2b0>]]] | keyword[def] identifier[cyl_to_rect] ( identifier[R] , identifier[phi] , identifier[Z] ):
literal[string]
keyword[return] ( identifier[R] * identifier[sc] . identifier[cos] ( identifier[phi] ), identifier[R] * identifier[sc] . identifier[sin] ( identifier[phi] ), identifier[Z] ) | def cyl_to_rect(R, phi, Z):
"""
NAME:
cyl_to_rect
PURPOSE:
convert from cylindrical to rectangular coordinates
INPUT:
R, phi, Z - cylindrical coordinates
OUTPUT:
X,Y,Z
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
return (R * sc.cos(phi), R * sc.sin(phi), Z) |
def write_to_disk(
    manifest_root_dir: Optional[Path] = None,
    manifest_name: Optional[str] = None,
    prettify: Optional[bool] = False,
) -> Manifest:
    """Write the active manifest to disk and return it.

    Defaults: the manifest goes to the current working directory, is named
    ``Manifest[version].json``, and is minified.  Pass ``manifest_root_dir``
    (a ``Path``), ``manifest_name`` (must end in ``json``), and/or
    ``prettify=True`` to override any of those.
    """
    result = _write_to_disk(manifest_root_dir, manifest_name, prettify)
    return result
constant[
Write the active manifest to disk
Defaults
- Writes manifest to cwd unless Path is provided as manifest_root_dir.
- Writes manifest with a filename of Manifest[version].json unless a desired
manifest name (which must end in json) is provided as manifest_name.
- Writes the minified manifest version to disk unless prettify is set to True.
]
return[call[name[_write_to_disk], parameter[name[manifest_root_dir], name[manifest_name], name[prettify]]]] | keyword[def] identifier[write_to_disk] (
identifier[manifest_root_dir] : identifier[Optional] [ identifier[Path] ]= keyword[None] ,
identifier[manifest_name] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[prettify] : identifier[Optional] [ identifier[bool] ]= keyword[False] ,
)-> identifier[Manifest] :
literal[string]
keyword[return] identifier[_write_to_disk] ( identifier[manifest_root_dir] , identifier[manifest_name] , identifier[prettify] ) | def write_to_disk(manifest_root_dir: Optional[Path]=None, manifest_name: Optional[str]=None, prettify: Optional[bool]=False) -> Manifest:
"""
Write the active manifest to disk
Defaults
- Writes manifest to cwd unless Path is provided as manifest_root_dir.
- Writes manifest with a filename of Manifest[version].json unless a desired
manifest name (which must end in json) is provided as manifest_name.
- Writes the minified manifest version to disk unless prettify is set to True.
"""
return _write_to_disk(manifest_root_dir, manifest_name, prettify) |
def read_info(
    filename
):
    """Run ``ffprobe`` on *filename* and return its metadata.

    Returns
    -------
    info : Dict
        Parsed JSON from ``ffprobe -show_format -show_streams``.
    """
    probe_args = [
        'ffprobe',
        filename,
        '-v', 'error',
        '-print_format', 'json',
        '-show_format', '-show_streams',
    ]
    raw = sp.check_output(probe_args)
    return json.loads(raw.decode('utf-8'))
constant[Extracts FFMPEG info and returns info as JSON
Returns
-------
info : Dict
JSON info dict
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b053aec0>, <ast.Name object at 0x7da1b0539390>, <ast.Constant object at 0x7da1b0538490>, <ast.Constant object at 0x7da1b053b430>, <ast.Constant object at 0x7da1b0539990>, <ast.Constant object at 0x7da1b0538820>, <ast.Constant object at 0x7da1b0538670>, <ast.Constant object at 0x7da1b05385b0>]]
variable[out] assign[=] call[name[sp].check_output, parameter[name[cmd]]]
variable[info] assign[=] call[name[json].loads, parameter[call[name[out].decode, parameter[constant[utf-8]]]]]
return[name[info]] | keyword[def] identifier[read_info] (
identifier[filename]
):
literal[string]
identifier[cmd] =[
literal[string] ,
identifier[filename] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
]
identifier[out] = identifier[sp] . identifier[check_output] ( identifier[cmd] )
identifier[info] = identifier[json] . identifier[loads] ( identifier[out] . identifier[decode] ( literal[string] ))
keyword[return] identifier[info] | def read_info(filename):
"""Extracts FFMPEG info and returns info as JSON
Returns
-------
info : Dict
JSON info dict
"""
cmd = ['ffprobe', filename, '-v', 'error', '-print_format', 'json', '-show_format', '-show_streams']
out = sp.check_output(cmd)
info = json.loads(out.decode('utf-8'))
return info |
def _handle_execute_reply(self, msg):
    """ Handles replies for code execution.

    Dispatches on the kind of the pending request this reply answers:
    'user' requests flush output, update the prompt, and emit
    ``executed``; 'silent_exec_callback' requests are routed to their
    callback; anything else is delegated to the base class.
    """
    self.log.debug("execute: %s", msg.get('content', ''))
    msg_id = msg['parent_header']['msg_id']
    # Bookkeeping record for the original request; None if unknown.
    info = self._request_info['execute'].get(msg_id)
    # unset reading flag, because if execute finished, raw_input can't
    # still be pending.
    self._reading = False
    if info and info.kind == 'user' and not self._hidden:
        # Make sure that all output from the SUB channel has been processed
        # before writing a new prompt.
        self.kernel_manager.sub_channel.flush()
        # Reset the ANSI style information to prevent bad text in stdout
        # from messing up our colors. We're not a true terminal so we're
        # allowed to do this.
        if self.ansi_codes:
            self._ansi_processor.reset_sgr()
        content = msg['content']
        status = content['status']
        if status == 'ok':
            self._process_execute_ok(msg)
        elif status == 'error':
            self._process_execute_error(msg)
        elif status == 'aborted':
            self._process_execute_abort(msg)
        # Any other status falls through silently; the prompt is still
        # refreshed and the request is retired below.
        self._show_interpreter_prompt_for_reply(msg)
        self.executed.emit(msg)
        self._request_info['execute'].pop(msg_id)
    elif info and info.kind == 'silent_exec_callback' and not self._hidden:
        self._handle_exec_callback(msg)
        self._request_info['execute'].pop(msg_id)
    else:
        # Unknown request kind or hidden execution: let the base class
        # handle it (the pending entry is intentionally left in place).
        super(FrontendWidget, self)._handle_execute_reply(msg)
constant[ Handles replies for code execution.
]
call[name[self].log.debug, parameter[constant[execute: %s], call[name[msg].get, parameter[constant[content], constant[]]]]]
variable[msg_id] assign[=] call[call[name[msg]][constant[parent_header]]][constant[msg_id]]
variable[info] assign[=] call[call[name[self]._request_info][constant[execute]].get, parameter[name[msg_id]]]
name[self]._reading assign[=] constant[False]
if <ast.BoolOp object at 0x7da18fe916c0> begin[:]
call[name[self].kernel_manager.sub_channel.flush, parameter[]]
if name[self].ansi_codes begin[:]
call[name[self]._ansi_processor.reset_sgr, parameter[]]
variable[content] assign[=] call[name[msg]][constant[content]]
variable[status] assign[=] call[name[content]][constant[status]]
if compare[name[status] equal[==] constant[ok]] begin[:]
call[name[self]._process_execute_ok, parameter[name[msg]]]
call[name[self]._show_interpreter_prompt_for_reply, parameter[name[msg]]]
call[name[self].executed.emit, parameter[name[msg]]]
call[call[name[self]._request_info][constant[execute]].pop, parameter[name[msg_id]]] | keyword[def] identifier[_handle_execute_reply] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[msg] . identifier[get] ( literal[string] , literal[string] ))
identifier[msg_id] = identifier[msg] [ literal[string] ][ literal[string] ]
identifier[info] = identifier[self] . identifier[_request_info] [ literal[string] ]. identifier[get] ( identifier[msg_id] )
identifier[self] . identifier[_reading] = keyword[False]
keyword[if] identifier[info] keyword[and] identifier[info] . identifier[kind] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[_hidden] :
identifier[self] . identifier[kernel_manager] . identifier[sub_channel] . identifier[flush] ()
keyword[if] identifier[self] . identifier[ansi_codes] :
identifier[self] . identifier[_ansi_processor] . identifier[reset_sgr] ()
identifier[content] = identifier[msg] [ literal[string] ]
identifier[status] = identifier[content] [ literal[string] ]
keyword[if] identifier[status] == literal[string] :
identifier[self] . identifier[_process_execute_ok] ( identifier[msg] )
keyword[elif] identifier[status] == literal[string] :
identifier[self] . identifier[_process_execute_error] ( identifier[msg] )
keyword[elif] identifier[status] == literal[string] :
identifier[self] . identifier[_process_execute_abort] ( identifier[msg] )
identifier[self] . identifier[_show_interpreter_prompt_for_reply] ( identifier[msg] )
identifier[self] . identifier[executed] . identifier[emit] ( identifier[msg] )
identifier[self] . identifier[_request_info] [ literal[string] ]. identifier[pop] ( identifier[msg_id] )
keyword[elif] identifier[info] keyword[and] identifier[info] . identifier[kind] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[_hidden] :
identifier[self] . identifier[_handle_exec_callback] ( identifier[msg] )
identifier[self] . identifier[_request_info] [ literal[string] ]. identifier[pop] ( identifier[msg_id] )
keyword[else] :
identifier[super] ( identifier[FrontendWidget] , identifier[self] ). identifier[_handle_execute_reply] ( identifier[msg] ) | def _handle_execute_reply(self, msg):
""" Handles replies for code execution.
"""
self.log.debug('execute: %s', msg.get('content', ''))
msg_id = msg['parent_header']['msg_id']
info = self._request_info['execute'].get(msg_id)
# unset reading flag, because if execute finished, raw_input can't
# still be pending.
self._reading = False
if info and info.kind == 'user' and (not self._hidden):
# Make sure that all output from the SUB channel has been processed
# before writing a new prompt.
self.kernel_manager.sub_channel.flush()
# Reset the ANSI style information to prevent bad text in stdout
# from messing up our colors. We're not a true terminal so we're
# allowed to do this.
if self.ansi_codes:
self._ansi_processor.reset_sgr() # depends on [control=['if'], data=[]]
content = msg['content']
status = content['status']
if status == 'ok':
self._process_execute_ok(msg) # depends on [control=['if'], data=[]]
elif status == 'error':
self._process_execute_error(msg) # depends on [control=['if'], data=[]]
elif status == 'aborted':
self._process_execute_abort(msg) # depends on [control=['if'], data=[]]
self._show_interpreter_prompt_for_reply(msg)
self.executed.emit(msg)
self._request_info['execute'].pop(msg_id) # depends on [control=['if'], data=[]]
elif info and info.kind == 'silent_exec_callback' and (not self._hidden):
self._handle_exec_callback(msg)
self._request_info['execute'].pop(msg_id) # depends on [control=['if'], data=[]]
else:
super(FrontendWidget, self)._handle_execute_reply(msg) |
def list2str(self, l: List, joiner: str) -> str:
    """
    Convert list to str as input for tokenizer
    Args:
        l (list): list for converting
        joiner (str): join the elements using this string to separate them.
    Returns: the value of the list as a string
    """
    # Collect the pieces and join once at the end: repeated string
    # concatenation in a loop is quadratic in the output size.
    parts = []
    for item in l:
        if isinstance(item, list):
            parts.append(self.list2str(item, joiner))
        elif isinstance(item, dict):
            parts.append(self.dict2str(item, joiner))
        elif item:
            # Falsy scalars (None, '', 0) are skipped, preserving the
            # original behavior of only emitting truthy items.
            parts.append(str(item))
    # Every emitted piece is followed by the joiner (including the last),
    # exactly as the incremental concatenation produced.
    return ''.join(p + joiner for p in parts)
constant[
Convert list to str as input for tokenizer
Args:
l (list): list for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the list as a string
]
variable[result] assign[=] call[name[str], parameter[]]
for taget[name[item]] in starred[name[l]] begin[:]
if call[name[isinstance], parameter[name[item], name[list]]] begin[:]
variable[result] assign[=] binary_operation[binary_operation[name[result] + call[name[self].list2str, parameter[name[item], name[joiner]]]] + name[joiner]]
return[name[result]] | keyword[def] identifier[list2str] ( identifier[self] , identifier[l] : identifier[List] , identifier[joiner] : identifier[str] )-> identifier[str] :
literal[string]
identifier[result] = identifier[str] ()
keyword[for] identifier[item] keyword[in] identifier[l] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[list] ):
identifier[result] = identifier[result] + identifier[self] . identifier[list2str] ( identifier[item] , identifier[joiner] )+ identifier[joiner]
keyword[elif] identifier[isinstance] ( identifier[item] , identifier[dict] ):
identifier[result] = identifier[result] + identifier[self] . identifier[dict2str] ( identifier[item] , identifier[joiner] )+ identifier[joiner]
keyword[elif] identifier[item] :
identifier[result] = identifier[result] + identifier[str] ( identifier[item] )+ identifier[joiner]
keyword[return] identifier[result] | def list2str(self, l: List, joiner: str) -> str:
"""
Convert list to str as input for tokenizer
Args:
l (list): list for converting
joiner (str): join the elements using this string to separate them.
Returns: the value of the list as a string
"""
result = str()
for item in l:
if isinstance(item, list):
result = result + self.list2str(item, joiner) + joiner # depends on [control=['if'], data=[]]
elif isinstance(item, dict):
result = result + self.dict2str(item, joiner) + joiner # depends on [control=['if'], data=[]]
elif item:
result = result + str(item) + joiner # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return result |
def get_server(server_class, server_address, request_handler_class):
    """ Build an instance of :param:`server_class` with
    :param:`request_handler_class` bound to it, plus a fresh route map and
    a bound :func:`route` method.
    >>> server = get_server(TcpServer, ('localhost', 502), RequestHandler)
    >>> server.serve_forever()
    :param server_class: (sub)Class of :class:`socketserver.BaseServer`.
    :param request_handler_class: (sub)Class of
        :class:`umodbus.server.RequestHandler`.
    :return: Instance of :param:`server_class`.
    """
    server = server_class(server_address, request_handler_class)
    server.route_map = Map()
    server.route = MethodType(route, server)
    return server
constant[ Return instance of :param:`server_class` with :param:`request_handler`
bound to it.
This method also binds a :func:`route` method to the server instance.
>>> server = get_server(TcpServer, ('localhost', 502), RequestHandler)
>>> server.serve_forever()
:param server_class: (sub)Class of :class:`socketserver.BaseServer`.
:param request_handler_class: (sub)Class of
:class:`umodbus.server.RequestHandler`.
:return: Instance of :param:`server_class`.
]
variable[s] assign[=] call[name[server_class], parameter[name[server_address], name[request_handler_class]]]
name[s].route_map assign[=] call[name[Map], parameter[]]
name[s].route assign[=] call[name[MethodType], parameter[name[route], name[s]]]
return[name[s]] | keyword[def] identifier[get_server] ( identifier[server_class] , identifier[server_address] , identifier[request_handler_class] ):
literal[string]
identifier[s] = identifier[server_class] ( identifier[server_address] , identifier[request_handler_class] )
identifier[s] . identifier[route_map] = identifier[Map] ()
identifier[s] . identifier[route] = identifier[MethodType] ( identifier[route] , identifier[s] )
keyword[return] identifier[s] | def get_server(server_class, server_address, request_handler_class):
""" Return instance of :param:`server_class` with :param:`request_handler`
bound to it.
This method also binds a :func:`route` method to the server instance.
>>> server = get_server(TcpServer, ('localhost', 502), RequestHandler)
>>> server.serve_forever()
:param server_class: (sub)Class of :class:`socketserver.BaseServer`.
:param request_handler_class: (sub)Class of
:class:`umodbus.server.RequestHandler`.
:return: Instance of :param:`server_class`.
"""
s = server_class(server_address, request_handler_class)
s.route_map = Map()
s.route = MethodType(route, s)
return s |
def arc_distance(theta_1, phi_1,
                 theta_2, phi_2):
    """
    Calculates the pairwise arc distance between all points in vector a and b.
    """
    # Haversine-style formula, evaluated via arctan2 for numerical
    # stability near antipodal points.
    half_dtheta = (theta_2 - theta_1) / 2
    half_dphi = (phi_2 - phi_1) / 2
    hav = np.sin(half_dtheta)**2 + np.cos(theta_1) * np.cos(theta_2) * np.sin(half_dphi)**2
    return 2 * np.arctan2(np.sqrt(hav), np.sqrt(1 - hav))
constant[
Calculates the pairwise arc distance between all points in vector a and b.
]
variable[temp] assign[=] binary_operation[binary_operation[call[name[np].sin, parameter[binary_operation[binary_operation[name[theta_2] - name[theta_1]] / constant[2]]]] ** constant[2]] + binary_operation[binary_operation[call[name[np].cos, parameter[name[theta_1]]] * call[name[np].cos, parameter[name[theta_2]]]] * binary_operation[call[name[np].sin, parameter[binary_operation[binary_operation[name[phi_2] - name[phi_1]] / constant[2]]]] ** constant[2]]]]
variable[distance_matrix] assign[=] binary_operation[constant[2] * call[name[np].arctan2, parameter[call[name[np].sqrt, parameter[name[temp]]], call[name[np].sqrt, parameter[binary_operation[constant[1] - name[temp]]]]]]]
return[name[distance_matrix]] | keyword[def] identifier[arc_distance] ( identifier[theta_1] , identifier[phi_1] ,
identifier[theta_2] , identifier[phi_2] ):
literal[string]
identifier[temp] = identifier[np] . identifier[sin] (( identifier[theta_2] - identifier[theta_1] )/ literal[int] )** literal[int] + identifier[np] . identifier[cos] ( identifier[theta_1] )* identifier[np] . identifier[cos] ( identifier[theta_2] )* identifier[np] . identifier[sin] (( identifier[phi_2] - identifier[phi_1] )/ literal[int] )** literal[int]
identifier[distance_matrix] = literal[int] *( identifier[np] . identifier[arctan2] ( identifier[np] . identifier[sqrt] ( identifier[temp] ), identifier[np] . identifier[sqrt] ( literal[int] - identifier[temp] )))
keyword[return] identifier[distance_matrix] | def arc_distance(theta_1, phi_1, theta_2, phi_2):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
temp = np.sin((theta_2 - theta_1) / 2) ** 2 + np.cos(theta_1) * np.cos(theta_2) * np.sin((phi_2 - phi_1) / 2) ** 2
distance_matrix = 2 * np.arctan2(np.sqrt(temp), np.sqrt(1 - temp))
return distance_matrix |
def assemble_tlg_author_filepaths():
    """Reads TLG index and builds a list of absolute filepaths."""
    # Expand '~' so the returned paths are absolute for the current user.
    base_dir = os.path.expanduser('~/cltk_data/greek/text/tlg/plaintext/')
    filepaths = []
    # One plaintext file per author id listed in the TLG index.
    for author_id in TLG_INDEX:
        filepaths.append(os.path.join(base_dir, author_id + '.TXT'))
    return filepaths
constant[Reads TLG index and builds a list of absolute filepaths.]
variable[plaintext_dir_rel] assign[=] constant[~/cltk_data/greek/text/tlg/plaintext/]
variable[plaintext_dir] assign[=] call[name[os].path.expanduser, parameter[name[plaintext_dir_rel]]]
variable[filepaths] assign[=] <ast.ListComp object at 0x7da2044c3e20>
return[name[filepaths]] | keyword[def] identifier[assemble_tlg_author_filepaths] ():
literal[string]
identifier[plaintext_dir_rel] = literal[string]
identifier[plaintext_dir] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[plaintext_dir_rel] )
identifier[filepaths] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[plaintext_dir] , identifier[x] + literal[string] ) keyword[for] identifier[x] keyword[in] identifier[TLG_INDEX] ]
keyword[return] identifier[filepaths] | def assemble_tlg_author_filepaths():
"""Reads TLG index and builds a list of absolute filepaths."""
plaintext_dir_rel = '~/cltk_data/greek/text/tlg/plaintext/'
plaintext_dir = os.path.expanduser(plaintext_dir_rel)
filepaths = [os.path.join(plaintext_dir, x + '.TXT') for x in TLG_INDEX]
return filepaths |
def remove(self, symbol):
    """
    Remove the entry for the unit matching `symbol`.

    Parameters
    ----------
    symbol : str
        The name of the unit symbol to remove from the registry.

    Raises
    ------
    SymbolNotFoundError
        If `symbol` is not present in the lookup table.
    """
    # Any mutation of the registry invalidates the cached unit-system id.
    self._unit_system_id = None
    if symbol in self.lut:
        del self.lut[symbol]
        return
    raise SymbolNotFoundError(
        "Tried to remove the symbol '%s', but it does not exist "
        "in this registry." % symbol
    )
constant[
Remove the entry for the unit matching `symbol`.
Parameters
----------
symbol : str
The name of the unit symbol to remove from the registry.
]
name[self]._unit_system_id assign[=] constant[None]
if compare[name[symbol] <ast.NotIn object at 0x7da2590d7190> name[self].lut] begin[:]
<ast.Raise object at 0x7da1b11d25c0>
<ast.Delete object at 0x7da1b11d3df0> | keyword[def] identifier[remove] ( identifier[self] , identifier[symbol] ):
literal[string]
identifier[self] . identifier[_unit_system_id] = keyword[None]
keyword[if] identifier[symbol] keyword[not] keyword[in] identifier[self] . identifier[lut] :
keyword[raise] identifier[SymbolNotFoundError] (
literal[string]
literal[string] % identifier[symbol]
)
keyword[del] identifier[self] . identifier[lut] [ identifier[symbol] ] | def remove(self, symbol):
"""
Remove the entry for the unit matching `symbol`.
Parameters
----------
symbol : str
The name of the unit symbol to remove from the registry.
"""
self._unit_system_id = None
if symbol not in self.lut:
raise SymbolNotFoundError("Tried to remove the symbol '%s', but it does not exist in this registry." % symbol) # depends on [control=['if'], data=['symbol']]
del self.lut[symbol] |
def svd(self):
    """! @brief File-like object for the device's SVD file.
    @todo Support multiple cores.
    """
    # The pack info may expose no debug entries or no 'svd' attribute;
    # either case (and a failing pack lookup) yields None.
    try:
        path = self._info.debugs[0].attrib['svd']
        result = self._pack.get_file(path)
    except (KeyError, IndexError):
        result = None
    return result
constant[! @brief File-like object for the device's SVD file.
@todo Support multiple cores.
]
<ast.Try object at 0x7da1b18a1240> | keyword[def] identifier[svd] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[svdPath] = identifier[self] . identifier[_info] . identifier[debugs] [ literal[int] ]. identifier[attrib] [ literal[string] ]
keyword[return] identifier[self] . identifier[_pack] . identifier[get_file] ( identifier[svdPath] )
keyword[except] ( identifier[KeyError] , identifier[IndexError] ):
keyword[return] keyword[None] | def svd(self):
"""! @brief File-like object for the device's SVD file.
@todo Support multiple cores.
"""
try:
svdPath = self._info.debugs[0].attrib['svd']
return self._pack.get_file(svdPath) # depends on [control=['try'], data=[]]
except (KeyError, IndexError):
return None # depends on [control=['except'], data=[]] |
def segment(self, source, language=None):
    """Returns a chunk list from the given sentence.

    Args:
      source (str): Source string to segment.
      language (:obj:`str`, optional): A language code.

    Returns:
      A chunk list. (:obj:`budou.chunk.ChunkList`)

    Raises:
      ValueError: If :obj:`language` is given and it is not included in
          :obj:`supported_languages`.
    """
    if language and not language in self.supported_languages:
        raise ValueError(
            'Language {} is not supported by MeCab segmenter'.format(language))
    chunks = ChunkList()
    # Character offset into `source` where the next parsed token should start.
    seek = 0
    # MeCab's tagger takes bytes on Python 2 and unicode on Python 3.
    source_str = source.encode('utf-8') if six.PY2 else source
    # Drop the trailing 'EOS' marker line and final empty line of MeCab output.
    results = self.tagger.parse(source_str).split('\n')[:-2]
    for row in results:
        if six.PY2:
            row = row.decode('utf-8')
        # Each row is tab-separated: token[0] is the surface form,
        # token[3] carries '-'-joined features (part of speech, then label).
        token = row.split('\t')
        word = token[0]
        labels = token[3].split('-')
        pos = labels[0]
        label = labels[1] if len(labels) > 1 else None
        # MeCab swallows whitespace; when the token does not line up with
        # `source` at `seek`, re-insert the skipped space as its own chunk.
        if source[seek: seek + len(word)] != word:
            assert source[seek] == ' '
            assert source[seek + 1: seek + len(word) + 1] == word
            chunks.append(Chunk.space())
            seek += 1
        # Dependency direction: True attaches to the following chunk,
        # False to the preceding one, None leaves the chunk independent.
        dependency = None
        if pos in _DEPENDENT_POS_FORWARD:
            dependency = True
        elif pos in _DEPENDENT_POS_BACKWARD:
            dependency = False
        elif label in _DEPENDENT_LABEL_FORWARD:
            dependency = True
        elif label in _DEPENDENT_LABEL_BACKWARD:
            dependency = False
        chunk = Chunk(word, pos=pos, label=label, dependency=dependency)
        if chunk.is_punct():
            # Opening punctuation attaches forward; closing attaches backward.
            chunk.dependency = chunk.is_open_punct()
        chunks.append(chunk)
        seek += len(word)
    chunks.resolve_dependencies()
    return chunks
constant[Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
]
if <ast.BoolOp object at 0x7da1b12cc7f0> begin[:]
<ast.Raise object at 0x7da1b12ccf10>
variable[chunks] assign[=] call[name[ChunkList], parameter[]]
variable[seek] assign[=] constant[0]
variable[source_str] assign[=] <ast.IfExp object at 0x7da1b12cc940>
variable[results] assign[=] call[call[call[name[self].tagger.parse, parameter[name[source_str]]].split, parameter[constant[
]]]][<ast.Slice object at 0x7da1b12cc550>]
for taget[name[row]] in starred[name[results]] begin[:]
if name[six].PY2 begin[:]
variable[row] assign[=] call[name[row].decode, parameter[constant[utf-8]]]
variable[token] assign[=] call[name[row].split, parameter[constant[ ]]]
variable[word] assign[=] call[name[token]][constant[0]]
variable[labels] assign[=] call[call[name[token]][constant[3]].split, parameter[constant[-]]]
variable[pos] assign[=] call[name[labels]][constant[0]]
variable[label] assign[=] <ast.IfExp object at 0x7da2041d87c0>
if compare[call[name[source]][<ast.Slice object at 0x7da2041d8d30>] not_equal[!=] name[word]] begin[:]
assert[compare[call[name[source]][name[seek]] equal[==] constant[ ]]]
assert[compare[call[name[source]][<ast.Slice object at 0x7da1b1237190>] equal[==] name[word]]]
call[name[chunks].append, parameter[call[name[Chunk].space, parameter[]]]]
<ast.AugAssign object at 0x7da1b12b9870>
variable[dependency] assign[=] constant[None]
if compare[name[pos] in name[_DEPENDENT_POS_FORWARD]] begin[:]
variable[dependency] assign[=] constant[True]
variable[chunk] assign[=] call[name[Chunk], parameter[name[word]]]
if call[name[chunk].is_punct, parameter[]] begin[:]
name[chunk].dependency assign[=] call[name[chunk].is_open_punct, parameter[]]
call[name[chunks].append, parameter[name[chunk]]]
<ast.AugAssign object at 0x7da1b12bbbb0>
call[name[chunks].resolve_dependencies, parameter[]]
return[name[chunks]] | keyword[def] identifier[segment] ( identifier[self] , identifier[source] , identifier[language] = keyword[None] ):
literal[string]
keyword[if] identifier[language] keyword[and] keyword[not] identifier[language] keyword[in] identifier[self] . identifier[supported_languages] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[language] ))
identifier[chunks] = identifier[ChunkList] ()
identifier[seek] = literal[int]
identifier[source_str] = identifier[source] . identifier[encode] ( literal[string] ) keyword[if] identifier[six] . identifier[PY2] keyword[else] identifier[source]
identifier[results] = identifier[self] . identifier[tagger] . identifier[parse] ( identifier[source_str] ). identifier[split] ( literal[string] )[:- literal[int] ]
keyword[for] identifier[row] keyword[in] identifier[results] :
keyword[if] identifier[six] . identifier[PY2] :
identifier[row] = identifier[row] . identifier[decode] ( literal[string] )
identifier[token] = identifier[row] . identifier[split] ( literal[string] )
identifier[word] = identifier[token] [ literal[int] ]
identifier[labels] = identifier[token] [ literal[int] ]. identifier[split] ( literal[string] )
identifier[pos] = identifier[labels] [ literal[int] ]
identifier[label] = identifier[labels] [ literal[int] ] keyword[if] identifier[len] ( identifier[labels] )> literal[int] keyword[else] keyword[None]
keyword[if] identifier[source] [ identifier[seek] : identifier[seek] + identifier[len] ( identifier[word] )]!= identifier[word] :
keyword[assert] identifier[source] [ identifier[seek] ]== literal[string]
keyword[assert] identifier[source] [ identifier[seek] + literal[int] : identifier[seek] + identifier[len] ( identifier[word] )+ literal[int] ]== identifier[word]
identifier[chunks] . identifier[append] ( identifier[Chunk] . identifier[space] ())
identifier[seek] += literal[int]
identifier[dependency] = keyword[None]
keyword[if] identifier[pos] keyword[in] identifier[_DEPENDENT_POS_FORWARD] :
identifier[dependency] = keyword[True]
keyword[elif] identifier[pos] keyword[in] identifier[_DEPENDENT_POS_BACKWARD] :
identifier[dependency] = keyword[False]
keyword[elif] identifier[label] keyword[in] identifier[_DEPENDENT_LABEL_FORWARD] :
identifier[dependency] = keyword[True]
keyword[elif] identifier[label] keyword[in] identifier[_DEPENDENT_LABEL_BACKWARD] :
identifier[dependency] = keyword[False]
identifier[chunk] = identifier[Chunk] ( identifier[word] , identifier[pos] = identifier[pos] , identifier[label] = identifier[label] , identifier[dependency] = identifier[dependency] )
keyword[if] identifier[chunk] . identifier[is_punct] ():
identifier[chunk] . identifier[dependency] = identifier[chunk] . identifier[is_open_punct] ()
identifier[chunks] . identifier[append] ( identifier[chunk] )
identifier[seek] += identifier[len] ( identifier[word] )
identifier[chunks] . identifier[resolve_dependencies] ()
keyword[return] identifier[chunks] | def segment(self, source, language=None):
"""Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
"""
if language and (not language in self.supported_languages):
raise ValueError('Language {} is not supported by MeCab segmenter'.format(language)) # depends on [control=['if'], data=[]]
chunks = ChunkList()
seek = 0
source_str = source.encode('utf-8') if six.PY2 else source
results = self.tagger.parse(source_str).split('\n')[:-2]
for row in results:
if six.PY2:
row = row.decode('utf-8') # depends on [control=['if'], data=[]]
token = row.split('\t')
word = token[0]
labels = token[3].split('-')
pos = labels[0]
label = labels[1] if len(labels) > 1 else None
if source[seek:seek + len(word)] != word:
assert source[seek] == ' '
assert source[seek + 1:seek + len(word) + 1] == word
chunks.append(Chunk.space())
seek += 1 # depends on [control=['if'], data=['word']]
dependency = None
if pos in _DEPENDENT_POS_FORWARD:
dependency = True # depends on [control=['if'], data=[]]
elif pos in _DEPENDENT_POS_BACKWARD:
dependency = False # depends on [control=['if'], data=[]]
elif label in _DEPENDENT_LABEL_FORWARD:
dependency = True # depends on [control=['if'], data=[]]
elif label in _DEPENDENT_LABEL_BACKWARD:
dependency = False # depends on [control=['if'], data=[]]
chunk = Chunk(word, pos=pos, label=label, dependency=dependency)
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct() # depends on [control=['if'], data=[]]
chunks.append(chunk)
seek += len(word) # depends on [control=['for'], data=['row']]
chunks.resolve_dependencies()
return chunks |
def _get_template(self, root=None, **metadata_defaults):
    """ Iterate over items metadata_defaults {prop: val, ...} to populate template """
    if root is None:
        # Lazily build the data map so the '_root' element name is known.
        if self._data_map is None:
            self._init_data_map()
        self._xml_root = self._data_map['_root']
        root = self._xml_root
    self._xml_tree = create_element_tree(root)
    template_tree = self._xml_tree
    for prop, val in iteritems(metadata_defaults):
        xpath = self._data_map.get(prop)
        # Only properties known to the data map with truthy values apply.
        if not (xpath and val):
            continue
        setattr(self, prop, val)
        update_property(template_tree, None, xpath, prop, val)
    return template_tree
constant[ Iterate over items metadata_defaults {prop: val, ...} to populate template ]
if compare[name[root] is constant[None]] begin[:]
if compare[name[self]._data_map is constant[None]] begin[:]
call[name[self]._init_data_map, parameter[]]
variable[root] assign[=] call[name[self]._data_map][constant[_root]]
variable[template_tree] assign[=] call[name[create_element_tree], parameter[name[root]]]
for taget[tuple[[<ast.Name object at 0x7da18f7215a0>, <ast.Name object at 0x7da18f722350>]]] in starred[call[name[iteritems], parameter[name[metadata_defaults]]]] begin[:]
variable[path] assign[=] call[name[self]._data_map.get, parameter[name[prop]]]
if <ast.BoolOp object at 0x7da18f721150> begin[:]
call[name[setattr], parameter[name[self], name[prop], name[val]]]
call[name[update_property], parameter[name[template_tree], constant[None], name[path], name[prop], name[val]]]
return[name[template_tree]] | keyword[def] identifier[_get_template] ( identifier[self] , identifier[root] = keyword[None] ,** identifier[metadata_defaults] ):
literal[string]
keyword[if] identifier[root] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[_data_map] keyword[is] keyword[None] :
identifier[self] . identifier[_init_data_map] ()
identifier[root] = identifier[self] . identifier[_xml_root] = identifier[self] . identifier[_data_map] [ literal[string] ]
identifier[template_tree] = identifier[self] . identifier[_xml_tree] = identifier[create_element_tree] ( identifier[root] )
keyword[for] identifier[prop] , identifier[val] keyword[in] identifier[iteritems] ( identifier[metadata_defaults] ):
identifier[path] = identifier[self] . identifier[_data_map] . identifier[get] ( identifier[prop] )
keyword[if] identifier[path] keyword[and] identifier[val] :
identifier[setattr] ( identifier[self] , identifier[prop] , identifier[val] )
identifier[update_property] ( identifier[template_tree] , keyword[None] , identifier[path] , identifier[prop] , identifier[val] )
keyword[return] identifier[template_tree] | def _get_template(self, root=None, **metadata_defaults):
""" Iterate over items metadata_defaults {prop: val, ...} to populate template """
if root is None:
if self._data_map is None:
self._init_data_map() # depends on [control=['if'], data=[]]
root = self._xml_root = self._data_map['_root'] # depends on [control=['if'], data=['root']]
template_tree = self._xml_tree = create_element_tree(root)
for (prop, val) in iteritems(metadata_defaults):
path = self._data_map.get(prop)
if path and val:
setattr(self, prop, val)
update_property(template_tree, None, path, prop, val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return template_tree |
def props_to_DataFrame(regionprops):
    r"""
    Returns a Pandas DataFrame containing all the scalar metrics for each
    region, such as volume, sphericity, and so on, calculated by
    ``regionprops_3D``.

    Parameters
    ----------
    regionprops : list
        This is a list of properties for each region that is computed
        by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
        the same ``list`` format as the ``regionprops`` function in **Skimage**
        you can pass in either.

    Returns
    -------
    DataFrame : Pandas DataFrame
        A Pandas DataFrame with each region corresponding to a row and each
        column corresponding to a key metric. All the values for a given
        property (e.g. 'sphericity') can be obtained as
        ``val = df['sphericity']``. Conversely, all the key metrics for a
        given region can be found with ``df.iloc[1]``.

    See Also
    --------
    props_to_image
    regionprops_3d
    """
    # Inspect the first region to find every public property whose value
    # is a plain scalar (shape () means not an array/sequence).
    reference = regionprops[0]
    scalar_props = []
    for name in reference.__dir__():
        if name.startswith('_'):
            continue
        try:
            if sp.shape(getattr(reference, name)) == ():
                scalar_props.append(name)
        except (TypeError, NotImplementedError, AttributeError):
            # Property could not be evaluated on this region; ignore it.
            pass
    # Gather each scalar property across all regions into one array column.
    columns = {}
    for name in scalar_props:
        try:
            columns[name] = sp.array([region[name] for region in regionprops])
        except ValueError:
            print('Error encountered evaluating ' + name + ' so skipping it')
    # One row per region, one column per scalar metric.
    return DataFrame(columns)
constant[
Returns a Pandas DataFrame containing all the scalar metrics for each
region, such as volume, sphericity, and so on, calculated by
``regionprops_3D``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
the same ``list`` format as the ``regionprops`` function in **Skimage**
you can pass in either.
Returns
-------
DataFrame : Pandas DataFrame
A Pandas DataFrame with each region corresponding to a row and each
column corresponding to a key metric. All the values for a given
property (e.g. 'sphericity') can be obtained as
``val = df['sphericity']``. Conversely, all the key metrics for a
given region can be found with ``df.iloc[1]``.
See Also
--------
props_to_image
regionprops_3d
]
variable[metrics] assign[=] list[[]]
variable[reg] assign[=] call[name[regionprops]][constant[0]]
for taget[name[item]] in starred[call[name[reg].__dir__, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0517160> begin[:]
<ast.Try object at 0x7da1b0515660>
variable[d] assign[=] dictionary[[], []]
for taget[name[k]] in starred[name[metrics]] begin[:]
<ast.Try object at 0x7da1b05175b0>
variable[df] assign[=] call[name[DataFrame], parameter[name[d]]]
return[name[df]] | keyword[def] identifier[props_to_DataFrame] ( identifier[regionprops] ):
literal[string]
identifier[metrics] =[]
identifier[reg] = identifier[regionprops] [ literal[int] ]
keyword[for] identifier[item] keyword[in] identifier[reg] . identifier[__dir__] ():
keyword[if] keyword[not] identifier[item] . identifier[startswith] ( literal[string] ):
keyword[try] :
keyword[if] identifier[sp] . identifier[shape] ( identifier[getattr] ( identifier[reg] , identifier[item] ))==():
identifier[metrics] . identifier[append] ( identifier[item] )
keyword[except] ( identifier[TypeError] , identifier[NotImplementedError] , identifier[AttributeError] ):
keyword[pass]
identifier[d] ={}
keyword[for] identifier[k] keyword[in] identifier[metrics] :
keyword[try] :
identifier[d] [ identifier[k] ]= identifier[sp] . identifier[array] ([ identifier[r] [ identifier[k] ] keyword[for] identifier[r] keyword[in] identifier[regionprops] ])
keyword[except] identifier[ValueError] :
identifier[print] ( literal[string] + identifier[k] + literal[string] )
identifier[df] = identifier[DataFrame] ( identifier[d] )
keyword[return] identifier[df] | def props_to_DataFrame(regionprops):
"""
Returns a Pandas DataFrame containing all the scalar metrics for each
region, such as volume, sphericity, and so on, calculated by
``regionprops_3D``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
the same ``list`` format as the ``regionprops`` function in **Skimage**
you can pass in either.
Returns
-------
DataFrame : Pandas DataFrame
A Pandas DataFrame with each region corresponding to a row and each
column corresponding to a key metric. All the values for a given
property (e.g. 'sphericity') can be obtained as
``val = df['sphericity']``. Conversely, all the key metrics for a
given region can be found with ``df.iloc[1]``.
See Also
--------
props_to_image
regionprops_3d
"""
# Parse the regionprops list and pull out all props with scalar values
metrics = []
reg = regionprops[0]
for item in reg.__dir__():
if not item.startswith('_'):
try:
if sp.shape(getattr(reg, item)) == ():
metrics.append(item) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (TypeError, NotImplementedError, AttributeError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
# Create a dictionary of all metrics that are simple scalar propertie
d = {}
for k in metrics:
try:
d[k] = sp.array([r[k] for r in regionprops]) # depends on [control=['try'], data=[]]
except ValueError:
print('Error encountered evaluating ' + k + ' so skipping it') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['k']]
# Create pandas data frame an return
df = DataFrame(d)
return df |
def _parse_fields_dns(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('IPASN._parse_fields_dns() has been deprecated and will be '
'removed. You should now use IPASN.parse_fields_dns().')
return self.parse_fields_dns(*args, **kwargs) | def function[_parse_fields_dns, parameter[self]]:
constant[
Deprecated. This will be removed in a future release.
]
from relative_module[warnings] import module[warn]
call[name[warn], parameter[constant[IPASN._parse_fields_dns() has been deprecated and will be removed. You should now use IPASN.parse_fields_dns().]]]
return[call[name[self].parse_fields_dns, parameter[<ast.Starred object at 0x7da18eb56fe0>]]] | keyword[def] identifier[_parse_fields_dns] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[warnings] keyword[import] identifier[warn]
identifier[warn] ( literal[string]
literal[string] )
keyword[return] identifier[self] . identifier[parse_fields_dns] (* identifier[args] ,** identifier[kwargs] ) | def _parse_fields_dns(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('IPASN._parse_fields_dns() has been deprecated and will be removed. You should now use IPASN.parse_fields_dns().')
return self.parse_fields_dns(*args, **kwargs) |
def get_output_column_widths(table, spans):
    """
    Gets the widths of the columns of the output table

    Parameters
    ----------
    table : list of lists of str
        The table of rows of text
    spans : list of lists of int
        The [row, column] pairs of combined cells

    Returns
    -------
    widths : list of int
        The widths of each column in the output table
    """
    # Every column starts at the minimum width of 3 characters.
    widths = []
    for column in table[0]:
        widths.append(3)
    # First pass: size each column from its single-column (unspanned) cells.
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            column_count = get_span_column_count(span)
            if column_count == 1:
                # The cell's text lives at the span's top-left coordinate.
                text_row = span[0][0]
                text_column = span[0][1]
                text = table[text_row][text_column]
                length = get_longest_line_length(text)
                if length > widths[column]:
                    widths[column] = length
    # Second pass: widen the columns under each multi-column span until the
    # spanned text fits across the combined width.
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            column_count = get_span_column_count(span)
            if column_count > 1:
                text_row = span[0][0]
                text_column = span[0][1]
                text = table[text_row][text_column]
                end_column = text_column + column_count
                available_space = sum(
                    widths[text_column:end_column])
                # Merged cells also gain one separator character per joined
                # column boundary.
                available_space += column_count - 1
                length = get_longest_line_length(text)
                while length > available_space:
                    # Grow each spanned column by one, re-checking after every
                    # increment so the widening is spread evenly.
                    for i in range(text_column, end_column):
                        widths[i] += 1
                        available_space = sum(
                            widths[text_column:end_column])
                        available_space += column_count - 1
                        if length <= available_space:
                            break
    return widths
constant[
Gets the widths of the columns of the output table
Parameters
----------
table : list of lists of str
The table of rows of text
spans : list of lists of int
The [row, column] pairs of combined cells
Returns
-------
widths : list of int
The widths of each column in the output table
]
variable[widths] assign[=] list[[]]
for taget[name[column]] in starred[call[name[table]][constant[0]]] begin[:]
call[name[widths].append, parameter[constant[3]]]
for taget[name[row]] in starred[call[name[range], parameter[call[name[len], parameter[name[table]]]]]] begin[:]
for taget[name[column]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[table]][name[row]]]]]]] begin[:]
variable[span] assign[=] call[name[get_span], parameter[name[spans], name[row], name[column]]]
variable[column_count] assign[=] call[name[get_span_column_count], parameter[name[span]]]
if compare[name[column_count] equal[==] constant[1]] begin[:]
variable[text_row] assign[=] call[call[name[span]][constant[0]]][constant[0]]
variable[text_column] assign[=] call[call[name[span]][constant[0]]][constant[1]]
variable[text] assign[=] call[call[name[table]][name[text_row]]][name[text_column]]
variable[length] assign[=] call[name[get_longest_line_length], parameter[name[text]]]
if compare[name[length] greater[>] call[name[widths]][name[column]]] begin[:]
call[name[widths]][name[column]] assign[=] name[length]
for taget[name[row]] in starred[call[name[range], parameter[call[name[len], parameter[name[table]]]]]] begin[:]
for taget[name[column]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[table]][name[row]]]]]]] begin[:]
variable[span] assign[=] call[name[get_span], parameter[name[spans], name[row], name[column]]]
variable[column_count] assign[=] call[name[get_span_column_count], parameter[name[span]]]
if compare[name[column_count] greater[>] constant[1]] begin[:]
variable[text_row] assign[=] call[call[name[span]][constant[0]]][constant[0]]
variable[text_column] assign[=] call[call[name[span]][constant[0]]][constant[1]]
variable[text] assign[=] call[call[name[table]][name[text_row]]][name[text_column]]
variable[end_column] assign[=] binary_operation[name[text_column] + name[column_count]]
variable[available_space] assign[=] call[name[sum], parameter[call[name[widths]][<ast.Slice object at 0x7da1b10c4520>]]]
<ast.AugAssign object at 0x7da1b10c7700>
variable[length] assign[=] call[name[get_longest_line_length], parameter[name[text]]]
while compare[name[length] greater[>] name[available_space]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[text_column], name[end_column]]]] begin[:]
<ast.AugAssign object at 0x7da1b10c40d0>
variable[available_space] assign[=] call[name[sum], parameter[call[name[widths]][<ast.Slice object at 0x7da1b10c68c0>]]]
<ast.AugAssign object at 0x7da1b10c71f0>
if compare[name[length] less_or_equal[<=] name[available_space]] begin[:]
break
return[name[widths]] | keyword[def] identifier[get_output_column_widths] ( identifier[table] , identifier[spans] ):
literal[string]
identifier[widths] =[]
keyword[for] identifier[column] keyword[in] identifier[table] [ literal[int] ]:
identifier[widths] . identifier[append] ( literal[int] )
keyword[for] identifier[row] keyword[in] identifier[range] ( identifier[len] ( identifier[table] )):
keyword[for] identifier[column] keyword[in] identifier[range] ( identifier[len] ( identifier[table] [ identifier[row] ])):
identifier[span] = identifier[get_span] ( identifier[spans] , identifier[row] , identifier[column] )
identifier[column_count] = identifier[get_span_column_count] ( identifier[span] )
keyword[if] identifier[column_count] == literal[int] :
identifier[text_row] = identifier[span] [ literal[int] ][ literal[int] ]
identifier[text_column] = identifier[span] [ literal[int] ][ literal[int] ]
identifier[text] = identifier[table] [ identifier[text_row] ][ identifier[text_column] ]
identifier[length] = identifier[get_longest_line_length] ( identifier[text] )
keyword[if] identifier[length] > identifier[widths] [ identifier[column] ]:
identifier[widths] [ identifier[column] ]= identifier[length]
keyword[for] identifier[row] keyword[in] identifier[range] ( identifier[len] ( identifier[table] )):
keyword[for] identifier[column] keyword[in] identifier[range] ( identifier[len] ( identifier[table] [ identifier[row] ])):
identifier[span] = identifier[get_span] ( identifier[spans] , identifier[row] , identifier[column] )
identifier[column_count] = identifier[get_span_column_count] ( identifier[span] )
keyword[if] identifier[column_count] > literal[int] :
identifier[text_row] = identifier[span] [ literal[int] ][ literal[int] ]
identifier[text_column] = identifier[span] [ literal[int] ][ literal[int] ]
identifier[text] = identifier[table] [ identifier[text_row] ][ identifier[text_column] ]
identifier[end_column] = identifier[text_column] + identifier[column_count]
identifier[available_space] = identifier[sum] (
identifier[widths] [ identifier[text_column] : identifier[end_column] ])
identifier[available_space] += identifier[column_count] - literal[int]
identifier[length] = identifier[get_longest_line_length] ( identifier[text] )
keyword[while] identifier[length] > identifier[available_space] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[text_column] , identifier[end_column] ):
identifier[widths] [ identifier[i] ]+= literal[int]
identifier[available_space] = identifier[sum] (
identifier[widths] [ identifier[text_column] : identifier[end_column] ])
identifier[available_space] += identifier[column_count] - literal[int]
keyword[if] identifier[length] <= identifier[available_space] :
keyword[break]
keyword[return] identifier[widths] | def get_output_column_widths(table, spans):
"""
Gets the widths of the columns of the output table
Parameters
----------
table : list of lists of str
The table of rows of text
spans : list of lists of int
The [row, column] pairs of combined cells
Returns
-------
widths : list of int
The widths of each column in the output table
"""
widths = []
for column in table[0]:
widths.append(3) # depends on [control=['for'], data=[]]
for row in range(len(table)):
for column in range(len(table[row])):
span = get_span(spans, row, column)
column_count = get_span_column_count(span)
if column_count == 1:
text_row = span[0][0]
text_column = span[0][1]
text = table[text_row][text_column]
length = get_longest_line_length(text)
if length > widths[column]:
widths[column] = length # depends on [control=['if'], data=['length']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']] # depends on [control=['for'], data=['row']]
for row in range(len(table)):
for column in range(len(table[row])):
span = get_span(spans, row, column)
column_count = get_span_column_count(span)
if column_count > 1:
text_row = span[0][0]
text_column = span[0][1]
text = table[text_row][text_column]
end_column = text_column + column_count
available_space = sum(widths[text_column:end_column])
available_space += column_count - 1
length = get_longest_line_length(text)
while length > available_space:
for i in range(text_column, end_column):
widths[i] += 1
available_space = sum(widths[text_column:end_column])
available_space += column_count - 1
if length <= available_space:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['while'], data=['length', 'available_space']] # depends on [control=['if'], data=['column_count']] # depends on [control=['for'], data=['column']] # depends on [control=['for'], data=['row']]
return widths |
def htmlSaveFileFormat(self, filename, encoding, format):
"""Dump an HTML document to a file using a given encoding. """
ret = libxml2mod.htmlSaveFileFormat(filename, self._o, encoding, format)
return ret | def function[htmlSaveFileFormat, parameter[self, filename, encoding, format]]:
constant[Dump an HTML document to a file using a given encoding. ]
variable[ret] assign[=] call[name[libxml2mod].htmlSaveFileFormat, parameter[name[filename], name[self]._o, name[encoding], name[format]]]
return[name[ret]] | keyword[def] identifier[htmlSaveFileFormat] ( identifier[self] , identifier[filename] , identifier[encoding] , identifier[format] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[htmlSaveFileFormat] ( identifier[filename] , identifier[self] . identifier[_o] , identifier[encoding] , identifier[format] )
keyword[return] identifier[ret] | def htmlSaveFileFormat(self, filename, encoding, format):
"""Dump an HTML document to a file using a given encoding. """
ret = libxml2mod.htmlSaveFileFormat(filename, self._o, encoding, format)
return ret |
def get_dict_column(dict_, colx):
r"""
Args:
dict_ (dict_): a dictionary of lists
colx (int):
CommandLine:
python -m utool.util_dict --test-get_dict_column
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}
>>> colx = [2, 0]
>>> retdict_ = get_dict_column(dict_, colx)
>>> result = ut.repr2(retdict_)
>>> print(result)
{'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}
"""
retdict_ = {key: util_list.list_take(val, colx)
for key, val in six.iteritems(dict_)}
return retdict_ | def function[get_dict_column, parameter[dict_, colx]]:
constant[
Args:
dict_ (dict_): a dictionary of lists
colx (int):
CommandLine:
python -m utool.util_dict --test-get_dict_column
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}
>>> colx = [2, 0]
>>> retdict_ = get_dict_column(dict_, colx)
>>> result = ut.repr2(retdict_)
>>> print(result)
{'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}
]
variable[retdict_] assign[=] <ast.DictComp object at 0x7da1b2407790>
return[name[retdict_]] | keyword[def] identifier[get_dict_column] ( identifier[dict_] , identifier[colx] ):
literal[string]
identifier[retdict_] ={ identifier[key] : identifier[util_list] . identifier[list_take] ( identifier[val] , identifier[colx] )
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[dict_] )}
keyword[return] identifier[retdict_] | def get_dict_column(dict_, colx):
"""
Args:
dict_ (dict_): a dictionary of lists
colx (int):
CommandLine:
python -m utool.util_dict --test-get_dict_column
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}
>>> colx = [2, 0]
>>> retdict_ = get_dict_column(dict_, colx)
>>> result = ut.repr2(retdict_)
>>> print(result)
{'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}
"""
retdict_ = {key: util_list.list_take(val, colx) for (key, val) in six.iteritems(dict_)}
return retdict_ |
def get(session, api_key, **kwargs):
"""
Выполняет доступ к API.
session - модуль requests или сессия из него
api_key - строка ключа доступа к API
rate - тариф, может быть `informers` или `forecast`
lat, lon - широта и долгота
```
import yandex_weather_api
import requests as req
yandex_weather_api.get(req, "ЗАМЕНИ_МЕНЯ_КЛЮЧОМ", lat=55.10, lon=60.10)
```
"""
args, kwargs = validate_args(api_key, **kwargs)
resp = session.get(*args, **kwargs)
return WeatherAnswer.validate(resp.json()) | def function[get, parameter[session, api_key]]:
constant[
Выполняет доступ к API.
session - модуль requests или сессия из него
api_key - строка ключа доступа к API
rate - тариф, может быть `informers` или `forecast`
lat, lon - широта и долгота
```
import yandex_weather_api
import requests as req
yandex_weather_api.get(req, "ЗАМЕНИ_МЕНЯ_КЛЮЧОМ", lat=55.10, lon=60.10)
```
]
<ast.Tuple object at 0x7da18ede6950> assign[=] call[name[validate_args], parameter[name[api_key]]]
variable[resp] assign[=] call[name[session].get, parameter[<ast.Starred object at 0x7da18ede43d0>]]
return[call[name[WeatherAnswer].validate, parameter[call[name[resp].json, parameter[]]]]] | keyword[def] identifier[get] ( identifier[session] , identifier[api_key] ,** identifier[kwargs] ):
literal[string]
identifier[args] , identifier[kwargs] = identifier[validate_args] ( identifier[api_key] ,** identifier[kwargs] )
identifier[resp] = identifier[session] . identifier[get] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[WeatherAnswer] . identifier[validate] ( identifier[resp] . identifier[json] ()) | def get(session, api_key, **kwargs):
"""
Выполняет доступ к API.
session - модуль requests или сессия из него
api_key - строка ключа доступа к API
rate - тариф, может быть `informers` или `forecast`
lat, lon - широта и долгота
```
import yandex_weather_api
import requests as req
yandex_weather_api.get(req, "ЗАМЕНИ_МЕНЯ_КЛЮЧОМ", lat=55.10, lon=60.10)
```
"""
(args, kwargs) = validate_args(api_key, **kwargs)
resp = session.get(*args, **kwargs)
return WeatherAnswer.validate(resp.json()) |
def _processEscapeSequences(replaceText):
"""Replace symbols like \n \\, etc
"""
def _replaceFunc(escapeMatchObject):
char = escapeMatchObject.group(0)[1]
if char in _escapeSequences:
return _escapeSequences[char]
return escapeMatchObject.group(0) # no any replacements, return original value
return _seqReplacer.sub(_replaceFunc, replaceText) | def function[_processEscapeSequences, parameter[replaceText]]:
constant[Replace symbols like
\, etc
]
def function[_replaceFunc, parameter[escapeMatchObject]]:
variable[char] assign[=] call[call[name[escapeMatchObject].group, parameter[constant[0]]]][constant[1]]
if compare[name[char] in name[_escapeSequences]] begin[:]
return[call[name[_escapeSequences]][name[char]]]
return[call[name[escapeMatchObject].group, parameter[constant[0]]]]
return[call[name[_seqReplacer].sub, parameter[name[_replaceFunc], name[replaceText]]]] | keyword[def] identifier[_processEscapeSequences] ( identifier[replaceText] ):
literal[string]
keyword[def] identifier[_replaceFunc] ( identifier[escapeMatchObject] ):
identifier[char] = identifier[escapeMatchObject] . identifier[group] ( literal[int] )[ literal[int] ]
keyword[if] identifier[char] keyword[in] identifier[_escapeSequences] :
keyword[return] identifier[_escapeSequences] [ identifier[char] ]
keyword[return] identifier[escapeMatchObject] . identifier[group] ( literal[int] )
keyword[return] identifier[_seqReplacer] . identifier[sub] ( identifier[_replaceFunc] , identifier[replaceText] ) | def _processEscapeSequences(replaceText):
"""Replace symbols like
\\, etc
"""
def _replaceFunc(escapeMatchObject):
char = escapeMatchObject.group(0)[1]
if char in _escapeSequences:
return _escapeSequences[char] # depends on [control=['if'], data=['char', '_escapeSequences']]
return escapeMatchObject.group(0) # no any replacements, return original value
return _seqReplacer.sub(_replaceFunc, replaceText) |
def normalize(email_address, resolve=True):
"""Return the normalized email address, removing
:param str email_address: The normalized email address
:param bool resolve: Resolve the domain
:rtype: str
"""
address = utils.parseaddr(email_address)
local_part, domain_part = address[1].lower().split('@')
# Plus addressing is supported by Microsoft domains and FastMail
if domain_part in MICROSOFT_DOMAINS:
if '+' in local_part:
local_part = local_part.split('+')[0]
# GMail supports plus addressing and throw-away period delimiters
elif _is_gmail(domain_part, resolve):
local_part = local_part.replace('.', '').split('+')[0]
# Yahoo domain handling of - is like plus addressing
elif _is_yahoo(domain_part, resolve):
if '-' in local_part:
local_part = local_part.split('-')[0]
# FastMail has domain part username aliasing and plus addressing
elif _is_fastmail(domain_part, resolve):
domain_segments = domain_part.split('.')
if len(domain_segments) > 2:
local_part = domain_segments[0]
domain_part = '.'.join(domain_segments[1:])
elif '+' in local_part:
local_part = local_part.split('+')[0]
return '@'.join([local_part, domain_part]) | def function[normalize, parameter[email_address, resolve]]:
constant[Return the normalized email address, removing
:param str email_address: The normalized email address
:param bool resolve: Resolve the domain
:rtype: str
]
variable[address] assign[=] call[name[utils].parseaddr, parameter[name[email_address]]]
<ast.Tuple object at 0x7da18dc072e0> assign[=] call[call[call[name[address]][constant[1]].lower, parameter[]].split, parameter[constant[@]]]
if compare[name[domain_part] in name[MICROSOFT_DOMAINS]] begin[:]
if compare[constant[+] in name[local_part]] begin[:]
variable[local_part] assign[=] call[call[name[local_part].split, parameter[constant[+]]]][constant[0]]
return[call[constant[@].join, parameter[list[[<ast.Name object at 0x7da18fe911e0>, <ast.Name object at 0x7da18fe92740>]]]]] | keyword[def] identifier[normalize] ( identifier[email_address] , identifier[resolve] = keyword[True] ):
literal[string]
identifier[address] = identifier[utils] . identifier[parseaddr] ( identifier[email_address] )
identifier[local_part] , identifier[domain_part] = identifier[address] [ literal[int] ]. identifier[lower] (). identifier[split] ( literal[string] )
keyword[if] identifier[domain_part] keyword[in] identifier[MICROSOFT_DOMAINS] :
keyword[if] literal[string] keyword[in] identifier[local_part] :
identifier[local_part] = identifier[local_part] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[_is_gmail] ( identifier[domain_part] , identifier[resolve] ):
identifier[local_part] = identifier[local_part] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[_is_yahoo] ( identifier[domain_part] , identifier[resolve] ):
keyword[if] literal[string] keyword[in] identifier[local_part] :
identifier[local_part] = identifier[local_part] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[_is_fastmail] ( identifier[domain_part] , identifier[resolve] ):
identifier[domain_segments] = identifier[domain_part] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[domain_segments] )> literal[int] :
identifier[local_part] = identifier[domain_segments] [ literal[int] ]
identifier[domain_part] = literal[string] . identifier[join] ( identifier[domain_segments] [ literal[int] :])
keyword[elif] literal[string] keyword[in] identifier[local_part] :
identifier[local_part] = identifier[local_part] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] literal[string] . identifier[join] ([ identifier[local_part] , identifier[domain_part] ]) | def normalize(email_address, resolve=True):
"""Return the normalized email address, removing
:param str email_address: The normalized email address
:param bool resolve: Resolve the domain
:rtype: str
"""
address = utils.parseaddr(email_address)
(local_part, domain_part) = address[1].lower().split('@')
# Plus addressing is supported by Microsoft domains and FastMail
if domain_part in MICROSOFT_DOMAINS:
if '+' in local_part:
local_part = local_part.split('+')[0] # depends on [control=['if'], data=['local_part']] # depends on [control=['if'], data=[]]
# GMail supports plus addressing and throw-away period delimiters
elif _is_gmail(domain_part, resolve):
local_part = local_part.replace('.', '').split('+')[0] # depends on [control=['if'], data=[]]
# Yahoo domain handling of - is like plus addressing
elif _is_yahoo(domain_part, resolve):
if '-' in local_part:
local_part = local_part.split('-')[0] # depends on [control=['if'], data=['local_part']] # depends on [control=['if'], data=[]]
# FastMail has domain part username aliasing and plus addressing
elif _is_fastmail(domain_part, resolve):
domain_segments = domain_part.split('.')
if len(domain_segments) > 2:
local_part = domain_segments[0]
domain_part = '.'.join(domain_segments[1:]) # depends on [control=['if'], data=[]]
elif '+' in local_part:
local_part = local_part.split('+')[0] # depends on [control=['if'], data=['local_part']] # depends on [control=['if'], data=[]]
return '@'.join([local_part, domain_part]) |
def compute_ecc_params(max_block_size, rate, hasher):
'''Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is an helper function to easily compute the parameters from a resilience rate to instanciate an ECCMan object.'''
#message_size = max_block_size - int(round(max_block_size * rate * 2, 0)) # old way to compute, wasn't really correct because we applied the rate on the total message+ecc size, when we should apply the rate to the message size only (that is not known beforehand, but we want the ecc size (k) = 2*rate*message_size or in other words that k + k * 2 * rate = n)
message_size = int(round(float(max_block_size) / (1 + 2*rate), 0))
ecc_size = max_block_size - message_size
hash_size = len(hasher) # 32 when we use MD5
return {"message_size": message_size, "ecc_size": ecc_size, "hash_size": hash_size} | def function[compute_ecc_params, parameter[max_block_size, rate, hasher]]:
constant[Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is an helper function to easily compute the parameters from a resilience rate to instanciate an ECCMan object.]
variable[message_size] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[call[name[float], parameter[name[max_block_size]]] / binary_operation[constant[1] + binary_operation[constant[2] * name[rate]]]], constant[0]]]]]
variable[ecc_size] assign[=] binary_operation[name[max_block_size] - name[message_size]]
variable[hash_size] assign[=] call[name[len], parameter[name[hasher]]]
return[dictionary[[<ast.Constant object at 0x7da1b04723b0>, <ast.Constant object at 0x7da1b0470910>, <ast.Constant object at 0x7da1b0472f80>], [<ast.Name object at 0x7da1b04724d0>, <ast.Name object at 0x7da1b0472aa0>, <ast.Name object at 0x7da1b0473d90>]]] | keyword[def] identifier[compute_ecc_params] ( identifier[max_block_size] , identifier[rate] , identifier[hasher] ):
literal[string]
identifier[message_size] = identifier[int] ( identifier[round] ( identifier[float] ( identifier[max_block_size] )/( literal[int] + literal[int] * identifier[rate] ), literal[int] ))
identifier[ecc_size] = identifier[max_block_size] - identifier[message_size]
identifier[hash_size] = identifier[len] ( identifier[hasher] )
keyword[return] { literal[string] : identifier[message_size] , literal[string] : identifier[ecc_size] , literal[string] : identifier[hash_size] } | def compute_ecc_params(max_block_size, rate, hasher):
"""Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is an helper function to easily compute the parameters from a resilience rate to instanciate an ECCMan object."""
#message_size = max_block_size - int(round(max_block_size * rate * 2, 0)) # old way to compute, wasn't really correct because we applied the rate on the total message+ecc size, when we should apply the rate to the message size only (that is not known beforehand, but we want the ecc size (k) = 2*rate*message_size or in other words that k + k * 2 * rate = n)
message_size = int(round(float(max_block_size) / (1 + 2 * rate), 0))
ecc_size = max_block_size - message_size
hash_size = len(hasher) # 32 when we use MD5
return {'message_size': message_size, 'ecc_size': ecc_size, 'hash_size': hash_size} |
def _resolveCtypesImports(cbinaries):
"""Completes ctypes BINARY entries for modules with their full path.
"""
if is_unix:
envvar = "LD_LIBRARY_PATH"
elif is_darwin:
envvar = "DYLD_LIBRARY_PATH"
else:
envvar = "PATH"
def _setPaths():
path = os.pathsep.join(PyInstaller.__pathex__)
old = compat.getenv(envvar)
if old is not None:
path = os.pathsep.join((path, old))
compat.setenv(envvar, path)
return old
def _restorePaths(old):
if old is None:
compat.unsetenv(envvar)
else:
compat.setenv(envvar, old)
ret = []
# Try to locate the shared library on disk. This is done by
# executing ctypes.utile.find_library prepending ImportTracker's
# local paths to library search paths, then replaces original values.
old = _setPaths()
for cbin in cbinaries:
# Ignore annoying warnings like:
# 'W: library kernel32.dll required via ctypes not found'
# 'W: library coredll.dll required via ctypes not found'
if cbin in ['coredll.dll', 'kernel32.dll']:
continue
ext = os.path.splitext(cbin)[1]
# On Windows, only .dll files can be loaded.
if os.name == "nt" and ext.lower() in [".so", ".dylib"]:
continue
cpath = find_library(os.path.splitext(cbin)[0])
if is_unix:
# CAVEAT: find_library() is not the correct function. Ctype's
# documentation says that it is meant to resolve only the filename
# (as a *compiler* does) not the full path. Anyway, it works well
# enough on Windows and Mac. On Linux, we need to implement
# more code to find out the full path.
if cpath is None:
cpath = cbin
# "man ld.so" says that we should first search LD_LIBRARY_PATH
# and then the ldcache
for d in compat.getenv(envvar, '').split(os.pathsep):
if os.path.isfile(os.path.join(d, cpath)):
cpath = os.path.join(d, cpath)
break
else:
text = compat.exec_command("/sbin/ldconfig", "-p")
for L in text.strip().splitlines():
if cpath in L:
cpath = L.split("=>", 1)[1].strip()
assert os.path.isfile(cpath)
break
else:
cpath = None
if cpath is None:
logger.warn("library %s required via ctypes not found", cbin)
else:
ret.append((cbin, cpath, "BINARY"))
_restorePaths(old)
return ret | def function[_resolveCtypesImports, parameter[cbinaries]]:
constant[Completes ctypes BINARY entries for modules with their full path.
]
if name[is_unix] begin[:]
variable[envvar] assign[=] constant[LD_LIBRARY_PATH]
def function[_setPaths, parameter[]]:
variable[path] assign[=] call[name[os].pathsep.join, parameter[name[PyInstaller].__pathex__]]
variable[old] assign[=] call[name[compat].getenv, parameter[name[envvar]]]
if compare[name[old] is_not constant[None]] begin[:]
variable[path] assign[=] call[name[os].pathsep.join, parameter[tuple[[<ast.Name object at 0x7da1b0b97d60>, <ast.Name object at 0x7da1b0b94a00>]]]]
call[name[compat].setenv, parameter[name[envvar], name[path]]]
return[name[old]]
def function[_restorePaths, parameter[old]]:
if compare[name[old] is constant[None]] begin[:]
call[name[compat].unsetenv, parameter[name[envvar]]]
variable[ret] assign[=] list[[]]
variable[old] assign[=] call[name[_setPaths], parameter[]]
for taget[name[cbin]] in starred[name[cbinaries]] begin[:]
if compare[name[cbin] in list[[<ast.Constant object at 0x7da1b0b963b0>, <ast.Constant object at 0x7da1b0b95480>]]] begin[:]
continue
variable[ext] assign[=] call[call[name[os].path.splitext, parameter[name[cbin]]]][constant[1]]
if <ast.BoolOp object at 0x7da1b0b956c0> begin[:]
continue
variable[cpath] assign[=] call[name[find_library], parameter[call[call[name[os].path.splitext, parameter[name[cbin]]]][constant[0]]]]
if name[is_unix] begin[:]
if compare[name[cpath] is constant[None]] begin[:]
variable[cpath] assign[=] name[cbin]
for taget[name[d]] in starred[call[call[name[compat].getenv, parameter[name[envvar], constant[]]].split, parameter[name[os].pathsep]]] begin[:]
if call[name[os].path.isfile, parameter[call[name[os].path.join, parameter[name[d], name[cpath]]]]] begin[:]
variable[cpath] assign[=] call[name[os].path.join, parameter[name[d], name[cpath]]]
break
if compare[name[cpath] is constant[None]] begin[:]
call[name[logger].warn, parameter[constant[library %s required via ctypes not found], name[cbin]]]
call[name[_restorePaths], parameter[name[old]]]
return[name[ret]] | keyword[def] identifier[_resolveCtypesImports] ( identifier[cbinaries] ):
literal[string]
keyword[if] identifier[is_unix] :
identifier[envvar] = literal[string]
keyword[elif] identifier[is_darwin] :
identifier[envvar] = literal[string]
keyword[else] :
identifier[envvar] = literal[string]
keyword[def] identifier[_setPaths] ():
identifier[path] = identifier[os] . identifier[pathsep] . identifier[join] ( identifier[PyInstaller] . identifier[__pathex__] )
identifier[old] = identifier[compat] . identifier[getenv] ( identifier[envvar] )
keyword[if] identifier[old] keyword[is] keyword[not] keyword[None] :
identifier[path] = identifier[os] . identifier[pathsep] . identifier[join] (( identifier[path] , identifier[old] ))
identifier[compat] . identifier[setenv] ( identifier[envvar] , identifier[path] )
keyword[return] identifier[old]
keyword[def] identifier[_restorePaths] ( identifier[old] ):
keyword[if] identifier[old] keyword[is] keyword[None] :
identifier[compat] . identifier[unsetenv] ( identifier[envvar] )
keyword[else] :
identifier[compat] . identifier[setenv] ( identifier[envvar] , identifier[old] )
identifier[ret] =[]
identifier[old] = identifier[_setPaths] ()
keyword[for] identifier[cbin] keyword[in] identifier[cbinaries] :
keyword[if] identifier[cbin] keyword[in] [ literal[string] , literal[string] ]:
keyword[continue]
identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[cbin] )[ literal[int] ]
keyword[if] identifier[os] . identifier[name] == literal[string] keyword[and] identifier[ext] . identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
keyword[continue]
identifier[cpath] = identifier[find_library] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[cbin] )[ literal[int] ])
keyword[if] identifier[is_unix] :
keyword[if] identifier[cpath] keyword[is] keyword[None] :
identifier[cpath] = identifier[cbin]
keyword[for] identifier[d] keyword[in] identifier[compat] . identifier[getenv] ( identifier[envvar] , literal[string] ). identifier[split] ( identifier[os] . identifier[pathsep] ):
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[cpath] )):
identifier[cpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[cpath] )
keyword[break]
keyword[else] :
identifier[text] = identifier[compat] . identifier[exec_command] ( literal[string] , literal[string] )
keyword[for] identifier[L] keyword[in] identifier[text] . identifier[strip] (). identifier[splitlines] ():
keyword[if] identifier[cpath] keyword[in] identifier[L] :
identifier[cpath] = identifier[L] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[strip] ()
keyword[assert] identifier[os] . identifier[path] . identifier[isfile] ( identifier[cpath] )
keyword[break]
keyword[else] :
identifier[cpath] = keyword[None]
keyword[if] identifier[cpath] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] ( literal[string] , identifier[cbin] )
keyword[else] :
identifier[ret] . identifier[append] (( identifier[cbin] , identifier[cpath] , literal[string] ))
identifier[_restorePaths] ( identifier[old] )
keyword[return] identifier[ret] | def _resolveCtypesImports(cbinaries):
"""Completes ctypes BINARY entries for modules with their full path.
"""
if is_unix:
envvar = 'LD_LIBRARY_PATH' # depends on [control=['if'], data=[]]
elif is_darwin:
envvar = 'DYLD_LIBRARY_PATH' # depends on [control=['if'], data=[]]
else:
envvar = 'PATH'
def _setPaths():
path = os.pathsep.join(PyInstaller.__pathex__)
old = compat.getenv(envvar)
if old is not None:
path = os.pathsep.join((path, old)) # depends on [control=['if'], data=['old']]
compat.setenv(envvar, path)
return old
def _restorePaths(old):
if old is None:
compat.unsetenv(envvar) # depends on [control=['if'], data=[]]
else:
compat.setenv(envvar, old)
ret = []
# Try to locate the shared library on disk. This is done by
# executing ctypes.utile.find_library prepending ImportTracker's
# local paths to library search paths, then replaces original values.
old = _setPaths()
for cbin in cbinaries:
# Ignore annoying warnings like:
# 'W: library kernel32.dll required via ctypes not found'
# 'W: library coredll.dll required via ctypes not found'
if cbin in ['coredll.dll', 'kernel32.dll']:
continue # depends on [control=['if'], data=[]]
ext = os.path.splitext(cbin)[1]
# On Windows, only .dll files can be loaded.
if os.name == 'nt' and ext.lower() in ['.so', '.dylib']:
continue # depends on [control=['if'], data=[]]
cpath = find_library(os.path.splitext(cbin)[0])
if is_unix:
# CAVEAT: find_library() is not the correct function. Ctype's
# documentation says that it is meant to resolve only the filename
# (as a *compiler* does) not the full path. Anyway, it works well
# enough on Windows and Mac. On Linux, we need to implement
# more code to find out the full path.
if cpath is None:
cpath = cbin # depends on [control=['if'], data=['cpath']]
# "man ld.so" says that we should first search LD_LIBRARY_PATH
# and then the ldcache
for d in compat.getenv(envvar, '').split(os.pathsep):
if os.path.isfile(os.path.join(d, cpath)):
cpath = os.path.join(d, cpath)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
else:
text = compat.exec_command('/sbin/ldconfig', '-p')
for L in text.strip().splitlines():
if cpath in L:
cpath = L.split('=>', 1)[1].strip()
assert os.path.isfile(cpath)
break # depends on [control=['if'], data=['cpath', 'L']] # depends on [control=['for'], data=['L']]
else:
cpath = None # depends on [control=['if'], data=[]]
if cpath is None:
logger.warn('library %s required via ctypes not found', cbin) # depends on [control=['if'], data=[]]
else:
ret.append((cbin, cpath, 'BINARY')) # depends on [control=['for'], data=['cbin']]
_restorePaths(old)
return ret |
def DSP_capture_add_samples(self,new_data):
"""
Append new samples to the data_capture array and increment the sample counter
If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0
then new values are not appended to the data_capture array.
"""
self.capture_sample_count += len(new_data)
if self.Tcapture > 0:
self.data_capture = np.hstack((self.data_capture,new_data))
if (self.Tcapture > 0) and (len(self.data_capture) > self.Ncapture):
self.data_capture = self.data_capture[-self.Ncapture:] | def function[DSP_capture_add_samples, parameter[self, new_data]]:
constant[
Append new samples to the data_capture array and increment the sample counter
If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0
then new values are not appended to the data_capture array.
]
<ast.AugAssign object at 0x7da2041d8430>
if compare[name[self].Tcapture greater[>] constant[0]] begin[:]
name[self].data_capture assign[=] call[name[np].hstack, parameter[tuple[[<ast.Attribute object at 0x7da2041d9990>, <ast.Name object at 0x7da2041d8070>]]]]
if <ast.BoolOp object at 0x7da2041d90f0> begin[:]
name[self].data_capture assign[=] call[name[self].data_capture][<ast.Slice object at 0x7da207f007f0>] | keyword[def] identifier[DSP_capture_add_samples] ( identifier[self] , identifier[new_data] ):
literal[string]
identifier[self] . identifier[capture_sample_count] += identifier[len] ( identifier[new_data] )
keyword[if] identifier[self] . identifier[Tcapture] > literal[int] :
identifier[self] . identifier[data_capture] = identifier[np] . identifier[hstack] (( identifier[self] . identifier[data_capture] , identifier[new_data] ))
keyword[if] ( identifier[self] . identifier[Tcapture] > literal[int] ) keyword[and] ( identifier[len] ( identifier[self] . identifier[data_capture] )> identifier[self] . identifier[Ncapture] ):
identifier[self] . identifier[data_capture] = identifier[self] . identifier[data_capture] [- identifier[self] . identifier[Ncapture] :] | def DSP_capture_add_samples(self, new_data):
"""
Append new samples to the data_capture array and increment the sample counter
If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0
then new values are not appended to the data_capture array.
"""
self.capture_sample_count += len(new_data)
if self.Tcapture > 0:
self.data_capture = np.hstack((self.data_capture, new_data))
if self.Tcapture > 0 and len(self.data_capture) > self.Ncapture:
self.data_capture = self.data_capture[-self.Ncapture:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def merge_cands(pkllist, outroot='', remove=[], snrmin=0, snrmax=999):
    """ Takes cands pkls from list and filters to write new single "merge" pkl.
    Ignores segment cand files.

    Parameters
    ----------
    pkllist : list of str
        Candidate pickle files; entries containing 'merge' or 'seg' are skipped.
        File names must follow the '..._sc<scan>.pkl' structure.
    outroot : str
        Root of the output file name; derived from the first input when empty.
    remove : list
        [t0, t1, t2, t3, ...] where each pair (t0-t1, t2-t3, ...) defines a time
        range in seconds (relative to the earliest candidate) to drop.
        The default [] is never mutated, so the shared-default pitfall does
        not apply here.
    snrmin, snrmax : number
        Define how to filter cands read and written by abs(snr).
    """
    assert isinstance(pkllist, list), "pkllist must be list of file names"

    if not outroot:
        outroot = '_'.join(pkllist[0].split('_')[1:-1])

    workdir = os.path.dirname(pkllist[0])
    mergepkl = os.path.join(workdir, 'cands_' + outroot + '_merge.pkl')

    # drop already-merged and per-segment files, then order by scan number
    pkllist = [pf for pf in pkllist if ('merge' not in pf) and ('seg' not in pf)]
    pkllist.sort(key=lambda pf: int(pf.rstrip('.pkl').split('_sc')[1]))  # assumes filename structure
    scans = [int(ff.rstrip('.pkl').split('_sc')[1]) for ff in pkllist]
    logger.info('Aggregating cands from scans %s' % scans)

    # use 'dict' suffix to define multi-scan metadata dictionaries
    mergeloc = []
    mergeprop = []
    mergetimes = []
    segmenttimesdict = {}
    starttime_mjddict = {}
    for pklfile in pkllist:
        # read candidates plus the per-scan state dict
        locs, props, d = read_candidates(pklfile, snrmin=snrmin, snrmax=snrmax,
                                         returnstate=True)
        scan = d['scan']
        segmenttimesdict[scan] = d['segmenttimes']
        starttime_mjddict[scan] = d['starttime_mjd']
        times = int2mjd(d, locs)

        # build merged loc,prop lists
        for i in range(len(locs)):
            mergeloc += [list(locs[i])]
            mergeprop += [list(props[i])]
            mergetimes.append(times[i])

    mergeloc = np.array(mergeloc)
    mergeprop = np.array(mergeprop)
    mergetimes = np.array(mergetimes)

    # filter by remove, if needed
    if remove:
        mergetimes -= mergetimes.min()
        ww = np.ones(len(mergetimes), dtype=bool)  # initialize pass filter
        for first in range(0, len(remove), 2):
            badrange0 = remove[first]
            badrange1 = remove[first + 1]
            # keep only candidates strictly outside the [badrange0, badrange1] window
            ww = ww & np.where((mergetimes < badrange0) | (mergetimes > badrange1),
                               True, False)
        mergeloc = mergeloc[ww]
        mergeprop = mergeprop[ww]

    # update metadata; 'd' is the last scan's state dict, extended with the
    # multi-scan dictionaries built above
    d['remove'] = remove
    d['segmenttimesdict'] = segmenttimesdict
    d['starttime_mjddict'] = starttime_mjddict
    logger.info('Writing filtered set of %d candidates to %s' % (len(mergeloc), mergepkl))

    # write up new pkl. Pickle protocol 2 is binary, so the file must be opened
    # in binary mode (text-mode 'w' raises TypeError on Python 3); the context
    # manager also guarantees the handle closes if a dump fails.
    with open(mergepkl, 'wb') as pkl:
        pickle.dump(d, pkl, protocol=2)
        pickle.dump((mergeloc, mergeprop), pkl, protocol=2)
constant[ Takes cands pkls from list and filteres to write new single "merge" pkl.
Ignores segment cand files.
remove is a list [t0,t1,t2,t3], where t0-t1, t2-t3 define the time ranges in seconds.
snrmin, snrmax define how to filter cands read and written by abs(snr)
]
assert[call[name[isinstance], parameter[name[pkllist], name[list]]]]
if <ast.UnaryOp object at 0x7da1b26f7b80> begin[:]
variable[outroot] assign[=] call[constant[_].join, parameter[call[call[call[name[pkllist]][constant[0]].split, parameter[constant[_]]]][<ast.Slice object at 0x7da1b26f78e0>]]]
variable[workdir] assign[=] call[name[os].path.dirname, parameter[call[name[pkllist]][constant[0]]]]
variable[mergepkl] assign[=] call[name[os].path.join, parameter[name[workdir], binary_operation[binary_operation[constant[cands_] + name[outroot]] + constant[_merge.pkl]]]]
variable[pkllist] assign[=] <ast.ListComp object at 0x7da1b26f73d0>
call[name[pkllist].sort, parameter[]]
variable[scans] assign[=] <ast.ListComp object at 0x7da1b26f6b60>
call[name[logger].info, parameter[binary_operation[constant[Aggregating cands from scans %s] <ast.Mod object at 0x7da2590d6920> name[scans]]]]
variable[mergeloc] assign[=] list[[]]
variable[mergeprop] assign[=] list[[]]
variable[mergetimes] assign[=] list[[]]
variable[segmenttimesdict] assign[=] dictionary[[], []]
variable[starttime_mjddict] assign[=] dictionary[[], []]
for taget[name[pklfile]] in starred[name[pkllist]] begin[:]
<ast.Tuple object at 0x7da1b26f6350> assign[=] call[name[read_candidates], parameter[name[pklfile]]]
if compare[constant[snr2] in call[name[d]][constant[features]]] begin[:]
variable[snrcol] assign[=] call[call[name[d]][constant[features]].index, parameter[constant[snr2]]]
variable[scan] assign[=] call[name[d]][constant[scan]]
call[name[segmenttimesdict]][name[scan]] assign[=] call[name[d]][constant[segmenttimes]]
call[name[starttime_mjddict]][name[scan]] assign[=] call[name[d]][constant[starttime_mjd]]
variable[times] assign[=] call[name[int2mjd], parameter[name[d], name[locs]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[locs]]]]]] begin[:]
variable[loc] assign[=] call[name[list], parameter[call[name[locs]][name[i]]]]
variable[prop] assign[=] call[name[list], parameter[call[name[props]][name[i]]]]
<ast.AugAssign object at 0x7da1b26f4160>
<ast.AugAssign object at 0x7da1b26f40a0>
call[name[mergetimes].append, parameter[call[name[times]][name[i]]]]
variable[mergeloc] assign[=] call[name[np].array, parameter[name[mergeloc]]]
variable[mergeprop] assign[=] call[name[np].array, parameter[name[mergeprop]]]
variable[mergetimes] assign[=] call[name[np].array, parameter[name[mergetimes]]]
if name[remove] begin[:]
<ast.AugAssign object at 0x7da18ede4340>
variable[ww] assign[=] call[name[np].ones, parameter[call[name[len], parameter[name[mergetimes]]]]]
variable[nranges] assign[=] call[name[len], parameter[name[remove]]]
for taget[name[first]] in starred[call[name[range], parameter[constant[0], name[nranges], constant[2]]]] begin[:]
variable[badrange0] assign[=] call[name[remove]][name[first]]
variable[badrange1] assign[=] call[name[remove]][binary_operation[name[first] + constant[1]]]
variable[ww] assign[=] binary_operation[name[ww] <ast.BitAnd object at 0x7da2590d6b60> call[name[np].where, parameter[binary_operation[compare[name[mergetimes] less[<] name[badrange0]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[mergetimes] greater[>] name[badrange1]]], constant[True], constant[False]]]]
variable[mergeloc] assign[=] call[name[mergeloc]][name[ww]]
variable[mergeprop] assign[=] call[name[mergeprop]][name[ww]]
call[name[d]][constant[remove]] assign[=] name[remove]
call[name[d]][constant[segmenttimesdict]] assign[=] name[segmenttimesdict]
call[name[d]][constant[starttime_mjddict]] assign[=] name[starttime_mjddict]
call[name[logger].info, parameter[binary_operation[constant[Writing filtered set of %d candidates to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18ede7010>, <ast.Name object at 0x7da18ede4370>]]]]]
variable[pkl] assign[=] call[name[open], parameter[name[mergepkl], constant[w]]]
call[name[pickle].dump, parameter[name[d], name[pkl]]]
call[name[pickle].dump, parameter[tuple[[<ast.Name object at 0x7da1b2527d60>, <ast.Name object at 0x7da1b25269e0>]], name[pkl]]]
call[name[pkl].close, parameter[]] | keyword[def] identifier[merge_cands] ( identifier[pkllist] , identifier[outroot] = literal[string] , identifier[remove] =[], identifier[snrmin] = literal[int] , identifier[snrmax] = literal[int] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[pkllist] , identifier[list] ), literal[string]
keyword[if] keyword[not] identifier[outroot] :
identifier[outroot] = literal[string] . identifier[join] ( identifier[pkllist] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] :- literal[int] ])
identifier[workdir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[pkllist] [ literal[int] ])
identifier[mergepkl] = identifier[os] . identifier[path] . identifier[join] ( identifier[workdir] , literal[string] + identifier[outroot] + literal[string] )
identifier[pkllist] =[ identifier[pkllist] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[pkllist] )) keyword[if] ( literal[string] keyword[not] keyword[in] identifier[pkllist] [ identifier[i] ]) keyword[and] ( literal[string] keyword[not] keyword[in] identifier[pkllist] [ identifier[i] ])]
identifier[pkllist] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[i] : identifier[int] ( identifier[i] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]))
identifier[scans] =[ identifier[int] ( identifier[ff] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]) keyword[for] identifier[ff] keyword[in] identifier[pkllist] ]
identifier[logger] . identifier[info] ( literal[string] % identifier[scans] )
identifier[mergeloc] =[]; identifier[mergeprop] =[]; identifier[mergetimes] =[]
identifier[segmenttimesdict] ={}
identifier[starttime_mjddict] ={}
keyword[for] identifier[pklfile] keyword[in] identifier[pkllist] :
identifier[locs] , identifier[props] , identifier[d] = identifier[read_candidates] ( identifier[pklfile] , identifier[snrmin] = identifier[snrmin] , identifier[snrmax] = identifier[snrmax] , identifier[returnstate] = keyword[True] )
keyword[if] literal[string] keyword[in] identifier[d] [ literal[string] ]:
identifier[snrcol] = identifier[d] [ literal[string] ]. identifier[index] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[d] [ literal[string] ]:
identifier[snrcol] = identifier[d] [ literal[string] ]. identifier[index] ( literal[string] )
identifier[scan] = identifier[d] [ literal[string] ]
identifier[segmenttimesdict] [ identifier[scan] ]= identifier[d] [ literal[string] ]
identifier[starttime_mjddict] [ identifier[scan] ]= identifier[d] [ literal[string] ]
identifier[times] = identifier[int2mjd] ( identifier[d] , identifier[locs] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[locs] )):
identifier[loc] = identifier[list] ( identifier[locs] [ identifier[i] ])
identifier[prop] = identifier[list] ( identifier[props] [ identifier[i] ])
identifier[mergeloc] +=[ identifier[loc] ]
identifier[mergeprop] +=[ identifier[prop] ]
identifier[mergetimes] . identifier[append] ( identifier[times] [ identifier[i] ])
identifier[mergeloc] = identifier[np] . identifier[array] ( identifier[mergeloc] )
identifier[mergeprop] = identifier[np] . identifier[array] ( identifier[mergeprop] )
identifier[mergetimes] = identifier[np] . identifier[array] ( identifier[mergetimes] )
keyword[if] identifier[remove] :
identifier[mergetimes] -= identifier[mergetimes] . identifier[min] ()
identifier[ww] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[mergetimes] ), identifier[dtype] = identifier[bool] )
identifier[nranges] = identifier[len] ( identifier[remove] )
keyword[for] identifier[first] keyword[in] identifier[range] ( literal[int] , identifier[nranges] , literal[int] ):
identifier[badrange0] = identifier[remove] [ identifier[first] ]
identifier[badrange1] = identifier[remove] [ identifier[first] + literal[int] ]
identifier[ww] = identifier[ww] & identifier[np] . identifier[where] (( identifier[mergetimes] < identifier[badrange0] )|( identifier[mergetimes] > identifier[badrange1] ), keyword[True] , keyword[False] )
identifier[mergeloc] = identifier[mergeloc] [ identifier[ww] ]
identifier[mergeprop] = identifier[mergeprop] [ identifier[ww] ]
identifier[d] [ literal[string] ]= identifier[remove]
identifier[d] [ literal[string] ]= identifier[segmenttimesdict]
identifier[d] [ literal[string] ]= identifier[starttime_mjddict]
identifier[logger] . identifier[info] ( literal[string] %( identifier[len] ( identifier[mergeloc] ), identifier[mergepkl] ))
identifier[pkl] = identifier[open] ( identifier[mergepkl] , literal[string] )
identifier[pickle] . identifier[dump] ( identifier[d] , identifier[pkl] , identifier[protocol] = literal[int] )
identifier[pickle] . identifier[dump] (( identifier[mergeloc] , identifier[mergeprop] ), identifier[pkl] , identifier[protocol] = literal[int] )
identifier[pkl] . identifier[close] () | def merge_cands(pkllist, outroot='', remove=[], snrmin=0, snrmax=999):
""" Takes cands pkls from list and filteres to write new single "merge" pkl.
Ignores segment cand files.
remove is a list [t0,t1,t2,t3], where t0-t1, t2-t3 define the time ranges in seconds.
snrmin, snrmax define how to filter cands read and written by abs(snr)
"""
assert isinstance(pkllist, list), 'pkllist must be list of file names'
if not outroot:
outroot = '_'.join(pkllist[0].split('_')[1:-1]) # depends on [control=['if'], data=[]]
workdir = os.path.dirname(pkllist[0])
mergepkl = os.path.join(workdir, 'cands_' + outroot + '_merge.pkl')
pkllist = [pkllist[i] for i in range(len(pkllist)) if 'merge' not in pkllist[i] and 'seg' not in pkllist[i]]
pkllist.sort(key=lambda i: int(i.rstrip('.pkl').split('_sc')[1])) # assumes filename structure
scans = [int(ff.rstrip('.pkl').split('_sc')[1]) for ff in pkllist]
logger.info('Aggregating cands from scans %s' % scans)
# get sample state dict. use 'dict' suffix to define multi-scan metadata dictionaries
mergeloc = []
mergeprop = []
mergetimes = []
segmenttimesdict = {}
starttime_mjddict = {}
for pklfile in pkllist:
# get scan number and read candidates
(locs, props, d) = read_candidates(pklfile, snrmin=snrmin, snrmax=snrmax, returnstate=True)
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2') # depends on [control=['if'], data=[]]
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1') # depends on [control=['if'], data=[]]
# scan = int(pklfile.rstrip('.pkl').split('_sc')[1]) # parsing filename to get scan number
scan = d['scan']
segmenttimesdict[scan] = d['segmenttimes']
starttime_mjddict[scan] = d['starttime_mjd']
times = int2mjd(d, locs)
# build merged loc,prop lists
for i in range(len(locs)):
loc = list(locs[i])
prop = list(props[i])
mergeloc += [loc]
mergeprop += [prop]
mergetimes.append(times[i]) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['pklfile']]
mergeloc = np.array(mergeloc)
mergeprop = np.array(mergeprop)
mergetimes = np.array(mergetimes)
# filter by remove, if needed
if remove:
mergetimes -= mergetimes.min()
ww = np.ones(len(mergetimes), dtype=bool) # initialize pass filter
nranges = len(remove)
for first in range(0, nranges, 2):
badrange0 = remove[first]
badrange1 = remove[first + 1]
ww = ww & np.where((mergetimes < badrange0) | (mergetimes > badrange1), True, False) # depends on [control=['for'], data=['first']]
mergeloc = mergeloc[ww]
mergeprop = mergeprop[ww] # depends on [control=['if'], data=[]]
# update metadata
d['remove'] = remove
d['segmenttimesdict'] = segmenttimesdict
d['starttime_mjddict'] = starttime_mjddict
logger.info('Writing filtered set of %d candidates to %s' % (len(mergeloc), mergepkl))
# write up new pkl
pkl = open(mergepkl, 'w')
pickle.dump(d, pkl, protocol=2)
pickle.dump((mergeloc, mergeprop), pkl, protocol=2)
pkl.close() |
def remove_dcm2nii_underprocessed(filepaths):
    """ Return a subset of `filepaths`, keeping only files whose basename is not
    a suffix of some other, longer basename in the set.

    This relies on dcm2nii prepending one prefix character per automatic
    processing step during DICOM to NifTI conversion, so the longest name is
    the most processed file.

    Parameters
    ----------
    filepaths: iterable of str

    Returns
    -------
    cleaned_paths: iterable of str
    """
    # shortest first, so every later entry is a candidate "more processed" file
    by_length = sorted(filepaths, key=len)

    kept = []
    for pos, path in enumerate(by_length):
        base = op.basename(path)
        longer = by_length[pos + 1:]
        # drop this path if any longer file's basename ends with it
        if not any(op.basename(other).endswith(base) for other in longer):
            kept.append(path)
    return kept
constant[ Return a subset of `filepaths`. Keep only the files that have a basename longer than the
others with same suffix.
This works based on that dcm2nii appends a preffix character for each processing
step it does automatically in the DICOM to NifTI conversion.
Parameters
----------
filepaths: iterable of str
Returns
-------
cleaned_paths: iterable of str
]
variable[cln_flist] assign[=] list[[]]
variable[len_sorted] assign[=] call[name[sorted], parameter[name[filepaths]]]
for taget[tuple[[<ast.Name object at 0x7da1afe79de0>, <ast.Name object at 0x7da1afe78730>]]] in starred[call[name[enumerate], parameter[name[len_sorted]]]] begin[:]
variable[remove] assign[=] constant[False]
variable[fname] assign[=] call[name[op].basename, parameter[name[fpath]]]
variable[rest] assign[=] call[name[len_sorted]][<ast.Slice object at 0x7da1afe0e350>]
for taget[name[rest_fpath]] in starred[name[rest]] begin[:]
variable[rest_file] assign[=] call[name[op].basename, parameter[name[rest_fpath]]]
if call[name[rest_file].endswith, parameter[name[fname]]] begin[:]
variable[remove] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da1afe0e500> begin[:]
call[name[cln_flist].append, parameter[name[fpath]]]
return[name[cln_flist]] | keyword[def] identifier[remove_dcm2nii_underprocessed] ( identifier[filepaths] ):
literal[string]
identifier[cln_flist] =[]
identifier[len_sorted] = identifier[sorted] ( identifier[filepaths] , identifier[key] = identifier[len] )
keyword[for] identifier[idx] , identifier[fpath] keyword[in] identifier[enumerate] ( identifier[len_sorted] ):
identifier[remove] = keyword[False]
identifier[fname] = identifier[op] . identifier[basename] ( identifier[fpath] )
identifier[rest] = identifier[len_sorted] [ identifier[idx] + literal[int] :]
keyword[for] identifier[rest_fpath] keyword[in] identifier[rest] :
identifier[rest_file] = identifier[op] . identifier[basename] ( identifier[rest_fpath] )
keyword[if] identifier[rest_file] . identifier[endswith] ( identifier[fname] ):
identifier[remove] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[remove] :
identifier[cln_flist] . identifier[append] ( identifier[fpath] )
keyword[return] identifier[cln_flist] | def remove_dcm2nii_underprocessed(filepaths):
""" Return a subset of `filepaths`. Keep only the files that have a basename longer than the
others with same suffix.
This works based on that dcm2nii appends a preffix character for each processing
step it does automatically in the DICOM to NifTI conversion.
Parameters
----------
filepaths: iterable of str
Returns
-------
cleaned_paths: iterable of str
"""
cln_flist = []
# sort them by size
len_sorted = sorted(filepaths, key=len)
for (idx, fpath) in enumerate(len_sorted):
remove = False
# get the basename and the rest of the files
fname = op.basename(fpath)
rest = len_sorted[idx + 1:]
# check if the basename is in the basename of the rest of the files
for rest_fpath in rest:
rest_file = op.basename(rest_fpath)
if rest_file.endswith(fname):
remove = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rest_fpath']]
if not remove:
cln_flist.append(fpath) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return cln_flist |
def transform_streams_for_comparison(outputs):
    """Makes failure output for streams better by having key be the stream name"""
    transformed = []
    for item in outputs:
        # non-stream outputs pass through untouched
        if item.output_type != 'stream':
            transformed.append(item)
            continue
        # rekey stream text under its stream name (e.g. 'stdout'/'stderr')
        transformed.append({'output_type': 'stream', item.name: item.text})
    return transformed
constant[Makes failure output for streams better by having key be the stream name]
variable[new_outputs] assign[=] list[[]]
for taget[name[output]] in starred[name[outputs]] begin[:]
if compare[name[output].output_type equal[==] constant[stream]] begin[:]
call[name[new_outputs].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1ec7010>, <ast.Attribute object at 0x7da1b1ec68f0>], [<ast.Constant object at 0x7da1b1ec6680>, <ast.Attribute object at 0x7da1b1ec6a10>]]]]
return[name[new_outputs]] | keyword[def] identifier[transform_streams_for_comparison] ( identifier[outputs] ):
literal[string]
identifier[new_outputs] =[]
keyword[for] identifier[output] keyword[in] identifier[outputs] :
keyword[if] ( identifier[output] . identifier[output_type] == literal[string] ):
identifier[new_outputs] . identifier[append] ({
literal[string] : literal[string] ,
identifier[output] . identifier[name] : identifier[output] . identifier[text] ,
})
keyword[else] :
identifier[new_outputs] . identifier[append] ( identifier[output] )
keyword[return] identifier[new_outputs] | def transform_streams_for_comparison(outputs):
"""Makes failure output for streams better by having key be the stream name"""
new_outputs = []
for output in outputs:
if output.output_type == 'stream':
# Transform output
new_outputs.append({'output_type': 'stream', output.name: output.text}) # depends on [control=['if'], data=[]]
else:
new_outputs.append(output) # depends on [control=['for'], data=['output']]
return new_outputs |
# NOTE: the verbose_destination default is evaluated once at import time;
# it falls back to the stream object itself when stderr has no fileno()
# (e.g. when stderr is replaced by a non-file object).
def install(verbose=True,
            verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__,
            strict=True,
            **kwargs):
    """
    Installs the manhole.
    Args:
        verbose (bool): Set it to ``False`` to squelch the logging.
        verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
            (stderr ``2`` file descriptor).
        patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpy`` monkeypatched
        activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
            want the Manhole thread to start when this signal is sent. This is desireable in case you don't want the
            thread active all the time.
        oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
            want the Manhole to listen for connection in the signal handler. This is desireable in case you don't want
            threads at all.
        thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
            ``oneshort_on`` or ``activate_on`` are used.
        sigmask (list of ints or signal names): Will set the signal mask to the given list (using
            ``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
            **NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
            Python will force all the signal handling to be run in the main thread but signalfd doesn't.
        socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
            disables ``patch_fork`` as children cannot reuse the same path.
        reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
            alleviates cleanup failures when using fork+exec patterns.
        locals (dict): Names to add to manhole interactive shell locals.
        daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
        redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
        connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
            output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
    Returns:
        Manhole: the module-level singleton instance.
    Raises:
        AlreadyInstalled: when the manhole is already installed and ``strict`` is true.
    """
    # pylint: disable=W0603
    global _MANHOLE
    # _LOCK guards creation/teardown of the module-level singleton below
    with _LOCK:
        if _MANHOLE is None:
            _MANHOLE = Manhole()
        else:
            if strict:
                raise AlreadyInstalled("Manhole already installed!")
            else:
                # non-strict reinstall: release the previous log/manhole state
                # before reconfiguring with the new arguments
                _LOG.release()
                _MANHOLE.release()  # Threads might be started here
    _LOG.configure(verbose, verbose_destination)
    _MANHOLE.configure(**kwargs)  # Threads might be started here
    return _MANHOLE
constant[
Installs the manhole.
Args:
verbose (bool): Set it to ``False`` to squelch the logging.
verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
(stderr ``2`` file descriptor).
patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpy`` monkeypatched
activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole thread to start when this signal is sent. This is desireable in case you don't want the
thread active all the time.
oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole to listen for connection in the signal handler. This is desireable in case you don't want
threads at all.
thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
``oneshort_on`` or ``activate_on`` are used.
sigmask (list of ints or signal names): Will set the signal mask to the given list (using
``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
**NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
Python will force all the signal handling to be run in the main thread but signalfd doesn't.
socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
disables ``patch_fork`` as children cannot reuse the same path.
reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
alleviates cleanup failures when using fork+exec patterns.
locals (dict): Names to add to manhole interactive shell locals.
daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
]
<ast.Global object at 0x7da20c9922c0>
with name[_LOCK] begin[:]
if compare[name[_MANHOLE] is constant[None]] begin[:]
variable[_MANHOLE] assign[=] call[name[Manhole], parameter[]]
call[name[_LOG].configure, parameter[name[verbose], name[verbose_destination]]]
call[name[_MANHOLE].configure, parameter[]]
return[name[_MANHOLE]] | keyword[def] identifier[install] ( identifier[verbose] = keyword[True] ,
identifier[verbose_destination] = identifier[sys] . identifier[__stderr__] . identifier[fileno] () keyword[if] identifier[hasattr] ( identifier[sys] . identifier[__stderr__] , literal[string] ) keyword[else] identifier[sys] . identifier[__stderr__] ,
identifier[strict] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
keyword[global] identifier[_MANHOLE]
keyword[with] identifier[_LOCK] :
keyword[if] identifier[_MANHOLE] keyword[is] keyword[None] :
identifier[_MANHOLE] = identifier[Manhole] ()
keyword[else] :
keyword[if] identifier[strict] :
keyword[raise] identifier[AlreadyInstalled] ( literal[string] )
keyword[else] :
identifier[_LOG] . identifier[release] ()
identifier[_MANHOLE] . identifier[release] ()
identifier[_LOG] . identifier[configure] ( identifier[verbose] , identifier[verbose_destination] )
identifier[_MANHOLE] . identifier[configure] (** identifier[kwargs] )
keyword[return] identifier[_MANHOLE] | def install(verbose=True, verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__, strict=True, **kwargs):
"""
Installs the manhole.
Args:
verbose (bool): Set it to ``False`` to squelch the logging.
verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
(stderr ``2`` file descriptor).
patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpy`` monkeypatched
activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole thread to start when this signal is sent. This is desireable in case you don't want the
thread active all the time.
oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
want the Manhole to listen for connection in the signal handler. This is desireable in case you don't want
threads at all.
thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
``oneshort_on`` or ``activate_on`` are used.
sigmask (list of ints or signal names): Will set the signal mask to the given list (using
``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
**NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
Python will force all the signal handling to be run in the main thread but signalfd doesn't.
socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
disables ``patch_fork`` as children cannot reuse the same path.
reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
alleviates cleanup failures when using fork+exec patterns.
locals (dict): Names to add to manhole interactive shell locals.
daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
"""
# pylint: disable=W0603
global _MANHOLE
with _LOCK:
if _MANHOLE is None:
_MANHOLE = Manhole() # depends on [control=['if'], data=['_MANHOLE']]
elif strict:
raise AlreadyInstalled('Manhole already installed!') # depends on [control=['if'], data=[]]
else:
_LOG.release()
_MANHOLE.release() # Threads might be started here # depends on [control=['with'], data=[]]
_LOG.configure(verbose, verbose_destination)
_MANHOLE.configure(**kwargs) # Threads might be started here
return _MANHOLE |
def _tls_auth_encrypt(self, s):
"""
Return the TLSCiphertext.encrypted_record for AEAD ciphers.
"""
wcs = self.tls_session.wcs
write_seq_num = struct.pack("!Q", wcs.seq_num)
wcs.seq_num += 1
return wcs.cipher.auth_encrypt(s, b"", write_seq_num) | def function[_tls_auth_encrypt, parameter[self, s]]:
constant[
Return the TLSCiphertext.encrypted_record for AEAD ciphers.
]
variable[wcs] assign[=] name[self].tls_session.wcs
variable[write_seq_num] assign[=] call[name[struct].pack, parameter[constant[!Q], name[wcs].seq_num]]
<ast.AugAssign object at 0x7da1b1ff4400>
return[call[name[wcs].cipher.auth_encrypt, parameter[name[s], constant[b''], name[write_seq_num]]]] | keyword[def] identifier[_tls_auth_encrypt] ( identifier[self] , identifier[s] ):
literal[string]
identifier[wcs] = identifier[self] . identifier[tls_session] . identifier[wcs]
identifier[write_seq_num] = identifier[struct] . identifier[pack] ( literal[string] , identifier[wcs] . identifier[seq_num] )
identifier[wcs] . identifier[seq_num] += literal[int]
keyword[return] identifier[wcs] . identifier[cipher] . identifier[auth_encrypt] ( identifier[s] , literal[string] , identifier[write_seq_num] ) | def _tls_auth_encrypt(self, s):
"""
Return the TLSCiphertext.encrypted_record for AEAD ciphers.
"""
wcs = self.tls_session.wcs
write_seq_num = struct.pack('!Q', wcs.seq_num)
wcs.seq_num += 1
return wcs.cipher.auth_encrypt(s, b'', write_seq_num) |
def compare_hives(fs0, fs1):
    """Compares all the windows registry hive files
    returning those which differ.
    """
    # Consider both the system hives and every per-user hive, keeping
    # only those whose checksums disagree between the two filesystems.
    all_hives = chain(registries_path(fs0.fsroot), user_registries(fs0, fs1))
    return [hive for hive in all_hives
            if fs0.checksum(hive) != fs1.checksum(hive)]
constant[Compares all the windows registry hive files
returning those which differ.
]
variable[registries] assign[=] list[[]]
for taget[name[path]] in starred[call[name[chain], parameter[call[name[registries_path], parameter[name[fs0].fsroot]], call[name[user_registries], parameter[name[fs0], name[fs1]]]]]] begin[:]
if compare[call[name[fs0].checksum, parameter[name[path]]] not_equal[!=] call[name[fs1].checksum, parameter[name[path]]]] begin[:]
call[name[registries].append, parameter[name[path]]]
return[name[registries]] | keyword[def] identifier[compare_hives] ( identifier[fs0] , identifier[fs1] ):
literal[string]
identifier[registries] =[]
keyword[for] identifier[path] keyword[in] identifier[chain] ( identifier[registries_path] ( identifier[fs0] . identifier[fsroot] ), identifier[user_registries] ( identifier[fs0] , identifier[fs1] )):
keyword[if] identifier[fs0] . identifier[checksum] ( identifier[path] )!= identifier[fs1] . identifier[checksum] ( identifier[path] ):
identifier[registries] . identifier[append] ( identifier[path] )
keyword[return] identifier[registries] | def compare_hives(fs0, fs1):
"""Compares all the windows registry hive files
returning those which differ.
"""
registries = []
for path in chain(registries_path(fs0.fsroot), user_registries(fs0, fs1)):
if fs0.checksum(path) != fs1.checksum(path):
registries.append(path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
return registries |
def _spawn_minions(self, timeout=60):
    '''
    Spawn all the coroutines which will sign in to masters

    timeout -- seconds to wait before scheduling ``_check_minions`` to
               verify that at least one connection succeeded.
    '''
    # Run masters discovery over SSDP. This may modify the whole configuration,
    # depending of the networking and sets of masters. If match is 'any' we let
    # eval_master handle the discovery instead so disconnections can also handle
    # discovery
    if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
        self._discover_masters()
    masters = self.opts['master']
    # 'failover'/'distributed' keeps the configured master value as one
    # unit; a scalar master is likewise normalized into a one-item list.
    if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
        masters = [masters]
    for master in masters:
        # Deep-copy opts per master so per-connection mutations cannot
        # leak between the minion objects.
        s_opts = copy.deepcopy(self.opts)
        s_opts['master'] = master
        s_opts['multimaster'] = True
        minion = self._create_minion_object(s_opts,
                                            s_opts['auth_timeout'],
                                            False,
                                            io_loop=self.io_loop,
                                            loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
                                            jid_queue=self.jid_queue)
        # Connect asynchronously on the shared IO loop.
        self.io_loop.spawn_callback(self._connect_minion, minion)
    self.io_loop.call_later(timeout, self._check_minions)
constant[
Spawn all the coroutines which will sign in to masters
]
if <ast.BoolOp object at 0x7da1b2136680> begin[:]
call[name[self]._discover_masters, parameter[]]
variable[masters] assign[=] call[name[self].opts][constant[master]]
if <ast.BoolOp object at 0x7da1b2136ef0> begin[:]
variable[masters] assign[=] list[[<ast.Name object at 0x7da1b2137250>]]
for taget[name[master]] in starred[name[masters]] begin[:]
variable[s_opts] assign[=] call[name[copy].deepcopy, parameter[name[self].opts]]
call[name[s_opts]][constant[master]] assign[=] name[master]
call[name[s_opts]][constant[multimaster]] assign[=] constant[True]
variable[minion] assign[=] call[name[self]._create_minion_object, parameter[name[s_opts], call[name[s_opts]][constant[auth_timeout]], constant[False]]]
call[name[self].io_loop.spawn_callback, parameter[name[self]._connect_minion, name[minion]]]
call[name[self].io_loop.call_later, parameter[name[timeout], name[self]._check_minions]] | keyword[def] identifier[_spawn_minions] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[opts] [ literal[string] ], identifier[dict] ) keyword[and] identifier[self] . identifier[opts] [ literal[string] ]. identifier[get] ( literal[string] ):
identifier[self] . identifier[_discover_masters] ()
identifier[masters] = identifier[self] . identifier[opts] [ literal[string] ]
keyword[if] ( identifier[self] . identifier[opts] [ literal[string] ] keyword[in] ( literal[string] , literal[string] )) keyword[or] keyword[not] identifier[isinstance] ( identifier[self] . identifier[opts] [ literal[string] ], identifier[list] ):
identifier[masters] =[ identifier[masters] ]
keyword[for] identifier[master] keyword[in] identifier[masters] :
identifier[s_opts] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[opts] )
identifier[s_opts] [ literal[string] ]= identifier[master]
identifier[s_opts] [ literal[string] ]= keyword[True]
identifier[minion] = identifier[self] . identifier[_create_minion_object] ( identifier[s_opts] ,
identifier[s_opts] [ literal[string] ],
keyword[False] ,
identifier[io_loop] = identifier[self] . identifier[io_loop] ,
identifier[loaded_base_name] = literal[string] . identifier[format] ( identifier[s_opts] [ literal[string] ]),
identifier[jid_queue] = identifier[self] . identifier[jid_queue] )
identifier[self] . identifier[io_loop] . identifier[spawn_callback] ( identifier[self] . identifier[_connect_minion] , identifier[minion] )
identifier[self] . identifier[io_loop] . identifier[call_later] ( identifier[timeout] , identifier[self] . identifier[_check_minions] ) | def _spawn_minions(self, timeout=60):
"""
Spawn all the coroutines which will sign in to masters
"""
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending of the networking and sets of masters. If match is 'any' we let
# eval_master handle the discovery instead so disconnections can also handle
# discovery
if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
self._discover_masters() # depends on [control=['if'], data=[]]
masters = self.opts['master']
if self.opts['master_type'] in ('failover', 'distributed') or not isinstance(self.opts['master'], list):
masters = [masters] # depends on [control=['if'], data=[]]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue)
self.io_loop.spawn_callback(self._connect_minion, minion) # depends on [control=['for'], data=['master']]
self.io_loop.call_later(timeout, self._check_minions) |
def cleanup_a_alpha_and_derivatives(self):
    r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by
    `GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
    every component'''
    for attr in ('a', 'kappa', 'kappa0', 'kappa1', 'kappa2', 'kappa3', 'Tc'):
        delattr(self, attr)
constant[Removes properties set by `setup_a_alpha_and_derivatives`; run by
`GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
every component]
<ast.Delete object at 0x7da1b021d2a0> | keyword[def] identifier[cleanup_a_alpha_and_derivatives] ( identifier[self] ):
literal[string]
keyword[del] ( identifier[self] . identifier[a] , identifier[self] . identifier[kappa] , identifier[self] . identifier[kappa0] , identifier[self] . identifier[kappa1] , identifier[self] . identifier[kappa2] , identifier[self] . identifier[kappa3] , identifier[self] . identifier[Tc] ) | def cleanup_a_alpha_and_derivatives(self):
"""Removes properties set by `setup_a_alpha_and_derivatives`; run by
`GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
every component"""
del (self.a, self.kappa, self.kappa0, self.kappa1, self.kappa2, self.kappa3, self.Tc) |
def get_site_url(request=None):
    """Tries to get a site URL from environment and settings
    in the following order:
    1. (SITE_PROTO / SITE_SCHEME) + SITE_DOMAIN
    2. SITE_URL
    3. Django Sites contrib
    4. Request object
    :param HttpRequest request: Request object to deduce URL from.
    :rtype: str
    """
    # Lookup helpers: environment variables first, then Django settings.
    env = partial(environ.get)
    settings_ = partial(getattr, settings)
    domain = None
    scheme = None
    url = None
    for src in (env, settings_):
        # Each value is filled from the first source that defines it.
        if url is None:
            url = src('SITE_URL', None)
        if domain is None:
            domain = src('SITE_DOMAIN', None)
        if scheme is None:
            scheme = src('SITE_PROTO', src('SITE_SCHEME', None))
    if domain is None and url is not None:
        # Derive scheme and domain from a full SITE_URL like "https://host".
        scheme, domain = url.split('://')[:2]
    if domain is None:
        # Fall back to the Sites framework. NOTE(review): domain is None
        # here, so DomainGetter(None) acts only as a request stand-in.
        site = get_current_site(request or DomainGetter(domain))
        domain = site.domain
    if scheme is None and request:
        scheme = request.scheme
    # Final hard-coded fallbacks if nothing above produced values.
    if domain is None:
        domain = 'undefined-domain.local'
    if scheme is None:
        scheme = 'http'
    domain = domain.rstrip('/')
    return '%s://%s' % (scheme, domain)
constant[Tries to get a site URL from environment and settings
in the following order:
1. (SITE_PROTO / SITE_SCHEME) + SITE_DOMAIN
2. SITE_URL
3. Django Sites contrib
4. Request object
:param HttpRequest request: Request object to deduce URL from.
:rtype: str
]
variable[env] assign[=] call[name[partial], parameter[name[environ].get]]
variable[settings_] assign[=] call[name[partial], parameter[name[getattr], name[settings]]]
variable[domain] assign[=] constant[None]
variable[scheme] assign[=] constant[None]
variable[url] assign[=] constant[None]
for taget[name[src]] in starred[tuple[[<ast.Name object at 0x7da1b03b92d0>, <ast.Name object at 0x7da1b03b8280>]]] begin[:]
if compare[name[url] is constant[None]] begin[:]
variable[url] assign[=] call[name[src], parameter[constant[SITE_URL], constant[None]]]
if compare[name[domain] is constant[None]] begin[:]
variable[domain] assign[=] call[name[src], parameter[constant[SITE_DOMAIN], constant[None]]]
if compare[name[scheme] is constant[None]] begin[:]
variable[scheme] assign[=] call[name[src], parameter[constant[SITE_PROTO], call[name[src], parameter[constant[SITE_SCHEME], constant[None]]]]]
if <ast.BoolOp object at 0x7da1b03b98a0> begin[:]
<ast.Tuple object at 0x7da1b03bacb0> assign[=] call[call[name[url].split, parameter[constant[://]]]][<ast.Slice object at 0x7da1b03b9690>]
if compare[name[domain] is constant[None]] begin[:]
variable[site] assign[=] call[name[get_current_site], parameter[<ast.BoolOp object at 0x7da1b03b8c40>]]
variable[domain] assign[=] name[site].domain
if <ast.BoolOp object at 0x7da1b03ba530> begin[:]
variable[scheme] assign[=] name[request].scheme
if compare[name[domain] is constant[None]] begin[:]
variable[domain] assign[=] constant[undefined-domain.local]
if compare[name[scheme] is constant[None]] begin[:]
variable[scheme] assign[=] constant[http]
variable[domain] assign[=] call[name[domain].rstrip, parameter[constant[/]]]
return[binary_operation[constant[%s://%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18eb54f10>, <ast.Name object at 0x7da18eb57070>]]]] | keyword[def] identifier[get_site_url] ( identifier[request] = keyword[None] ):
literal[string]
identifier[env] = identifier[partial] ( identifier[environ] . identifier[get] )
identifier[settings_] = identifier[partial] ( identifier[getattr] , identifier[settings] )
identifier[domain] = keyword[None]
identifier[scheme] = keyword[None]
identifier[url] = keyword[None]
keyword[for] identifier[src] keyword[in] ( identifier[env] , identifier[settings_] ):
keyword[if] identifier[url] keyword[is] keyword[None] :
identifier[url] = identifier[src] ( literal[string] , keyword[None] )
keyword[if] identifier[domain] keyword[is] keyword[None] :
identifier[domain] = identifier[src] ( literal[string] , keyword[None] )
keyword[if] identifier[scheme] keyword[is] keyword[None] :
identifier[scheme] = identifier[src] ( literal[string] , identifier[src] ( literal[string] , keyword[None] ))
keyword[if] identifier[domain] keyword[is] keyword[None] keyword[and] identifier[url] keyword[is] keyword[not] keyword[None] :
identifier[scheme] , identifier[domain] = identifier[url] . identifier[split] ( literal[string] )[: literal[int] ]
keyword[if] identifier[domain] keyword[is] keyword[None] :
identifier[site] = identifier[get_current_site] ( identifier[request] keyword[or] identifier[DomainGetter] ( identifier[domain] ))
identifier[domain] = identifier[site] . identifier[domain]
keyword[if] identifier[scheme] keyword[is] keyword[None] keyword[and] identifier[request] :
identifier[scheme] = identifier[request] . identifier[scheme]
keyword[if] identifier[domain] keyword[is] keyword[None] :
identifier[domain] = literal[string]
keyword[if] identifier[scheme] keyword[is] keyword[None] :
identifier[scheme] = literal[string]
identifier[domain] = identifier[domain] . identifier[rstrip] ( literal[string] )
keyword[return] literal[string] %( identifier[scheme] , identifier[domain] ) | def get_site_url(request=None):
"""Tries to get a site URL from environment and settings
in the following order:
1. (SITE_PROTO / SITE_SCHEME) + SITE_DOMAIN
2. SITE_URL
3. Django Sites contrib
4. Request object
:param HttpRequest request: Request object to deduce URL from.
:rtype: str
"""
env = partial(environ.get)
settings_ = partial(getattr, settings)
domain = None
scheme = None
url = None
for src in (env, settings_):
if url is None:
url = src('SITE_URL', None) # depends on [control=['if'], data=['url']]
if domain is None:
domain = src('SITE_DOMAIN', None) # depends on [control=['if'], data=['domain']]
if scheme is None:
scheme = src('SITE_PROTO', src('SITE_SCHEME', None)) # depends on [control=['if'], data=['scheme']] # depends on [control=['for'], data=['src']]
if domain is None and url is not None:
(scheme, domain) = url.split('://')[:2] # depends on [control=['if'], data=[]]
if domain is None:
site = get_current_site(request or DomainGetter(domain))
domain = site.domain # depends on [control=['if'], data=['domain']]
if scheme is None and request:
scheme = request.scheme # depends on [control=['if'], data=[]]
if domain is None:
domain = 'undefined-domain.local' # depends on [control=['if'], data=['domain']]
if scheme is None:
scheme = 'http' # depends on [control=['if'], data=['scheme']]
domain = domain.rstrip('/')
return '%s://%s' % (scheme, domain) |
def send(self, verb, params=None, source=None, tags=None):
    """Send a generic IRC message to the server.

    The supplied pieces are assembled into an RFC 1459 message object,
    which is then dispatched over this connection.

    Args:
        verb (str): Verb, such as PRIVMSG.
        params (list of str): Message parameters, defaults to no params.
        source (str): Source of the message, defaults to no source.
        tags (dict): `Tags <http://ircv3.net/specs/core/message-tags-3.2.html>`_
            to send with the message.
    """
    outgoing = RFC1459Message.from_data(verb, params=params, source=source, tags=tags)
    self._send_message(outgoing)
constant[Send a generic IRC message to the server.
A message is created using the various parts of the message, then gets
assembled and sent to the server.
Args:
verb (str): Verb, such as PRIVMSG.
params (list of str): Message parameters, defaults to no params.
source (str): Source of the message, defaults to no source.
tags (dict): `Tags <http://ircv3.net/specs/core/message-tags-3.2.html>`_
to send with the message.
]
variable[m] assign[=] call[name[RFC1459Message].from_data, parameter[name[verb]]]
call[name[self]._send_message, parameter[name[m]]] | keyword[def] identifier[send] ( identifier[self] , identifier[verb] , identifier[params] = keyword[None] , identifier[source] = keyword[None] , identifier[tags] = keyword[None] ):
literal[string]
identifier[m] = identifier[RFC1459Message] . identifier[from_data] ( identifier[verb] , identifier[params] = identifier[params] , identifier[source] = identifier[source] , identifier[tags] = identifier[tags] )
identifier[self] . identifier[_send_message] ( identifier[m] ) | def send(self, verb, params=None, source=None, tags=None):
"""Send a generic IRC message to the server.
A message is created using the various parts of the message, then gets
assembled and sent to the server.
Args:
verb (str): Verb, such as PRIVMSG.
params (list of str): Message parameters, defaults to no params.
source (str): Source of the message, defaults to no source.
tags (dict): `Tags <http://ircv3.net/specs/core/message-tags-3.2.html>`_
to send with the message.
"""
m = RFC1459Message.from_data(verb, params=params, source=source, tags=tags)
self._send_message(m) |
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    '''helper method for present. ensure that cloudwatch_alarms are set

    name -- ELB name; prefixed onto each alarm name/description and used
            as the LoadBalancerName dimension.
    alarms -- explicit alarm definitions which override/extend the
              pillar-provided defaults.
    alarms_from_pillar -- pillar key holding default alarm definitions.
    '''
    # Start from pillar-provided defaults, overlaid with explicit alarms.
    current = __salt__['config.option'](alarms_from_pillar, {})
    if alarms:
        current = salt.utils.dictupdate.update(current, alarms)
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(current):
        # Scope the alarm to this load balancer.
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        # No test=False clause needed since the state handles that itself...
        results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
        # Aggregate result/changes/comment from each per-alarm state call.
        if not results.get('result'):
            ret["result"] = results["result"]
        if results.get("changes", {}) != {}:
            ret["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            ret["comment"] += results["comment"]
    return ret
constant[helper method for present. ensure that cloudwatch_alarms are set]
variable[current] assign[=] call[call[name[__salt__]][constant[config.option]], parameter[name[alarms_from_pillar], dictionary[[], []]]]
if name[alarms] begin[:]
variable[current] assign[=] call[name[salt].utils.dictupdate.update, parameter[name[current], name[alarms]]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2054a6b00>, <ast.Constant object at 0x7da2054a7e50>, <ast.Constant object at 0x7da2054a6bc0>, <ast.Constant object at 0x7da2054a5900>], [<ast.Name object at 0x7da2054a4370>, <ast.Constant object at 0x7da2054a61a0>, <ast.Constant object at 0x7da2054a6020>, <ast.Dict object at 0x7da2054a42e0>]]
for taget[tuple[[<ast.Name object at 0x7da2054a4b20>, <ast.Name object at 0x7da2054a7d60>]]] in starred[call[name[six].iteritems, parameter[name[current]]]] begin[:]
call[name[info]][constant[name]] assign[=] binary_operation[binary_operation[name[name] + constant[ ]] + call[name[info]][constant[name]]]
call[call[name[info]][constant[attributes]]][constant[description]] assign[=] binary_operation[binary_operation[name[name] + constant[ ]] + call[call[name[info]][constant[attributes]]][constant[description]]]
call[call[name[info]][constant[attributes]]][constant[dimensions]] assign[=] dictionary[[<ast.Constant object at 0x7da2054a5ba0>], [<ast.List object at 0x7da2054a6dd0>]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da2054a6290>, <ast.Constant object at 0x7da2054a41f0>, <ast.Constant object at 0x7da2054a7d30>, <ast.Constant object at 0x7da2054a4610>, <ast.Constant object at 0x7da2054a7bb0>, <ast.Constant object at 0x7da2054a7400>], [<ast.Subscript object at 0x7da2054a7c10>, <ast.Subscript object at 0x7da2054a56f0>, <ast.Name object at 0x7da2054a6e60>, <ast.Name object at 0x7da2054a57b0>, <ast.Name object at 0x7da2054a7d90>, <ast.Name object at 0x7da2054a4460>]]
variable[results] assign[=] call[call[name[__states__]][constant[boto_cloudwatch_alarm.present]], parameter[]]
if <ast.UnaryOp object at 0x7da2054a44c0> begin[:]
call[name[ret]][constant[result]] assign[=] call[name[results]][constant[result]]
if compare[call[name[results].get, parameter[constant[changes], dictionary[[], []]]] not_equal[!=] dictionary[[], []]] begin[:]
call[call[name[ret]][constant[changes]]][call[name[info]][constant[name]]] assign[=] call[name[results]][constant[changes]]
if compare[constant[comment] in name[results]] begin[:]
<ast.AugAssign object at 0x7da2054a7520>
return[name[ret]] | keyword[def] identifier[_alarms_present] ( identifier[name] , identifier[alarms] , identifier[alarms_from_pillar] , identifier[region] , identifier[key] , identifier[keyid] , identifier[profile] ):
literal[string]
identifier[current] = identifier[__salt__] [ literal[string] ]( identifier[alarms_from_pillar] ,{})
keyword[if] identifier[alarms] :
identifier[current] = identifier[salt] . identifier[utils] . identifier[dictupdate] . identifier[update] ( identifier[current] , identifier[alarms] )
identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}}
keyword[for] identifier[_] , identifier[info] keyword[in] identifier[six] . identifier[iteritems] ( identifier[current] ):
identifier[info] [ literal[string] ]= identifier[name] + literal[string] + identifier[info] [ literal[string] ]
identifier[info] [ literal[string] ][ literal[string] ]= identifier[name] + literal[string] + identifier[info] [ literal[string] ][ literal[string] ]
identifier[info] [ literal[string] ][ literal[string] ]={ literal[string] :[ identifier[name] ]}
identifier[kwargs] ={
literal[string] : identifier[info] [ literal[string] ],
literal[string] : identifier[info] [ literal[string] ],
literal[string] : identifier[region] ,
literal[string] : identifier[key] ,
literal[string] : identifier[keyid] ,
literal[string] : identifier[profile] ,
}
identifier[results] = identifier[__states__] [ literal[string] ](** identifier[kwargs] )
keyword[if] keyword[not] identifier[results] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= identifier[results] [ literal[string] ]
keyword[if] identifier[results] . identifier[get] ( literal[string] ,{})!={}:
identifier[ret] [ literal[string] ][ identifier[info] [ literal[string] ]]= identifier[results] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[results] :
identifier[ret] [ literal[string] ]+= identifier[results] [ literal[string] ]
keyword[return] identifier[ret] | def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
"""helper method for present. ensure that cloudwatch_alarms are set"""
current = __salt__['config.option'](alarms_from_pillar, {})
if alarms:
current = salt.utils.dictupdate.update(current, alarms) # depends on [control=['if'], data=[]]
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
for (_, info) in six.iteritems(current):
info['name'] = name + ' ' + info['name']
info['attributes']['description'] = name + ' ' + info['attributes']['description']
info['attributes']['dimensions'] = {'LoadBalancerName': [name]}
kwargs = {'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
# No test=False cluase needed since the state handles that itself...
results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
if not results.get('result'):
ret['result'] = results['result'] # depends on [control=['if'], data=[]]
if results.get('changes', {}) != {}:
ret['changes'][info['name']] = results['changes'] # depends on [control=['if'], data=[]]
if 'comment' in results:
ret['comment'] += results['comment'] # depends on [control=['if'], data=['results']] # depends on [control=['for'], data=[]]
return ret |
def get_field_names(self):
    """
    Builds a list of the field names for all tables and joined tables by calling
    ``get_field_names()`` on each table
    :return: list of field names
    :rtype: list of str
    """
    names = []
    for source_table in self.tables:
        names += source_table.get_field_names()
    for join in self.joins:
        names += join.right_table.get_field_names()
    return names
return field_names | def function[get_field_names, parameter[self]]:
constant[
Builds a list of the field names for all tables and joined tables by calling
``get_field_names()`` on each table
:return: list of field names
:rtype: list of str
]
variable[field_names] assign[=] list[[]]
for taget[name[table]] in starred[name[self].tables] begin[:]
call[name[field_names].extend, parameter[call[name[table].get_field_names, parameter[]]]]
for taget[name[join_item]] in starred[name[self].joins] begin[:]
call[name[field_names].extend, parameter[call[name[join_item].right_table.get_field_names, parameter[]]]]
return[name[field_names]] | keyword[def] identifier[get_field_names] ( identifier[self] ):
literal[string]
identifier[field_names] =[]
keyword[for] identifier[table] keyword[in] identifier[self] . identifier[tables] :
identifier[field_names] . identifier[extend] ( identifier[table] . identifier[get_field_names] ())
keyword[for] identifier[join_item] keyword[in] identifier[self] . identifier[joins] :
identifier[field_names] . identifier[extend] ( identifier[join_item] . identifier[right_table] . identifier[get_field_names] ())
keyword[return] identifier[field_names] | def get_field_names(self):
"""
Builds a list of the field names for all tables and joined tables by calling
``get_field_names()`` on each table
:return: list of field names
:rtype: list of str
"""
field_names = []
for table in self.tables:
field_names.extend(table.get_field_names()) # depends on [control=['for'], data=['table']]
for join_item in self.joins:
field_names.extend(join_item.right_table.get_field_names()) # depends on [control=['for'], data=['join_item']]
return field_names |
def setShapeClass(self, typeID, clazz):
    """setShapeClass(string, string) -> None
    Sets the shape class of vehicles of this type.
    """
    conn = self._connection
    conn._sendStringCmd(tc.CMD_SET_VEHICLETYPE_VARIABLE,
                        tc.VAR_SHAPECLASS, typeID, clazz)
constant[setShapeClass(string, string) -> None
Sets the shape class of vehicles of this type.
]
call[name[self]._connection._sendStringCmd, parameter[name[tc].CMD_SET_VEHICLETYPE_VARIABLE, name[tc].VAR_SHAPECLASS, name[typeID], name[clazz]]] | keyword[def] identifier[setShapeClass] ( identifier[self] , identifier[typeID] , identifier[clazz] ):
literal[string]
identifier[self] . identifier[_connection] . identifier[_sendStringCmd] (
identifier[tc] . identifier[CMD_SET_VEHICLETYPE_VARIABLE] , identifier[tc] . identifier[VAR_SHAPECLASS] , identifier[typeID] , identifier[clazz] ) | def setShapeClass(self, typeID, clazz):
"""setShapeClass(string, string) -> None
Sets the shape class of vehicles of this type.
"""
self._connection._sendStringCmd(tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_SHAPECLASS, typeID, clazz) |
def out_32(library, session, space, offset, data, extended=False):
    """Write in an 32-bit value from the specified memory space and offset.

    Corresponds to viOut32* functions of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which to read.
    :param data: Data to write to bus.
    :param extended: Use 64 bits offset independent of the platform.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # The extended variant takes a 64-bit offset; both share one signature.
    writer = library.viOut32Ex if extended else library.viOut32
    return writer(session, space, offset, data)
constant[Write in an 32-bit value from the specified memory space and offset.
Corresponds to viOut32* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param data: Data to write to bus.
:param extended: Use 64 bits offset independent of the platform.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
]
if name[extended] begin[:]
return[call[name[library].viOut32Ex, parameter[name[session], name[space], name[offset], name[data]]]] | keyword[def] identifier[out_32] ( identifier[library] , identifier[session] , identifier[space] , identifier[offset] , identifier[data] , identifier[extended] = keyword[False] ):
literal[string]
keyword[if] identifier[extended] :
keyword[return] identifier[library] . identifier[viOut32Ex] ( identifier[session] , identifier[space] , identifier[offset] , identifier[data] )
keyword[else] :
keyword[return] identifier[library] . identifier[viOut32] ( identifier[session] , identifier[space] , identifier[offset] , identifier[data] ) | def out_32(library, session, space, offset, data, extended=False):
"""Write in an 32-bit value from the specified memory space and offset.
Corresponds to viOut32* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param data: Data to write to bus.
:param extended: Use 64 bits offset independent of the platform.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
"""
if extended:
return library.viOut32Ex(session, space, offset, data) # depends on [control=['if'], data=[]]
else:
return library.viOut32(session, space, offset, data) |
def _get_stats_files(data, out_dir=None):
    """Retrieve stats files from pre-existing dictionary or filesystem.

    :param data: bcbio sample data dictionary.
    :param out_dir: output directory; defaults to
        <work_dir>/qc/<sample>/samtools (created if missing).
    :return: tuple of (stats_file, idxstats_file) paths.
    """
    if not out_dir:
        out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
                                                  "qc", dd.get_sample_name(data), "samtools"))
    # Prefer paths already recorded in the sample data, when present.
    stats_file = tz.get_in(["depth", "samtools", "stats"], data)
    idxstats_file = tz.get_in(["depth", "samtools", "idxstats"], data)
    # Otherwise fall back to the conventional filesystem locations.
    if not stats_file:
        stats_file = os.path.join(out_dir, "%s.txt" % dd.get_sample_name(data))
    if not idxstats_file:
        idxstats_file = os.path.join(out_dir, "%s-idxstats.txt" % dd.get_sample_name(data))
    return stats_file, idxstats_file
constant[Retrieve stats files from pre-existing dictionary or filesystem.
]
if <ast.UnaryOp object at 0x7da1b18f8220> begin[:]
variable[out_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[name[dd].get_work_dir, parameter[name[data]]], constant[qc], call[name[dd].get_sample_name, parameter[name[data]]], constant[samtools]]]]]
variable[stats_file] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18fb7f0>, <ast.Constant object at 0x7da1b18fae60>, <ast.Constant object at 0x7da1b18faa40>]], name[data]]]
variable[idxstats_file] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18f9120>, <ast.Constant object at 0x7da1b18f80d0>, <ast.Constant object at 0x7da1b18fa980>]], name[data]]]
if <ast.UnaryOp object at 0x7da1b18fa770> begin[:]
variable[stats_file] assign[=] call[name[os].path.join, parameter[name[out_dir], binary_operation[constant[%s.txt] <ast.Mod object at 0x7da2590d6920> call[name[dd].get_sample_name, parameter[name[data]]]]]]
if <ast.UnaryOp object at 0x7da1b18fa7d0> begin[:]
variable[idxstats_file] assign[=] call[name[os].path.join, parameter[name[out_dir], binary_operation[constant[%s-idxstats.txt] <ast.Mod object at 0x7da2590d6920> call[name[dd].get_sample_name, parameter[name[data]]]]]]
return[tuple[[<ast.Name object at 0x7da1b18fbee0>, <ast.Name object at 0x7da1b18fba00>]]] | keyword[def] identifier[_get_stats_files] ( identifier[data] , identifier[out_dir] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[out_dir] :
identifier[out_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dd] . identifier[get_work_dir] ( identifier[data] ),
literal[string] , identifier[dd] . identifier[get_sample_name] ( identifier[data] ), literal[string] ))
identifier[stats_file] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] )
identifier[idxstats_file] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] )
keyword[if] keyword[not] identifier[stats_file] :
identifier[stats_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] % identifier[dd] . identifier[get_sample_name] ( identifier[data] ))
keyword[if] keyword[not] identifier[idxstats_file] :
identifier[idxstats_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] % identifier[dd] . identifier[get_sample_name] ( identifier[data] ))
keyword[return] identifier[stats_file] , identifier[idxstats_file] | def _get_stats_files(data, out_dir=None):
"""Retrieve stats files from pre-existing dictionary or filesystem.
"""
if not out_dir:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), 'qc', dd.get_sample_name(data), 'samtools')) # depends on [control=['if'], data=[]]
stats_file = tz.get_in(['depth', 'samtools', 'stats'], data)
idxstats_file = tz.get_in(['depth', 'samtools', 'idxstats'], data)
if not stats_file:
stats_file = os.path.join(out_dir, '%s.txt' % dd.get_sample_name(data)) # depends on [control=['if'], data=[]]
if not idxstats_file:
idxstats_file = os.path.join(out_dir, '%s-idxstats.txt' % dd.get_sample_name(data)) # depends on [control=['if'], data=[]]
return (stats_file, idxstats_file) |
def _merge_sampleset(model1, model2):
    """Merge the samplesets of two models into a single wavelength set."""
    return merge_wavelengths(_get_sampleset(model1), _get_sampleset(model2))
constant[Simple merge of samplesets.]
variable[w1] assign[=] call[name[_get_sampleset], parameter[name[model1]]]
variable[w2] assign[=] call[name[_get_sampleset], parameter[name[model2]]]
return[call[name[merge_wavelengths], parameter[name[w1], name[w2]]]] | keyword[def] identifier[_merge_sampleset] ( identifier[model1] , identifier[model2] ):
literal[string]
identifier[w1] = identifier[_get_sampleset] ( identifier[model1] )
identifier[w2] = identifier[_get_sampleset] ( identifier[model2] )
keyword[return] identifier[merge_wavelengths] ( identifier[w1] , identifier[w2] ) | def _merge_sampleset(model1, model2):
"""Simple merge of samplesets."""
w1 = _get_sampleset(model1)
w2 = _get_sampleset(model2)
return merge_wavelengths(w1, w2) |
def remove_directories(list_of_paths):
    """
    Filter a list of '/'-separated paths down to leaf entries.

    A path is dropped when it is a strict ancestor directory of another
    path in the list, or when it explicitly ends in '/'. Blank entries
    are skipped rather than raising.

    :param list_of_paths: iterable of path strings; surrounding
        whitespace is ignored for comparison purposes
    :return: list of the original path strings that are leaves
    """
    # Collect every strict ancestor ("non-leaf") directory seen in the input.
    found_dirs = {'/'}
    for path in list_of_paths:
        parts = path.strip().split('/')
        # range starts at 2 so the bare root ('') / single segment is skipped;
        # the full path itself (parts[:len(parts)]) is never added.
        for depth in range(2, len(parts)):
            found_dirs.add('/'.join(parts[:depth]))
    return [
        path for path in list_of_paths
        if path.strip()                        # skip blank entries instead of crashing on [-1]
        and path.strip() not in found_dirs     # drop known ancestor directories
        and not path.strip().endswith('/')     # drop explicit directory paths
    ]
return paths | def function[remove_directories, parameter[list_of_paths]]:
constant[
Removes non-leafs from a list of directory paths
]
variable[found_dirs] assign[=] call[name[set], parameter[constant[/]]]
for taget[name[path]] in starred[name[list_of_paths]] begin[:]
variable[dirs] assign[=] call[call[name[path].strip, parameter[]].split, parameter[constant[/]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], call[name[len], parameter[name[dirs]]]]]] begin[:]
call[name[found_dirs].add, parameter[call[constant[/].join, parameter[call[name[dirs]][<ast.Slice object at 0x7da1b2347280>]]]]]
variable[paths] assign[=] <ast.ListComp object at 0x7da1b23469b0>
return[name[paths]] | keyword[def] identifier[remove_directories] ( identifier[list_of_paths] ):
literal[string]
identifier[found_dirs] = identifier[set] ( literal[string] )
keyword[for] identifier[path] keyword[in] identifier[list_of_paths] :
identifier[dirs] = identifier[path] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[dirs] )):
identifier[found_dirs] . identifier[add] ( literal[string] . identifier[join] ( identifier[dirs] [: identifier[i] ]))
identifier[paths] =[ identifier[path] keyword[for] identifier[path] keyword[in] identifier[list_of_paths] keyword[if]
( identifier[path] . identifier[strip] () keyword[not] keyword[in] identifier[found_dirs] ) keyword[and] identifier[path] . identifier[strip] ()[- literal[int] ]!= literal[string] ]
keyword[return] identifier[paths] | def remove_directories(list_of_paths):
"""
Removes non-leafs from a list of directory paths
"""
found_dirs = set('/')
for path in list_of_paths:
dirs = path.strip().split('/')
for i in range(2, len(dirs)):
found_dirs.add('/'.join(dirs[:i])) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['path']]
paths = [path for path in list_of_paths if path.strip() not in found_dirs and path.strip()[-1] != '/']
return paths |
def load_state_from_disk():
    """Load persisted state from the local ``data.json`` file.

    Returns the parsed JSON payload when saved state exists
    (per ``is_there_state()``), or ``False`` otherwise.
    """
    if not is_there_state():
        return False
    with open('data.json', 'r') as state_file:
        return json.load(state_file)
constant[ loads the state from a local data.json file
]
if call[name[is_there_state], parameter[]] begin[:]
with call[name[open], parameter[constant[data.json], constant[r]]] begin[:]
variable[data] assign[=] call[name[json].load, parameter[name[f]]]
return[name[data]] | keyword[def] identifier[load_state_from_disk] ():
literal[string]
keyword[if] identifier[is_there_state] ():
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[json] . identifier[load] ( identifier[f] )
keyword[return] identifier[data]
keyword[else] :
keyword[return] keyword[False] | def load_state_from_disk():
""" loads the state from a local data.json file
"""
if is_there_state():
with open('data.json', 'r') as f:
data = json.load(f) # depends on [control=['with'], data=['f']]
return data # depends on [control=['if'], data=[]]
else:
return False |
def _parse_xml(self, response):
    """
    Run our XML parser (lxml in this case) over our response text.

    lxml doesn't enjoy having xml/encoding information in the header, so
    a single leading newline (left behind once that header is stripped)
    is removed before parsing. The parsed XML object is returned for the
    calling API method to massage into a more appropriate format.
    """
    payload = response[1:] if response.startswith('\n') else response
    return etree.fromstring(payload)
constant[
Run our XML parser (lxml in this case) over our response text. Lxml
doesn't enjoy having xml/encoding information in the header so we strip
that out if necessary. We return a parsed XML object that can be
used by the calling API method and massaged into a more appropriate
format.
]
if call[name[response].startswith, parameter[constant[
]]] begin[:]
variable[response] assign[=] call[name[response]][<ast.Slice object at 0x7da1b0bdbe50>]
variable[tree] assign[=] call[name[etree].fromstring, parameter[name[response]]]
return[name[tree]] | keyword[def] identifier[_parse_xml] ( identifier[self] , identifier[response] ):
literal[string]
keyword[if] identifier[response] . identifier[startswith] ( literal[string] ):
identifier[response] = identifier[response] [ literal[int] :]
identifier[tree] = identifier[etree] . identifier[fromstring] ( identifier[response] )
keyword[return] identifier[tree] | def _parse_xml(self, response):
"""
Run our XML parser (lxml in this case) over our response text. Lxml
doesn't enjoy having xml/encoding information in the header so we strip
that out if necessary. We return a parsed XML object that can be
used by the calling API method and massaged into a more appropriate
format.
"""
if response.startswith('\n'):
response = response[1:] # depends on [control=['if'], data=[]]
tree = etree.fromstring(response)
return tree |
def next(self):
    """Return the JSON-decoded payload of the next buffered message.

    Pops the oldest previously-fetched message, records it as
    processed, and decodes its payload. When the buffer is exhausted,
    the processed messages are deleted first if ``auto_delete`` is set,
    then ``StopIteration`` is raised.
    """
    if self._messages:
        current = self._messages.pop(0)
        self._processed_messages.append(current)
        return json.loads(current.payload)
    if self.auto_delete:
        self.delete_messages()
    raise StopIteration
constant[Get the next batch of messages from the previously fetched messages.
If there's no more messages, check if we should auto-delete the
messages and raise StopIteration.
]
if <ast.UnaryOp object at 0x7da18f58d0c0> begin[:]
if name[self].auto_delete begin[:]
call[name[self].delete_messages, parameter[]]
<ast.Raise object at 0x7da18f58f970>
variable[message] assign[=] call[name[self]._messages.pop, parameter[constant[0]]]
call[name[self]._processed_messages.append, parameter[name[message]]]
return[call[name[json].loads, parameter[name[message].payload]]] | keyword[def] identifier[next] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_messages] :
keyword[if] identifier[self] . identifier[auto_delete] :
identifier[self] . identifier[delete_messages] ()
keyword[raise] identifier[StopIteration]
identifier[message] = identifier[self] . identifier[_messages] . identifier[pop] ( literal[int] )
identifier[self] . identifier[_processed_messages] . identifier[append] ( identifier[message] )
keyword[return] identifier[json] . identifier[loads] ( identifier[message] . identifier[payload] ) | def next(self):
"""Get the next batch of messages from the previously fetched messages.
If there's no more messages, check if we should auto-delete the
messages and raise StopIteration.
"""
if not self._messages:
if self.auto_delete:
self.delete_messages() # depends on [control=['if'], data=[]]
raise StopIteration # depends on [control=['if'], data=[]]
message = self._messages.pop(0)
self._processed_messages.append(message)
return json.loads(message.payload) |
def s_data(nrows_fdata, Nmax, Q):
    """Build the complex weight vector used by the spherical transform.

    The data is assumed to be even in length: both poles of the sphere
    have been measured and the data has been continued.

    :param nrows_fdata: number of rows in fdata; must be even.
    :param Nmax: the largest number of n values desired.
    :param Q: a value greater than ``nrows_fdata + Nmax``. This can be
        selected to be factorable into small primes to increase the
        speed of the fft.
    :raises ValueError: if ``nrows_fdata`` is odd (ValueError subclasses
        Exception, so callers catching the old generic Exception still work).
    :return: complex128 array of length ``Q`` holding ``-1j/nu`` at the
        positions of odd ``nu`` and zeros elsewhere.
    """
    if nrows_fdata % 2 == 1:
        raise ValueError("nrows_fdata must be even.")
    s = np.zeros(Q, dtype=np.complex128)
    half = nrows_fdata // 2  # exact integer division; input is even
    # For nu < half the index nu - half is negative and wraps to the tail
    # of ``s`` via Python's negative indexing; Q > nrows_fdata + Nmax is
    # what keeps the head and tail regions from overlapping.
    for nu in range(-half, half + Nmax + 1):  # range: Python 3 (was xrange)
        if nu % 2 == 1:  # Python's % yields 1 for odd negative nu as well
            s[nu - half] = -1j / nu
    return s
constant[ I am going to assume we will always have even data. This is pretty
safe because it means that we have measured both poles of the sphere and
have data that has been continued.
nrows_fdata: Number of rows in fdata.
Nmax: The largest number of n values desired.
Q: A value greater than nrows_fdata + Nmax. This can be
selected to be factorable into small primes to
increase the speed of the fft (probably not that big
of a deal today).
]
if compare[call[name[np].mod, parameter[name[nrows_fdata], constant[2]]] equal[==] constant[1]] begin[:]
<ast.Raise object at 0x7da18bc72740>
variable[L1] assign[=] name[nrows_fdata]
variable[s] assign[=] call[name[np].zeros, parameter[name[Q]]]
variable[MM] assign[=] call[name[int], parameter[binary_operation[name[L1] / constant[2]]]]
for taget[name[nu]] in starred[call[name[xrange], parameter[<ast.UnaryOp object at 0x7da18bc72440>, binary_operation[binary_operation[name[MM] + name[Nmax]] + constant[1]]]]] begin[:]
if compare[call[name[np].mod, parameter[name[nu], constant[2]]] equal[==] constant[1]] begin[:]
call[name[s]][binary_operation[name[nu] - name[MM]]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18f09d600> / name[nu]]
return[name[s]] | keyword[def] identifier[s_data] ( identifier[nrows_fdata] , identifier[Nmax] , identifier[Q] ):
literal[string]
keyword[if] identifier[np] . identifier[mod] ( identifier[nrows_fdata] , literal[int] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[L1] = identifier[nrows_fdata]
identifier[s] = identifier[np] . identifier[zeros] ( identifier[Q] , identifier[dtype] = identifier[np] . identifier[complex128] )
identifier[MM] = identifier[int] ( identifier[L1] / literal[int] )
keyword[for] identifier[nu] keyword[in] identifier[xrange] (- identifier[MM] , identifier[MM] + identifier[Nmax] + literal[int] ):
keyword[if] identifier[np] . identifier[mod] ( identifier[nu] , literal[int] )== literal[int] :
identifier[s] [ identifier[nu] - identifier[MM] ]=- literal[int] / identifier[nu]
keyword[return] identifier[s] | def s_data(nrows_fdata, Nmax, Q):
""" I am going to assume we will always have even data. This is pretty
safe because it means that we have measured both poles of the sphere and
have data that has been continued.
nrows_fdata: Number of rows in fdata.
Nmax: The largest number of n values desired.
Q: A value greater than nrows_fdata + Nmax. This can be
selected to be factorable into small primes to
increase the speed of the fft (probably not that big
of a deal today).
"""
if np.mod(nrows_fdata, 2) == 1:
raise Exception('nrows_fdata must be even.') # depends on [control=['if'], data=[]]
L1 = nrows_fdata
s = np.zeros(Q, dtype=np.complex128)
MM = int(L1 / 2)
for nu in xrange(-MM, MM + Nmax + 1):
if np.mod(nu, 2) == 1:
s[nu - MM] = -1j / nu # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['nu']]
return s |
def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef | def function[_getStreamDef, parameter[self, modelDescription]]:
constant[
Generate stream definition based on
]
variable[aggregationPeriod] assign[=] dictionary[[<ast.Constant object at 0x7da1b00283a0>, <ast.Constant object at 0x7da1b0029390>, <ast.Constant object at 0x7da1b0029690>, <ast.Constant object at 0x7da1b0029660>, <ast.Constant object at 0x7da1b002ba00>, <ast.Constant object at 0x7da1b002b670>, <ast.Constant object at 0x7da1b002bfd0>, <ast.Constant object at 0x7da1b002abf0>, <ast.Constant object at 0x7da1b002af50>], [<ast.Constant object at 0x7da1b00297b0>, <ast.Constant object at 0x7da1b0029a20>, <ast.Constant object at 0x7da1b0028280>, <ast.Constant object at 0x7da1b002be20>, <ast.Constant object at 0x7da1b0029750>, <ast.Constant object at 0x7da1b002bac0>, <ast.Constant object at 0x7da1b002b550>, <ast.Constant object at 0x7da1b002b6d0>, <ast.Constant object at 0x7da1b002a290>]]
variable[aggFunctionsDict] assign[=] dictionary[[], []]
if compare[constant[aggregation] in call[name[modelDescription]][constant[streamDef]]] begin[:]
for taget[name[key]] in starred[call[name[aggregationPeriod].keys, parameter[]]] begin[:]
if compare[name[key] in call[call[name[modelDescription]][constant[streamDef]]][constant[aggregation]]] begin[:]
call[name[aggregationPeriod]][name[key]] assign[=] call[call[call[name[modelDescription]][constant[streamDef]]][constant[aggregation]]][name[key]]
if compare[constant[fields] in call[call[name[modelDescription]][constant[streamDef]]][constant[aggregation]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b002a350>, <ast.Name object at 0x7da1b0029ff0>]]] in starred[call[call[call[name[modelDescription]][constant[streamDef]]][constant[aggregation]]][constant[fields]]] begin[:]
call[name[aggFunctionsDict]][name[fieldName]] assign[=] call[name[str], parameter[name[func]]]
variable[hasAggregation] assign[=] constant[False]
for taget[name[v]] in starred[call[name[aggregationPeriod].values, parameter[]]] begin[:]
if compare[name[v] not_equal[!=] constant[0]] begin[:]
variable[hasAggregation] assign[=] constant[True]
break
variable[aggFunctionList] assign[=] call[name[aggFunctionsDict].items, parameter[]]
variable[aggregationInfo] assign[=] call[name[dict], parameter[name[aggregationPeriod]]]
call[name[aggregationInfo]][constant[fields]] assign[=] name[aggFunctionList]
variable[streamDef] assign[=] call[name[copy].deepcopy, parameter[call[name[modelDescription]][constant[streamDef]]]]
call[name[streamDef]][constant[aggregation]] assign[=] call[name[copy].deepcopy, parameter[name[aggregationInfo]]]
return[name[streamDef]] | keyword[def] identifier[_getStreamDef] ( identifier[self] , identifier[modelDescription] ):
literal[string]
identifier[aggregationPeriod] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
identifier[aggFunctionsDict] ={}
keyword[if] literal[string] keyword[in] identifier[modelDescription] [ literal[string] ]:
keyword[for] identifier[key] keyword[in] identifier[aggregationPeriod] . identifier[keys] ():
keyword[if] identifier[key] keyword[in] identifier[modelDescription] [ literal[string] ][ literal[string] ]:
identifier[aggregationPeriod] [ identifier[key] ]= identifier[modelDescription] [ literal[string] ][ literal[string] ][ identifier[key] ]
keyword[if] literal[string] keyword[in] identifier[modelDescription] [ literal[string] ][ literal[string] ]:
keyword[for] ( identifier[fieldName] , identifier[func] ) keyword[in] identifier[modelDescription] [ literal[string] ][ literal[string] ][ literal[string] ]:
identifier[aggFunctionsDict] [ identifier[fieldName] ]= identifier[str] ( identifier[func] )
identifier[hasAggregation] = keyword[False]
keyword[for] identifier[v] keyword[in] identifier[aggregationPeriod] . identifier[values] ():
keyword[if] identifier[v] != literal[int] :
identifier[hasAggregation] = keyword[True]
keyword[break]
identifier[aggFunctionList] = identifier[aggFunctionsDict] . identifier[items] ()
identifier[aggregationInfo] = identifier[dict] ( identifier[aggregationPeriod] )
identifier[aggregationInfo] [ literal[string] ]= identifier[aggFunctionList]
identifier[streamDef] = identifier[copy] . identifier[deepcopy] ( identifier[modelDescription] [ literal[string] ])
identifier[streamDef] [ literal[string] ]= identifier[copy] . identifier[deepcopy] ( identifier[aggregationInfo] )
keyword[return] identifier[streamDef] | def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {'days': 0, 'hours': 0, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef |
def _build_config(config_cls, property_path=None):
    """ Builds the schema definition for a given config class.

    :param class config_cls: The config class to build a schema definition for
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional. The caller's list is never mutated.
    :raises ValueError: When the given ``config_cls`` is not a config decorated class
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    if not property_path:
        property_path = []
    if not is_config_type(config_cls):
        raise ValueError(f"class {config_cls!r} is not a config class")
    schema = {"type": "object", "required": [], "properties": {}}
    cls_entry = getattr(config_cls, CONFIG_KEY)
    # add schema title, defaults to config classes `__qualname__`
    schema_title = cls_entry.get("title", config_cls.__qualname__)
    if isinstance(schema_title, str):
        schema["title"] = schema_title
    schema_description = cls_entry.get("description")
    if isinstance(schema_description, str):
        schema["description"] = schema_description
    # if the length of the property path is 0, assume that current object is root
    if len(property_path) <= 0:
        schema["$id"] = f"{config_cls.__qualname__}.json"
        # NOTE: requires draft-07 for typing.Union type schema generation
        schema["$schema"] = "http://json-schema.org/draft-07/schema#"
    else:
        schema["$id"] = f"#/{'/'.join(property_path)}"
    # Descend into the "properties" namespace on a fresh list instead of
    # appending to ``property_path`` in place, so the caller's list is
    # left untouched.
    child_path = property_path + ["properties"]
    for var in attr.fields(config_cls):
        if not is_config_var(var):
            # encountered attribute is not a serialized field (i.e. missing CONFIG_KEY)
            continue
        entry = var.metadata[CONFIG_KEY]
        var_name = entry.name if entry.name else var.name
        if entry.required:
            schema["required"].append(var_name)
        if is_config_type(var.type):
            schema["properties"][var_name] = _build_config(
                var.type, property_path=child_path + [var_name]
            )
        else:
            schema["properties"][var_name] = _build_var(
                var, property_path=child_path
            )
    return schema
constant[ Builds the schema definition for a given config class.
:param class config_cls: The config class to build a schema definition for
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:raises ValueError: When the given ``config_cls`` is not a config decorated class
:return: The built schema definition
:rtype: Dict[str, Any]
]
if <ast.UnaryOp object at 0x7da1b0e3bf10> begin[:]
variable[property_path] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b0e38a30> begin[:]
<ast.Raise object at 0x7da1b0e399f0>
variable[schema] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e3b250>, <ast.Constant object at 0x7da1b0e39060>, <ast.Constant object at 0x7da1b0e3b4c0>], [<ast.Constant object at 0x7da1b0e3a470>, <ast.List object at 0x7da1b0e3a440>, <ast.Dict object at 0x7da1b0e3ab60>]]
variable[cls_entry] assign[=] call[name[getattr], parameter[name[config_cls], name[CONFIG_KEY]]]
variable[schema_title] assign[=] call[name[cls_entry].get, parameter[constant[title], name[config_cls].__qualname__]]
if call[name[isinstance], parameter[name[schema_title], name[str]]] begin[:]
call[name[schema]][constant[title]] assign[=] name[schema_title]
variable[schema_description] assign[=] call[name[cls_entry].get, parameter[constant[description]]]
if call[name[isinstance], parameter[name[schema_description], name[str]]] begin[:]
call[name[schema]][constant[description]] assign[=] name[schema_description]
if compare[call[name[len], parameter[name[property_path]]] less_or_equal[<=] constant[0]] begin[:]
call[name[schema]][constant[$id]] assign[=] <ast.JoinedStr object at 0x7da2045664d0>
call[name[schema]][constant[$schema]] assign[=] constant[http://json-schema.org/draft-07/schema#]
call[name[property_path].append, parameter[constant[properties]]]
for taget[name[var]] in starred[call[name[attr].fields, parameter[name[config_cls]]]] begin[:]
if <ast.UnaryOp object at 0x7da204566c80> begin[:]
continue
variable[entry] assign[=] call[name[var].metadata][name[CONFIG_KEY]]
variable[var_name] assign[=] <ast.IfExp object at 0x7da204566470>
if name[entry].required begin[:]
call[call[name[schema]][constant[required]].append, parameter[name[var_name]]]
if call[name[is_config_type], parameter[name[var].type]] begin[:]
call[call[name[schema]][constant[properties]]][name[var_name]] assign[=] call[name[_build_config], parameter[name[var].type]]
return[name[schema]] | keyword[def] identifier[_build_config] ( identifier[config_cls] , identifier[property_path] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[property_path] :
identifier[property_path] =[]
keyword[if] keyword[not] identifier[is_config_type] ( identifier[config_cls] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[schema] ={ literal[string] : literal[string] , literal[string] :[], literal[string] :{}}
identifier[cls_entry] = identifier[getattr] ( identifier[config_cls] , identifier[CONFIG_KEY] )
identifier[schema_title] = identifier[cls_entry] . identifier[get] ( literal[string] , identifier[config_cls] . identifier[__qualname__] )
keyword[if] identifier[isinstance] ( identifier[schema_title] , identifier[str] ):
identifier[schema] [ literal[string] ]= identifier[schema_title]
identifier[schema_description] = identifier[cls_entry] . identifier[get] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[schema_description] , identifier[str] ):
identifier[schema] [ literal[string] ]= identifier[schema_description]
keyword[if] identifier[len] ( identifier[property_path] )<= literal[int] :
identifier[schema] [ literal[string] ]= literal[string]
identifier[schema] [ literal[string] ]= literal[string]
keyword[else] :
identifier[schema] [ literal[string] ]= literal[string]
identifier[property_path] . identifier[append] ( literal[string] )
keyword[for] identifier[var] keyword[in] identifier[attr] . identifier[fields] ( identifier[config_cls] ):
keyword[if] keyword[not] identifier[is_config_var] ( identifier[var] ):
keyword[continue]
identifier[entry] = identifier[var] . identifier[metadata] [ identifier[CONFIG_KEY] ]
identifier[var_name] = identifier[entry] . identifier[name] keyword[if] identifier[entry] . identifier[name] keyword[else] identifier[var] . identifier[name]
keyword[if] identifier[entry] . identifier[required] :
identifier[schema] [ literal[string] ]. identifier[append] ( identifier[var_name] )
keyword[if] identifier[is_config_type] ( identifier[var] . identifier[type] ):
identifier[schema] [ literal[string] ][ identifier[var_name] ]= identifier[_build_config] (
identifier[var] . identifier[type] , identifier[property_path] = identifier[property_path] +[ identifier[var_name] ]
)
keyword[else] :
identifier[schema] [ literal[string] ][ identifier[var_name] ]= identifier[_build_var] (
identifier[var] , identifier[property_path] = identifier[property_path]
)
keyword[return] identifier[schema] | def _build_config(config_cls, property_path=None):
""" Builds the schema definition for a given config class.
:param class config_cls: The config class to build a schema definition for
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:raises ValueError: When the given ``config_cls`` is not a config decorated class
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = [] # depends on [control=['if'], data=[]]
if not is_config_type(config_cls):
raise ValueError(f'class {config_cls!r} is not a config class') # depends on [control=['if'], data=[]]
schema = {'type': 'object', 'required': [], 'properties': {}}
cls_entry = getattr(config_cls, CONFIG_KEY)
# add schema title, defaults to config classes `__qualname__`
schema_title = cls_entry.get('title', config_cls.__qualname__)
if isinstance(schema_title, str):
schema['title'] = schema_title # depends on [control=['if'], data=[]]
schema_description = cls_entry.get('description')
if isinstance(schema_description, str):
schema['description'] = schema_description # depends on [control=['if'], data=[]]
# if the length of the property path is 0, assume that current object is root
if len(property_path) <= 0:
schema['$id'] = f'{config_cls.__qualname__}.json'
# NOTE: requires draft-07 for typing.Union type schema generation
schema['$schema'] = 'http://json-schema.org/draft-07/schema#' # depends on [control=['if'], data=[]]
else:
schema['$id'] = f"#/{'/'.join(property_path)}"
property_path.append('properties')
for var in attr.fields(config_cls):
if not is_config_var(var):
# encountered attribute is not a serialized field (i.e. missing CONFIG_KEY)
continue # depends on [control=['if'], data=[]]
entry = var.metadata[CONFIG_KEY]
var_name = entry.name if entry.name else var.name
if entry.required:
schema['required'].append(var_name) # depends on [control=['if'], data=[]]
if is_config_type(var.type):
schema['properties'][var_name] = _build_config(var.type, property_path=property_path + [var_name]) # depends on [control=['if'], data=[]]
else:
schema['properties'][var_name] = _build_var(var, property_path=property_path) # depends on [control=['for'], data=['var']]
return schema |
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    Reads ``size`` bytes, or all remaining data when ``size`` is None,
    by delegating to the underlying APFS file entry.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the file-like object has not been opened, or the read failed.
      OSError: if the read failed.
    """
    if self._is_open:
        return self._fsapfs_file_entry.read(size=size)
    raise IOError('Not opened.')
constant[Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
]
if <ast.UnaryOp object at 0x7da1b065abf0> begin[:]
<ast.Raise object at 0x7da1b06583d0>
return[call[name[self]._fsapfs_file_entry.read, parameter[]]] | keyword[def] identifier[read] ( identifier[self] , identifier[size] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_open] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[return] identifier[self] . identifier[_fsapfs_file_entry] . identifier[read] ( identifier[size] = identifier[size] ) | def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.') # depends on [control=['if'], data=[]]
return self._fsapfs_file_entry.read(size=size) |
def export_posterior_probability(self, filename, title="Posterior Probability"):
    """
    Write the posterior probability of read origin to disk.

    :param filename: File name for output
    :param title: The title of the posterior probability matrix
    :return: Nothing; the method writes a file in EMASE format (PyTables)
    """
    save_kwargs = {"h5file": filename, "title": title}
    self.probability.save(**save_kwargs)
constant[
Writes the posterior probability of read origin
:param filename: File name for output
:param title: The title of the posterior probability matrix
:return: Nothing but the method writes a file in EMASE format (PyTables)
]
call[name[self].probability.save, parameter[]] | keyword[def] identifier[export_posterior_probability] ( identifier[self] , identifier[filename] , identifier[title] = literal[string] ):
literal[string]
identifier[self] . identifier[probability] . identifier[save] ( identifier[h5file] = identifier[filename] , identifier[title] = identifier[title] ) | def export_posterior_probability(self, filename, title='Posterior Probability'):
"""
Writes the posterior probability of read origin
:param filename: File name for output
:param title: The title of the posterior probability matrix
:return: Nothing but the method writes a file in EMASE format (PyTables)
"""
self.probability.save(h5file=filename, title=title) |
def assign_properties(thing):
"""Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
"""
for p in range(5):
property_name = "property" + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property)
session.commit() | def function[assign_properties, parameter[thing]]:
constant[Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
]
for taget[name[p]] in starred[call[name[range], parameter[constant[5]]]] begin[:]
variable[property_name] assign[=] binary_operation[constant[property] + call[name[str], parameter[binary_operation[name[p] + constant[1]]]]]
variable[property] assign[=] call[name[request_parameter], parameter[]]
if name[property] begin[:]
call[name[setattr], parameter[name[thing], name[property_name], name[property]]]
call[name[session].commit, parameter[]] | keyword[def] identifier[assign_properties] ( identifier[thing] ):
literal[string]
keyword[for] identifier[p] keyword[in] identifier[range] ( literal[int] ):
identifier[property_name] = literal[string] + identifier[str] ( identifier[p] + literal[int] )
identifier[property] = identifier[request_parameter] ( identifier[parameter] = identifier[property_name] , identifier[optional] = keyword[True] )
keyword[if] identifier[property] :
identifier[setattr] ( identifier[thing] , identifier[property_name] , identifier[property] )
identifier[session] . identifier[commit] () | def assign_properties(thing):
"""Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
"""
for p in range(5):
property_name = 'property' + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
session.commit() |
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master) | def function[connect_master, parameter[self, failed]]:
constant[
Return a future which will complete when you are connected to a master
]
<ast.Tuple object at 0x7da1b21ebb80> assign[=] <ast.Yield object at 0x7da1b21eb0a0>
<ast.Yield object at 0x7da1b21e80d0> | keyword[def] identifier[connect_master] ( identifier[self] , identifier[failed] = keyword[False] ):
literal[string]
identifier[master] , identifier[self] . identifier[pub_channel] = keyword[yield] identifier[self] . identifier[eval_master] ( identifier[self] . identifier[opts] , identifier[self] . identifier[timeout] , identifier[self] . identifier[safe] , identifier[failed] )
keyword[yield] identifier[self] . identifier[_post_master_init] ( identifier[master] ) | def connect_master(self, failed=False):
"""
Return a future which will complete when you are connected to a master
"""
(master, self.pub_channel) = (yield self.eval_master(self.opts, self.timeout, self.safe, failed))
yield self._post_master_init(master) |
def write_libraries(dir, libraries):
"""Write a list of libraries to disk.
Args:
dir: Output directory.
libraries: List of (filename, library) pairs.
"""
files = [open(os.path.join(dir, k), "w") for k, _ in libraries]
# Document mentioned symbols for all libraries
for f, (_, v) in zip(files, libraries):
v.write_markdown_to_file(f)
# Document symbols that no library mentioned. We do this after writing
# out all libraries so that earlier libraries know what later libraries
# documented.
for f, (_, v) in zip(files, libraries):
v.write_other_members(f)
f.close() | def function[write_libraries, parameter[dir, libraries]]:
constant[Write a list of libraries to disk.
Args:
dir: Output directory.
libraries: List of (filename, library) pairs.
]
variable[files] assign[=] <ast.ListComp object at 0x7da1b117bdc0>
for taget[tuple[[<ast.Name object at 0x7da1b1178ca0>, <ast.Tuple object at 0x7da1b1179d50>]]] in starred[call[name[zip], parameter[name[files], name[libraries]]]] begin[:]
call[name[v].write_markdown_to_file, parameter[name[f]]]
for taget[tuple[[<ast.Name object at 0x7da1b1179d20>, <ast.Tuple object at 0x7da1b11794e0>]]] in starred[call[name[zip], parameter[name[files], name[libraries]]]] begin[:]
call[name[v].write_other_members, parameter[name[f]]]
call[name[f].close, parameter[]] | keyword[def] identifier[write_libraries] ( identifier[dir] , identifier[libraries] ):
literal[string]
identifier[files] =[ identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , identifier[k] ), literal[string] ) keyword[for] identifier[k] , identifier[_] keyword[in] identifier[libraries] ]
keyword[for] identifier[f] ,( identifier[_] , identifier[v] ) keyword[in] identifier[zip] ( identifier[files] , identifier[libraries] ):
identifier[v] . identifier[write_markdown_to_file] ( identifier[f] )
keyword[for] identifier[f] ,( identifier[_] , identifier[v] ) keyword[in] identifier[zip] ( identifier[files] , identifier[libraries] ):
identifier[v] . identifier[write_other_members] ( identifier[f] )
identifier[f] . identifier[close] () | def write_libraries(dir, libraries):
"""Write a list of libraries to disk.
Args:
dir: Output directory.
libraries: List of (filename, library) pairs.
"""
files = [open(os.path.join(dir, k), 'w') for (k, _) in libraries]
# Document mentioned symbols for all libraries
for (f, (_, v)) in zip(files, libraries):
v.write_markdown_to_file(f) # depends on [control=['for'], data=[]]
# Document symbols that no library mentioned. We do this after writing
# out all libraries so that earlier libraries know what later libraries
# documented.
for (f, (_, v)) in zip(files, libraries):
v.write_other_members(f)
f.close() # depends on [control=['for'], data=[]] |
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors) | def function[create_ustar_header, parameter[self, info, encoding, errors]]:
constant[Return the object as a ustar header block.
]
call[name[info]][constant[magic]] assign[=] name[POSIX_MAGIC]
if compare[call[name[len], parameter[call[name[info]][constant[linkname]]]] greater[>] name[LENGTH_LINK]] begin[:]
<ast.Raise object at 0x7da18bcc9690>
if compare[call[name[len], parameter[call[name[info]][constant[name]]]] greater[>] name[LENGTH_NAME]] begin[:]
<ast.Tuple object at 0x7da18bcc92a0> assign[=] call[name[self]._posix_split_name, parameter[call[name[info]][constant[name]]]]
return[call[name[self]._create_header, parameter[name[info], name[USTAR_FORMAT], name[encoding], name[errors]]]] | keyword[def] identifier[create_ustar_header] ( identifier[self] , identifier[info] , identifier[encoding] , identifier[errors] ):
literal[string]
identifier[info] [ literal[string] ]= identifier[POSIX_MAGIC]
keyword[if] identifier[len] ( identifier[info] [ literal[string] ])> identifier[LENGTH_LINK] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[info] [ literal[string] ])> identifier[LENGTH_NAME] :
identifier[info] [ literal[string] ], identifier[info] [ literal[string] ]= identifier[self] . identifier[_posix_split_name] ( identifier[info] [ literal[string] ])
keyword[return] identifier[self] . identifier[_create_header] ( identifier[info] , identifier[USTAR_FORMAT] , identifier[encoding] , identifier[errors] ) | def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info['magic'] = POSIX_MAGIC
if len(info['linkname']) > LENGTH_LINK:
raise ValueError('linkname is too long') # depends on [control=['if'], data=[]]
if len(info['name']) > LENGTH_NAME:
(info['prefix'], info['name']) = self._posix_split_name(info['name']) # depends on [control=['if'], data=[]]
return self._create_header(info, USTAR_FORMAT, encoding, errors) |
def _densMoments_approx_higherorder_gaussxpolyInts(self,ll,ul,maxj):
"""Calculate all of the polynomial x Gaussian integrals occuring
in the higher-order terms, recursively"""
gaussxpolyInt= numpy.zeros((maxj,len(ul)))
gaussxpolyInt[-1]= 1./numpy.sqrt(numpy.pi)\
*(numpy.exp(-ll**2.)-numpy.exp(-ul**2.))
gaussxpolyInt[-2]= 1./numpy.sqrt(numpy.pi)\
*(numpy.exp(-ll**2.)*ll-numpy.exp(-ul**2.)*ul)\
+0.5*(special.erf(ul)-special.erf(ll))
for jj in range(maxj-2):
gaussxpolyInt[-jj-3]= 1./numpy.sqrt(numpy.pi)\
*(numpy.exp(-ll**2.)*ll**(jj+2)-numpy.exp(-ul**2.)*ul**(jj+2))\
+0.5*(jj+2)*gaussxpolyInt[-jj-1]
return gaussxpolyInt | def function[_densMoments_approx_higherorder_gaussxpolyInts, parameter[self, ll, ul, maxj]]:
constant[Calculate all of the polynomial x Gaussian integrals occuring
in the higher-order terms, recursively]
variable[gaussxpolyInt] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da1b0ea7be0>, <ast.Call object at 0x7da1b0ea7d30>]]]]
call[name[gaussxpolyInt]][<ast.UnaryOp object at 0x7da1b0ea5f00>] assign[=] binary_operation[binary_operation[constant[1.0] / call[name[numpy].sqrt, parameter[name[numpy].pi]]] * binary_operation[call[name[numpy].exp, parameter[<ast.UnaryOp object at 0x7da1b0ea74c0>]] - call[name[numpy].exp, parameter[<ast.UnaryOp object at 0x7da1b0ea76d0>]]]]
call[name[gaussxpolyInt]][<ast.UnaryOp object at 0x7da1b0ea7cd0>] assign[=] binary_operation[binary_operation[binary_operation[constant[1.0] / call[name[numpy].sqrt, parameter[name[numpy].pi]]] * binary_operation[binary_operation[call[name[numpy].exp, parameter[<ast.UnaryOp object at 0x7da1b0ea5210>]] * name[ll]] - binary_operation[call[name[numpy].exp, parameter[<ast.UnaryOp object at 0x7da1b0ea6ce0>]] * name[ul]]]] + binary_operation[constant[0.5] * binary_operation[call[name[special].erf, parameter[name[ul]]] - call[name[special].erf, parameter[name[ll]]]]]]
for taget[name[jj]] in starred[call[name[range], parameter[binary_operation[name[maxj] - constant[2]]]]] begin[:]
call[name[gaussxpolyInt]][binary_operation[<ast.UnaryOp object at 0x7da1b0ea4730> - constant[3]]] assign[=] binary_operation[binary_operation[binary_operation[constant[1.0] / call[name[numpy].sqrt, parameter[name[numpy].pi]]] * binary_operation[binary_operation[call[name[numpy].exp, parameter[<ast.UnaryOp object at 0x7da1b0ea63e0>]] * binary_operation[name[ll] ** binary_operation[name[jj] + constant[2]]]] - binary_operation[call[name[numpy].exp, parameter[<ast.UnaryOp object at 0x7da18bcca680>]] * binary_operation[name[ul] ** binary_operation[name[jj] + constant[2]]]]]] + binary_operation[binary_operation[constant[0.5] * binary_operation[name[jj] + constant[2]]] * call[name[gaussxpolyInt]][binary_operation[<ast.UnaryOp object at 0x7da18bccb010> - constant[1]]]]]
return[name[gaussxpolyInt]] | keyword[def] identifier[_densMoments_approx_higherorder_gaussxpolyInts] ( identifier[self] , identifier[ll] , identifier[ul] , identifier[maxj] ):
literal[string]
identifier[gaussxpolyInt] = identifier[numpy] . identifier[zeros] (( identifier[maxj] , identifier[len] ( identifier[ul] )))
identifier[gaussxpolyInt] [- literal[int] ]= literal[int] / identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[pi] )*( identifier[numpy] . identifier[exp] (- identifier[ll] ** literal[int] )- identifier[numpy] . identifier[exp] (- identifier[ul] ** literal[int] ))
identifier[gaussxpolyInt] [- literal[int] ]= literal[int] / identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[pi] )*( identifier[numpy] . identifier[exp] (- identifier[ll] ** literal[int] )* identifier[ll] - identifier[numpy] . identifier[exp] (- identifier[ul] ** literal[int] )* identifier[ul] )+ literal[int] *( identifier[special] . identifier[erf] ( identifier[ul] )- identifier[special] . identifier[erf] ( identifier[ll] ))
keyword[for] identifier[jj] keyword[in] identifier[range] ( identifier[maxj] - literal[int] ):
identifier[gaussxpolyInt] [- identifier[jj] - literal[int] ]= literal[int] / identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[pi] )*( identifier[numpy] . identifier[exp] (- identifier[ll] ** literal[int] )* identifier[ll] **( identifier[jj] + literal[int] )- identifier[numpy] . identifier[exp] (- identifier[ul] ** literal[int] )* identifier[ul] **( identifier[jj] + literal[int] ))+ literal[int] *( identifier[jj] + literal[int] )* identifier[gaussxpolyInt] [- identifier[jj] - literal[int] ]
keyword[return] identifier[gaussxpolyInt] | def _densMoments_approx_higherorder_gaussxpolyInts(self, ll, ul, maxj):
"""Calculate all of the polynomial x Gaussian integrals occuring
in the higher-order terms, recursively"""
gaussxpolyInt = numpy.zeros((maxj, len(ul)))
gaussxpolyInt[-1] = 1.0 / numpy.sqrt(numpy.pi) * (numpy.exp(-ll ** 2.0) - numpy.exp(-ul ** 2.0))
gaussxpolyInt[-2] = 1.0 / numpy.sqrt(numpy.pi) * (numpy.exp(-ll ** 2.0) * ll - numpy.exp(-ul ** 2.0) * ul) + 0.5 * (special.erf(ul) - special.erf(ll))
for jj in range(maxj - 2):
gaussxpolyInt[-jj - 3] = 1.0 / numpy.sqrt(numpy.pi) * (numpy.exp(-ll ** 2.0) * ll ** (jj + 2) - numpy.exp(-ul ** 2.0) * ul ** (jj + 2)) + 0.5 * (jj + 2) * gaussxpolyInt[-jj - 1] # depends on [control=['for'], data=['jj']]
return gaussxpolyInt |
def _handle_response(self, response):
"""
Handles the response received from Scrapyd.
"""
if not response.ok:
raise ScrapydResponseError(
"Scrapyd returned a {0} error: {1}".format(
response.status_code,
response.text))
try:
json = response.json()
except ValueError:
raise ScrapydResponseError("Scrapyd returned an invalid JSON "
"response: {0}".format(response.text))
if json['status'] == 'ok':
json.pop('status')
return json
elif json['status'] == 'error':
raise ScrapydResponseError(json['message']) | def function[_handle_response, parameter[self, response]]:
constant[
Handles the response received from Scrapyd.
]
if <ast.UnaryOp object at 0x7da1b063c250> begin[:]
<ast.Raise object at 0x7da1b063e410>
<ast.Try object at 0x7da1b063e440>
if compare[call[name[json]][constant[status]] equal[==] constant[ok]] begin[:]
call[name[json].pop, parameter[constant[status]]]
return[name[json]] | keyword[def] identifier[_handle_response] ( identifier[self] , identifier[response] ):
literal[string]
keyword[if] keyword[not] identifier[response] . identifier[ok] :
keyword[raise] identifier[ScrapydResponseError] (
literal[string] . identifier[format] (
identifier[response] . identifier[status_code] ,
identifier[response] . identifier[text] ))
keyword[try] :
identifier[json] = identifier[response] . identifier[json] ()
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ScrapydResponseError] ( literal[string]
literal[string] . identifier[format] ( identifier[response] . identifier[text] ))
keyword[if] identifier[json] [ literal[string] ]== literal[string] :
identifier[json] . identifier[pop] ( literal[string] )
keyword[return] identifier[json]
keyword[elif] identifier[json] [ literal[string] ]== literal[string] :
keyword[raise] identifier[ScrapydResponseError] ( identifier[json] [ literal[string] ]) | def _handle_response(self, response):
"""
Handles the response received from Scrapyd.
"""
if not response.ok:
raise ScrapydResponseError('Scrapyd returned a {0} error: {1}'.format(response.status_code, response.text)) # depends on [control=['if'], data=[]]
try:
json = response.json() # depends on [control=['try'], data=[]]
except ValueError:
raise ScrapydResponseError('Scrapyd returned an invalid JSON response: {0}'.format(response.text)) # depends on [control=['except'], data=[]]
if json['status'] == 'ok':
json.pop('status')
return json # depends on [control=['if'], data=[]]
elif json['status'] == 'error':
raise ScrapydResponseError(json['message']) # depends on [control=['if'], data=[]] |
def _parseIntegerArgument(args, key, defaultValue):
"""
Attempts to parse the specified key in the specified argument
dictionary into an integer. If the argument cannot be parsed,
raises a BadRequestIntegerException. If the key is not present,
return the specified default value.
"""
ret = defaultValue
try:
if key in args:
try:
ret = int(args[key])
except ValueError:
raise exceptions.BadRequestIntegerException(key, args[key])
except TypeError:
raise Exception((key, args))
return ret | def function[_parseIntegerArgument, parameter[args, key, defaultValue]]:
constant[
Attempts to parse the specified key in the specified argument
dictionary into an integer. If the argument cannot be parsed,
raises a BadRequestIntegerException. If the key is not present,
return the specified default value.
]
variable[ret] assign[=] name[defaultValue]
<ast.Try object at 0x7da18ede5ed0>
return[name[ret]] | keyword[def] identifier[_parseIntegerArgument] ( identifier[args] , identifier[key] , identifier[defaultValue] ):
literal[string]
identifier[ret] = identifier[defaultValue]
keyword[try] :
keyword[if] identifier[key] keyword[in] identifier[args] :
keyword[try] :
identifier[ret] = identifier[int] ( identifier[args] [ identifier[key] ])
keyword[except] identifier[ValueError] :
keyword[raise] identifier[exceptions] . identifier[BadRequestIntegerException] ( identifier[key] , identifier[args] [ identifier[key] ])
keyword[except] identifier[TypeError] :
keyword[raise] identifier[Exception] (( identifier[key] , identifier[args] ))
keyword[return] identifier[ret] | def _parseIntegerArgument(args, key, defaultValue):
"""
Attempts to parse the specified key in the specified argument
dictionary into an integer. If the argument cannot be parsed,
raises a BadRequestIntegerException. If the key is not present,
return the specified default value.
"""
ret = defaultValue
try:
if key in args:
try:
ret = int(args[key]) # depends on [control=['try'], data=[]]
except ValueError:
raise exceptions.BadRequestIntegerException(key, args[key]) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['key', 'args']] # depends on [control=['try'], data=[]]
except TypeError:
raise Exception((key, args)) # depends on [control=['except'], data=[]]
return ret |
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst | def function[generate_code_cover, parameter[self]]:
constant[
Generate a list of all recovered basic blocks.
]
variable[lst] assign[=] list[[]]
for taget[name[cfg_node]] in starred[call[name[self].graph.nodes, parameter[]]] begin[:]
variable[size] assign[=] name[cfg_node].size
call[name[lst].append, parameter[tuple[[<ast.Attribute object at 0x7da18dc9bd60>, <ast.Name object at 0x7da18dc99840>]]]]
variable[lst] assign[=] call[name[sorted], parameter[name[lst]]]
return[name[lst]] | keyword[def] identifier[generate_code_cover] ( identifier[self] ):
literal[string]
identifier[lst] =[]
keyword[for] identifier[cfg_node] keyword[in] identifier[self] . identifier[graph] . identifier[nodes] ():
identifier[size] = identifier[cfg_node] . identifier[size]
identifier[lst] . identifier[append] (( identifier[cfg_node] . identifier[addr] , identifier[size] ))
identifier[lst] = identifier[sorted] ( identifier[lst] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])
keyword[return] identifier[lst] | def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size)) # depends on [control=['for'], data=['cfg_node']]
lst = sorted(lst, key=lambda x: x[0])
return lst |
def load(self, pkcs11dll_filename=None, *init_string):
"""
load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails
"""
if pkcs11dll_filename is None:
pkcs11dll_filename = os.getenv("PYKCS11LIB")
if pkcs11dll_filename is None:
raise PyKCS11Error(-1, "No PKCS11 library specified (set PYKCS11LIB env variable)")
rv = self.lib.Load(pkcs11dll_filename)
if rv == 0:
raise PyKCS11Error(-1, pkcs11dll_filename) | def function[load, parameter[self, pkcs11dll_filename]]:
constant[
load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails
]
if compare[name[pkcs11dll_filename] is constant[None]] begin[:]
variable[pkcs11dll_filename] assign[=] call[name[os].getenv, parameter[constant[PYKCS11LIB]]]
if compare[name[pkcs11dll_filename] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2347e80>
variable[rv] assign[=] call[name[self].lib.Load, parameter[name[pkcs11dll_filename]]]
if compare[name[rv] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b2347220> | keyword[def] identifier[load] ( identifier[self] , identifier[pkcs11dll_filename] = keyword[None] ,* identifier[init_string] ):
literal[string]
keyword[if] identifier[pkcs11dll_filename] keyword[is] keyword[None] :
identifier[pkcs11dll_filename] = identifier[os] . identifier[getenv] ( literal[string] )
keyword[if] identifier[pkcs11dll_filename] keyword[is] keyword[None] :
keyword[raise] identifier[PyKCS11Error] (- literal[int] , literal[string] )
identifier[rv] = identifier[self] . identifier[lib] . identifier[Load] ( identifier[pkcs11dll_filename] )
keyword[if] identifier[rv] == literal[int] :
keyword[raise] identifier[PyKCS11Error] (- literal[int] , identifier[pkcs11dll_filename] ) | def load(self, pkcs11dll_filename=None, *init_string):
"""
load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails
"""
if pkcs11dll_filename is None:
pkcs11dll_filename = os.getenv('PYKCS11LIB')
if pkcs11dll_filename is None:
raise PyKCS11Error(-1, 'No PKCS11 library specified (set PYKCS11LIB env variable)') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pkcs11dll_filename']]
rv = self.lib.Load(pkcs11dll_filename)
if rv == 0:
raise PyKCS11Error(-1, pkcs11dll_filename) # depends on [control=['if'], data=[]] |
def has_duplicate_min(x):
"""
Checks if the minimal value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.sum(x == np.min(x)) >= 2 | def function[has_duplicate_min, parameter[x]]:
constant[
Checks if the minimal value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
]
if <ast.UnaryOp object at 0x7da18f810760> begin[:]
variable[x] assign[=] call[name[np].asarray, parameter[name[x]]]
return[compare[call[name[np].sum, parameter[compare[name[x] equal[==] call[name[np].min, parameter[name[x]]]]]] greater_or_equal[>=] constant[2]]] | keyword[def] identifier[has_duplicate_min] ( identifier[x] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[x] ,( identifier[np] . identifier[ndarray] , identifier[pd] . identifier[Series] )):
identifier[x] = identifier[np] . identifier[asarray] ( identifier[x] )
keyword[return] identifier[np] . identifier[sum] ( identifier[x] == identifier[np] . identifier[min] ( identifier[x] ))>= literal[int] | def has_duplicate_min(x):
"""
Checks if the minimal value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x) # depends on [control=['if'], data=[]]
return np.sum(x == np.min(x)) >= 2 |
def Sine(x, a, omega, phi, y0):
"""Sine function
Inputs:
-------
``x``: independent variable
``a``: amplitude
``omega``: circular frequency
``phi``: phase
``y0``: offset
Formula:
--------
``a*sin(x*omega + phi)+y0``
"""
return a * np.sin(x * omega + phi) + y0 | def function[Sine, parameter[x, a, omega, phi, y0]]:
constant[Sine function
Inputs:
-------
``x``: independent variable
``a``: amplitude
``omega``: circular frequency
``phi``: phase
``y0``: offset
Formula:
--------
``a*sin(x*omega + phi)+y0``
]
return[binary_operation[binary_operation[name[a] * call[name[np].sin, parameter[binary_operation[binary_operation[name[x] * name[omega]] + name[phi]]]]] + name[y0]]] | keyword[def] identifier[Sine] ( identifier[x] , identifier[a] , identifier[omega] , identifier[phi] , identifier[y0] ):
literal[string]
keyword[return] identifier[a] * identifier[np] . identifier[sin] ( identifier[x] * identifier[omega] + identifier[phi] )+ identifier[y0] | def Sine(x, a, omega, phi, y0):
"""Sine function
Inputs:
-------
``x``: independent variable
``a``: amplitude
``omega``: circular frequency
``phi``: phase
``y0``: offset
Formula:
--------
``a*sin(x*omega + phi)+y0``
"""
return a * np.sin(x * omega + phi) + y0 |
def sum_string(amount, gender, items=None):
"""
Get sum in words
@param amount: amount of objects
@type amount: C{integer types}
@param gender: gender of object (MALE, FEMALE or NEUTER)
@type gender: C{int}
@param items: variants of object in three forms:
for one object, for two objects and for five objects
@type items: 3-element C{sequence} of C{unicode} or
just C{unicode} (three variants with delimeter ',')
@return: in-words representation objects' amount
@rtype: C{unicode}
@raise ValueError: items isn't 3-element C{sequence} or C{unicode}
@raise ValueError: amount bigger than 10**11
@raise ValueError: amount is negative
"""
if isinstance(items, six.text_type):
items = split_values(items)
if items is None:
items = (u"", u"", u"")
try:
one_item, two_items, five_items = items
except ValueError:
raise ValueError("Items must be 3-element sequence")
check_positive(amount)
if amount == 0:
if five_items:
return u"ноль %s" % five_items
else:
return u"ноль"
into = u''
tmp_val = amount
# единицы
into, tmp_val = _sum_string_fn(into, tmp_val, gender, items)
# тысячи
into, tmp_val = _sum_string_fn(into, tmp_val, FEMALE,
(u"тысяча", u"тысячи", u"тысяч"))
# миллионы
into, tmp_val = _sum_string_fn(into, tmp_val, MALE,
(u"миллион", u"миллиона", u"миллионов"))
# миллиарды
into, tmp_val = _sum_string_fn(into, tmp_val, MALE,
(u"миллиард", u"миллиарда", u"миллиардов"))
if tmp_val == 0:
return into
else:
raise ValueError("Cannot operand with numbers bigger than 10**11") | def function[sum_string, parameter[amount, gender, items]]:
constant[
Get sum in words
@param amount: amount of objects
@type amount: C{integer types}
@param gender: gender of object (MALE, FEMALE or NEUTER)
@type gender: C{int}
@param items: variants of object in three forms:
for one object, for two objects and for five objects
@type items: 3-element C{sequence} of C{unicode} or
just C{unicode} (three variants with delimeter ',')
@return: in-words representation objects' amount
@rtype: C{unicode}
@raise ValueError: items isn't 3-element C{sequence} or C{unicode}
@raise ValueError: amount bigger than 10**11
@raise ValueError: amount is negative
]
if call[name[isinstance], parameter[name[items], name[six].text_type]] begin[:]
variable[items] assign[=] call[name[split_values], parameter[name[items]]]
if compare[name[items] is constant[None]] begin[:]
variable[items] assign[=] tuple[[<ast.Constant object at 0x7da1b0d20520>, <ast.Constant object at 0x7da1b0d21c00>, <ast.Constant object at 0x7da1b0d22710>]]
<ast.Try object at 0x7da1b0d22020>
call[name[check_positive], parameter[name[amount]]]
if compare[name[amount] equal[==] constant[0]] begin[:]
if name[five_items] begin[:]
return[binary_operation[constant[ноль %s] <ast.Mod object at 0x7da2590d6920> name[five_items]]]
variable[into] assign[=] constant[]
variable[tmp_val] assign[=] name[amount]
<ast.Tuple object at 0x7da1b0d226e0> assign[=] call[name[_sum_string_fn], parameter[name[into], name[tmp_val], name[gender], name[items]]]
<ast.Tuple object at 0x7da1b0d20880> assign[=] call[name[_sum_string_fn], parameter[name[into], name[tmp_val], name[FEMALE], tuple[[<ast.Constant object at 0x7da1b0d208b0>, <ast.Constant object at 0x7da1b0d21fc0>, <ast.Constant object at 0x7da1b0d20e50>]]]]
<ast.Tuple object at 0x7da1b0d20280> assign[=] call[name[_sum_string_fn], parameter[name[into], name[tmp_val], name[MALE], tuple[[<ast.Constant object at 0x7da1b0d22260>, <ast.Constant object at 0x7da1b0d20af0>, <ast.Constant object at 0x7da1b0d20ca0>]]]]
<ast.Tuple object at 0x7da1b0d22380> assign[=] call[name[_sum_string_fn], parameter[name[into], name[tmp_val], name[MALE], tuple[[<ast.Constant object at 0x7da1b0eff940>, <ast.Constant object at 0x7da1b0efe050>, <ast.Constant object at 0x7da1b0efe110>]]]]
if compare[name[tmp_val] equal[==] constant[0]] begin[:]
return[name[into]] | keyword[def] identifier[sum_string] ( identifier[amount] , identifier[gender] , identifier[items] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[items] , identifier[six] . identifier[text_type] ):
identifier[items] = identifier[split_values] ( identifier[items] )
keyword[if] identifier[items] keyword[is] keyword[None] :
identifier[items] =( literal[string] , literal[string] , literal[string] )
keyword[try] :
identifier[one_item] , identifier[two_items] , identifier[five_items] = identifier[items]
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[check_positive] ( identifier[amount] )
keyword[if] identifier[amount] == literal[int] :
keyword[if] identifier[five_items] :
keyword[return] literal[string] % identifier[five_items]
keyword[else] :
keyword[return] literal[string]
identifier[into] = literal[string]
identifier[tmp_val] = identifier[amount]
identifier[into] , identifier[tmp_val] = identifier[_sum_string_fn] ( identifier[into] , identifier[tmp_val] , identifier[gender] , identifier[items] )
identifier[into] , identifier[tmp_val] = identifier[_sum_string_fn] ( identifier[into] , identifier[tmp_val] , identifier[FEMALE] ,
( literal[string] , literal[string] , literal[string] ))
identifier[into] , identifier[tmp_val] = identifier[_sum_string_fn] ( identifier[into] , identifier[tmp_val] , identifier[MALE] ,
( literal[string] , literal[string] , literal[string] ))
identifier[into] , identifier[tmp_val] = identifier[_sum_string_fn] ( identifier[into] , identifier[tmp_val] , identifier[MALE] ,
( literal[string] , literal[string] , literal[string] ))
keyword[if] identifier[tmp_val] == literal[int] :
keyword[return] identifier[into]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def sum_string(amount, gender, items=None):
"""
Get sum in words
@param amount: amount of objects
@type amount: C{integer types}
@param gender: gender of object (MALE, FEMALE or NEUTER)
@type gender: C{int}
@param items: variants of object in three forms:
for one object, for two objects and for five objects
@type items: 3-element C{sequence} of C{unicode} or
just C{unicode} (three variants with delimeter ',')
@return: in-words representation objects' amount
@rtype: C{unicode}
@raise ValueError: items isn't 3-element C{sequence} or C{unicode}
@raise ValueError: amount bigger than 10**11
@raise ValueError: amount is negative
"""
if isinstance(items, six.text_type):
items = split_values(items) # depends on [control=['if'], data=[]]
if items is None:
items = (u'', u'', u'') # depends on [control=['if'], data=['items']]
try:
(one_item, two_items, five_items) = items # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Items must be 3-element sequence') # depends on [control=['except'], data=[]]
check_positive(amount)
if amount == 0:
if five_items:
return u'ноль %s' % five_items # depends on [control=['if'], data=[]]
else:
return u'ноль' # depends on [control=['if'], data=[]]
into = u''
tmp_val = amount
# единицы
(into, tmp_val) = _sum_string_fn(into, tmp_val, gender, items)
# тысячи
(into, tmp_val) = _sum_string_fn(into, tmp_val, FEMALE, (u'тысяча', u'тысячи', u'тысяч'))
# миллионы
(into, tmp_val) = _sum_string_fn(into, tmp_val, MALE, (u'миллион', u'миллиона', u'миллионов'))
# миллиарды
(into, tmp_val) = _sum_string_fn(into, tmp_val, MALE, (u'миллиард', u'миллиарда', u'миллиардов'))
if tmp_val == 0:
return into # depends on [control=['if'], data=[]]
else:
raise ValueError('Cannot operand with numbers bigger than 10**11') |
def send_key(self, key):
        """ Send a key to the Horizon box. """
        # Big-endian frame: six single bytes followed by the 16-bit key code.
        # Second byte 1 presumably marks the key-press event — TODO confirm
        # against the Horizon remote protocol.
        cmd = struct.pack(">BBBBBBH", 4, 1, 0, 0, 0, 0, key)
        self.con.send(cmd)
        # Same frame with second byte 0 — presumably the matching key-release.
        cmd = struct.pack(">BBBBBBH", 4, 0, 0, 0, 0, 0, key)
self.con.send(cmd) | def function[send_key, parameter[self, key]]:
constant[ Send a key to the Horizon box. ]
variable[cmd] assign[=] call[name[struct].pack, parameter[constant[>BBBBBBH], constant[4], constant[1], constant[0], constant[0], constant[0], constant[0], name[key]]]
call[name[self].con.send, parameter[name[cmd]]]
variable[cmd] assign[=] call[name[struct].pack, parameter[constant[>BBBBBBH], constant[4], constant[0], constant[0], constant[0], constant[0], constant[0], name[key]]]
call[name[self].con.send, parameter[name[cmd]]] | keyword[def] identifier[send_key] ( identifier[self] , identifier[key] ):
literal[string]
identifier[cmd] = identifier[struct] . identifier[pack] ( literal[string] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , identifier[key] )
identifier[self] . identifier[con] . identifier[send] ( identifier[cmd] )
identifier[cmd] = identifier[struct] . identifier[pack] ( literal[string] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , identifier[key] )
identifier[self] . identifier[con] . identifier[send] ( identifier[cmd] ) | def send_key(self, key):
""" Send a key to the Horizon box. """
cmd = struct.pack('>BBBBBBH', 4, 1, 0, 0, 0, 0, key)
self.con.send(cmd)
cmd = struct.pack('>BBBBBBH', 4, 0, 0, 0, 0, 0, key)
self.con.send(cmd) |
def _get_forecast(api_result: dict) -> List[SmhiForecast]:
    """Converts results from API to SmhiForecast list"""
    forecasts = []
    # Need the ordered dict to get
    # the days in order in next stage
    forecasts_ordered = OrderedDict()
    forecasts_ordered = _get_all_forecast_from_api(api_result)
    # Used to calc the daycount
    day_nr = 1
    for day in forecasts_ordered:
        # All forecast entries that fall on this calendar day.
        forecasts_day = forecasts_ordered[day]
        if day_nr == 1:
            # Add the most recent forecast
            forecasts.append(copy.deepcopy(forecasts_day[0]))
        # Aggregate over the day's entries; the sentinels assume real
        # temperatures lie strictly between -100 and 100 degrees.
        total_precipitation = float(0.0)
        forecast_temp_max = -100.0
        forecast_temp_min = 100.0
        forecast = None
        for forcast_day in forecasts_day:
            temperature = forcast_day.temperature
            if forecast_temp_min > temperature:
                forecast_temp_min = temperature
            if forecast_temp_max < temperature:
                forecast_temp_max = temperature
            # The 12:00 entry is used as the day's representative forecast.
            if forcast_day.valid_time.hour == 12:
                forecast = copy.deepcopy(forcast_day)
            total_precipitation = total_precipitation + \
                forcast_day._total_precipitation
        if forecast is None:
            # We passed 12 noon, set to current
            forecast = forecasts_day[0]
        # Attach the aggregated day statistics to the representative entry.
        forecast._temperature_max = forecast_temp_max
        forecast._temperature_min = forecast_temp_min
        forecast._total_precipitation = total_precipitation
        # NOTE(review): divides by 24 even for the first (partial) day,
        # which may underestimate the mean — confirm intent.
        forecast._mean_precipitation = total_precipitation/24
        forecasts.append(forecast)
        day_nr = day_nr + 1
return forecasts | def function[_get_forecast, parameter[api_result]]:
constant[Converts results fråm API to SmhiForeCast list]
variable[forecasts] assign[=] list[[]]
variable[forecasts_ordered] assign[=] call[name[OrderedDict], parameter[]]
variable[forecasts_ordered] assign[=] call[name[_get_all_forecast_from_api], parameter[name[api_result]]]
variable[day_nr] assign[=] constant[1]
for taget[name[day]] in starred[name[forecasts_ordered]] begin[:]
variable[forecasts_day] assign[=] call[name[forecasts_ordered]][name[day]]
if compare[name[day_nr] equal[==] constant[1]] begin[:]
call[name[forecasts].append, parameter[call[name[copy].deepcopy, parameter[call[name[forecasts_day]][constant[0]]]]]]
variable[total_precipitation] assign[=] call[name[float], parameter[constant[0.0]]]
variable[forecast_temp_max] assign[=] <ast.UnaryOp object at 0x7da18dc05cc0>
variable[forecast_temp_min] assign[=] constant[100.0]
variable[forecast] assign[=] constant[None]
for taget[name[forcast_day]] in starred[name[forecasts_day]] begin[:]
variable[temperature] assign[=] name[forcast_day].temperature
if compare[name[forecast_temp_min] greater[>] name[temperature]] begin[:]
variable[forecast_temp_min] assign[=] name[temperature]
if compare[name[forecast_temp_max] less[<] name[temperature]] begin[:]
variable[forecast_temp_max] assign[=] name[temperature]
if compare[name[forcast_day].valid_time.hour equal[==] constant[12]] begin[:]
variable[forecast] assign[=] call[name[copy].deepcopy, parameter[name[forcast_day]]]
variable[total_precipitation] assign[=] binary_operation[name[total_precipitation] + name[forcast_day]._total_precipitation]
if compare[name[forecast] is constant[None]] begin[:]
variable[forecast] assign[=] call[name[forecasts_day]][constant[0]]
name[forecast]._temperature_max assign[=] name[forecast_temp_max]
name[forecast]._temperature_min assign[=] name[forecast_temp_min]
name[forecast]._total_precipitation assign[=] name[total_precipitation]
name[forecast]._mean_precipitation assign[=] binary_operation[name[total_precipitation] / constant[24]]
call[name[forecasts].append, parameter[name[forecast]]]
variable[day_nr] assign[=] binary_operation[name[day_nr] + constant[1]]
return[name[forecasts]] | keyword[def] identifier[_get_forecast] ( identifier[api_result] : identifier[dict] )-> identifier[List] [ identifier[SmhiForecast] ]:
literal[string]
identifier[forecasts] =[]
identifier[forecasts_ordered] = identifier[OrderedDict] ()
identifier[forecasts_ordered] = identifier[_get_all_forecast_from_api] ( identifier[api_result] )
identifier[day_nr] = literal[int]
keyword[for] identifier[day] keyword[in] identifier[forecasts_ordered] :
identifier[forecasts_day] = identifier[forecasts_ordered] [ identifier[day] ]
keyword[if] identifier[day_nr] == literal[int] :
identifier[forecasts] . identifier[append] ( identifier[copy] . identifier[deepcopy] ( identifier[forecasts_day] [ literal[int] ]))
identifier[total_precipitation] = identifier[float] ( literal[int] )
identifier[forecast_temp_max] =- literal[int]
identifier[forecast_temp_min] = literal[int]
identifier[forecast] = keyword[None]
keyword[for] identifier[forcast_day] keyword[in] identifier[forecasts_day] :
identifier[temperature] = identifier[forcast_day] . identifier[temperature]
keyword[if] identifier[forecast_temp_min] > identifier[temperature] :
identifier[forecast_temp_min] = identifier[temperature]
keyword[if] identifier[forecast_temp_max] < identifier[temperature] :
identifier[forecast_temp_max] = identifier[temperature]
keyword[if] identifier[forcast_day] . identifier[valid_time] . identifier[hour] == literal[int] :
identifier[forecast] = identifier[copy] . identifier[deepcopy] ( identifier[forcast_day] )
identifier[total_precipitation] = identifier[total_precipitation] + identifier[forcast_day] . identifier[_total_precipitation]
keyword[if] identifier[forecast] keyword[is] keyword[None] :
identifier[forecast] = identifier[forecasts_day] [ literal[int] ]
identifier[forecast] . identifier[_temperature_max] = identifier[forecast_temp_max]
identifier[forecast] . identifier[_temperature_min] = identifier[forecast_temp_min]
identifier[forecast] . identifier[_total_precipitation] = identifier[total_precipitation]
identifier[forecast] . identifier[_mean_precipitation] = identifier[total_precipitation] / literal[int]
identifier[forecasts] . identifier[append] ( identifier[forecast] )
identifier[day_nr] = identifier[day_nr] + literal[int]
keyword[return] identifier[forecasts] | def _get_forecast(api_result: dict) -> List[SmhiForecast]:
"""Converts results fråm API to SmhiForeCast list"""
forecasts = [] # Need the ordered dict to get
# the days in order in next stage
forecasts_ordered = OrderedDict()
forecasts_ordered = _get_all_forecast_from_api(api_result) # Used to calc the daycount
day_nr = 1
for day in forecasts_ordered:
forecasts_day = forecasts_ordered[day]
if day_nr == 1: # Add the most recent forecast
forecasts.append(copy.deepcopy(forecasts_day[0])) # depends on [control=['if'], data=[]]
total_precipitation = float(0.0)
forecast_temp_max = -100.0
forecast_temp_min = 100.0
forecast = None
for forcast_day in forecasts_day:
temperature = forcast_day.temperature
if forecast_temp_min > temperature:
forecast_temp_min = temperature # depends on [control=['if'], data=['forecast_temp_min', 'temperature']]
if forecast_temp_max < temperature:
forecast_temp_max = temperature # depends on [control=['if'], data=['forecast_temp_max', 'temperature']]
if forcast_day.valid_time.hour == 12:
forecast = copy.deepcopy(forcast_day) # depends on [control=['if'], data=[]]
total_precipitation = total_precipitation + forcast_day._total_precipitation # depends on [control=['for'], data=['forcast_day']]
if forecast is None: # We passed 12 noon, set to current
forecast = forecasts_day[0] # depends on [control=['if'], data=['forecast']]
forecast._temperature_max = forecast_temp_max
forecast._temperature_min = forecast_temp_min
forecast._total_precipitation = total_precipitation
forecast._mean_precipitation = total_precipitation / 24
forecasts.append(forecast)
day_nr = day_nr + 1 # depends on [control=['for'], data=['day']]
return forecasts |
def delete_account(self):
        """
        Delete a user's account.
        Deleting the user's account can only be done if the user's domain is controlled by the authorized organization,
        and removing the account will also remove the user from all organizations with access to that domain.
        :return: None, because you cannot follow this command with another.
        """
        # Adobe IDs are owned by Adobe rather than the organization, so
        # this operation is rejected up front for them.
        if self.id_type == IdentityTypes.adobeID:
            raise ArgumentError("You cannot delete an Adobe ID account.")
        # Queue the removeFromDomain command (it takes no parameters).
        self.append(removeFromDomain={})
return None | def function[delete_account, parameter[self]]:
constant[
Delete a user's account.
Deleting the user's account can only be done if the user's domain is controlled by the authorized organization,
and removing the account will also remove the user from all organizations with access to that domain.
:return: None, because you cannot follow this command with another.
]
if compare[name[self].id_type equal[==] name[IdentityTypes].adobeID] begin[:]
<ast.Raise object at 0x7da18eb544f0>
call[name[self].append, parameter[]]
return[constant[None]] | keyword[def] identifier[delete_account] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[id_type] == identifier[IdentityTypes] . identifier[adobeID] :
keyword[raise] identifier[ArgumentError] ( literal[string] )
identifier[self] . identifier[append] ( identifier[removeFromDomain] ={})
keyword[return] keyword[None] | def delete_account(self):
"""
Delete a user's account.
Deleting the user's account can only be done if the user's domain is controlled by the authorized organization,
and removing the account will also remove the user from all organizations with access to that domain.
:return: None, because you cannot follow this command with another.
"""
if self.id_type == IdentityTypes.adobeID:
raise ArgumentError('You cannot delete an Adobe ID account.') # depends on [control=['if'], data=[]]
self.append(removeFromDomain={})
return None |
def facts(self, name=None, value=None, **kwargs):
        """Query for facts limited by either name, value and/or query.
        :param name: (Optional) Only return facts that match this name.
        :type name: :obj:`string`
        :param value: (Optional) Only return facts of `name` that\
            match this value. Use of this parameter requires the `name`\
            parameter be set.
        :type value: :obj:`string`
        :param \*\*kwargs: The rest of the keyword arguments are passed
               to the _query function
        :returns: A generator yielding Facts.
        :rtype: :class:`pypuppetdb.types.Fact`
        """
        # Build the endpoint sub-path: 'name/value', just 'name', or none.
        if name is not None and value is not None:
            path = '{0}/{1}'.format(name, value)
        elif name is not None and value is None:
            path = name
        else:
            # NOTE(review): a value supplied without a name is silently
            # ignored here (path falls back to None).
            path = None
        facts = self._query('facts', path=path, **kwargs)
        # Lazily wrap each raw API dict in a Fact object.
        for fact in facts:
            yield Fact(
                node=fact['certname'],
                name=fact['name'],
                value=fact['value'],
                environment=fact['environment']
) | def function[facts, parameter[self, name, value]]:
constant[Query for facts limited by either name, value and/or query.
:param name: (Optional) Only return facts that match this name.
:type name: :obj:`string`
:param value: (Optional) Only return facts of `name` that match this value. Use of this parameter requires the `name` parameter be set.
:type value: :obj:`string`
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Facts.
:rtype: :class:`pypuppetdb.types.Fact`
]
if <ast.BoolOp object at 0x7da18f09e200> begin[:]
variable[path] assign[=] call[constant[{0}/{1}].format, parameter[name[name], name[value]]]
variable[facts] assign[=] call[name[self]._query, parameter[constant[facts]]]
for taget[name[fact]] in starred[name[facts]] begin[:]
<ast.Yield object at 0x7da18f09ca60> | keyword[def] identifier[facts] ( identifier[self] , identifier[name] = keyword[None] , identifier[value] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[path] = literal[string] . identifier[format] ( identifier[name] , identifier[value] )
keyword[elif] identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] keyword[is] keyword[None] :
identifier[path] = identifier[name]
keyword[else] :
identifier[path] = keyword[None]
identifier[facts] = identifier[self] . identifier[_query] ( literal[string] , identifier[path] = identifier[path] ,** identifier[kwargs] )
keyword[for] identifier[fact] keyword[in] identifier[facts] :
keyword[yield] identifier[Fact] (
identifier[node] = identifier[fact] [ literal[string] ],
identifier[name] = identifier[fact] [ literal[string] ],
identifier[value] = identifier[fact] [ literal[string] ],
identifier[environment] = identifier[fact] [ literal[string] ]
) | def facts(self, name=None, value=None, **kwargs):
"""Query for facts limited by either name, value and/or query.
:param name: (Optional) Only return facts that match this name.
:type name: :obj:`string`
:param value: (Optional) Only return facts of `name` that match this value. Use of this parameter requires the `name` parameter be set.
:type value: :obj:`string`
:param \\*\\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Facts.
:rtype: :class:`pypuppetdb.types.Fact`
"""
if name is not None and value is not None:
path = '{0}/{1}'.format(name, value) # depends on [control=['if'], data=[]]
elif name is not None and value is None:
path = name # depends on [control=['if'], data=[]]
else:
path = None
facts = self._query('facts', path=path, **kwargs)
for fact in facts:
yield Fact(node=fact['certname'], name=fact['name'], value=fact['value'], environment=fact['environment']) # depends on [control=['for'], data=['fact']] |
def count(self):
        """Return a count of instances."""
        # No explicit primary-key filter: delegate the count to the queryset.
        if self._primary_keys is None:
            return self.queryset.count()
        else:
            # Otherwise the number of collected primary keys is the count.
return len(self.pks) | def function[count, parameter[self]]:
constant[Return a count of instances.]
if compare[name[self]._primary_keys is constant[None]] begin[:]
return[call[name[self].queryset.count, parameter[]]] | keyword[def] identifier[count] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_primary_keys] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[queryset] . identifier[count] ()
keyword[else] :
keyword[return] identifier[len] ( identifier[self] . identifier[pks] ) | def count(self):
"""Return a count of instances."""
if self._primary_keys is None:
return self.queryset.count() # depends on [control=['if'], data=[]]
else:
return len(self.pks) |
def _bind_parameters(operation, parameters):
    """ Helper method that binds parameters to a SQL query. """
    # inspired by MySQL Python Connector (conversion.py)
    # NOTE(review): this interpolates values into the SQL string manually;
    # prefer driver-side parameter binding where available to avoid
    # SQL-injection risk from untrusted values.
    string_parameters = {}
    for (name, value) in iteritems(parameters):
        if value is None:
            # Render as SQL NULL, unquoted.
            string_parameters[name] = 'NULL'
        elif isinstance(value, basestring):
            # Escape and single-quote string values.
            string_parameters[name] = "'" + _escape(value) + "'"
        else:
            # Numbers and other types are rendered via str().
            string_parameters[name] = str(value)
return operation % string_parameters | def function[_bind_parameters, parameter[operation, parameters]]:
constant[ Helper method that binds parameters to a SQL query. ]
variable[string_parameters] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b052b0d0>, <ast.Name object at 0x7da1b052b610>]]] in starred[call[name[iteritems], parameter[name[parameters]]]] begin[:]
if compare[name[value] is constant[None]] begin[:]
call[name[string_parameters]][name[name]] assign[=] constant[NULL]
return[binary_operation[name[operation] <ast.Mod object at 0x7da2590d6920> name[string_parameters]]] | keyword[def] identifier[_bind_parameters] ( identifier[operation] , identifier[parameters] ):
literal[string]
identifier[string_parameters] ={}
keyword[for] ( identifier[name] , identifier[value] ) keyword[in] identifier[iteritems] ( identifier[parameters] ):
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[string_parameters] [ identifier[name] ]= literal[string]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[basestring] ):
identifier[string_parameters] [ identifier[name] ]= literal[string] + identifier[_escape] ( identifier[value] )+ literal[string]
keyword[else] :
identifier[string_parameters] [ identifier[name] ]= identifier[str] ( identifier[value] )
keyword[return] identifier[operation] % identifier[string_parameters] | def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL' # depends on [control=['if'], data=[]]
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'" # depends on [control=['if'], data=[]]
else:
string_parameters[name] = str(value) # depends on [control=['for'], data=[]]
return operation % string_parameters |
def paste_action_callback(self, *event):
        """Add clipboard key value pairs into all selected sub-dictionary"""
        # Only react when the event targets this tree view and no cell
        # editor is currently open.
        if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
            _, dict_paths = self.get_view_selection()
            selected_data_list = rafcon.gui.clipboard.global_clipboard.get_semantic_dictionary_list()
            # enforce paste on root level if semantic data empty or nothing is selected
            if not dict_paths and not self.model.state.semantic_data:
                dict_paths = [[]]
            for target_dict_path_as_list in dict_paths:
                # Walk the key path down the semantic-data tree, remembering
                # the parent (prev_value) of the final element.
                prev_value = self.model.state.semantic_data
                value = self.model.state.semantic_data
                for path_element in target_dict_path_as_list:
                    prev_value = value
                    value = value[path_element]
                if not isinstance(value, dict) and len(dict_paths) <= 1:  # if one selection take parent
                    target_dict_path_as_list.pop(-1)
                    value = prev_value
                # Only dict targets can receive pasted key/value pairs.
                if isinstance(value, dict):
                    for key_to_paste, value_to_add in selected_data_list:
                        self.model.state.add_semantic_data(target_dict_path_as_list, value_to_add, key_to_paste)
self.reload_tree_store_data() | def function[paste_action_callback, parameter[self]]:
constant[Add clipboard key value pairs into all selected sub-dictionary]
if <ast.BoolOp object at 0x7da1b1a3fc40> begin[:]
<ast.Tuple object at 0x7da1b1a3c580> assign[=] call[name[self].get_view_selection, parameter[]]
variable[selected_data_list] assign[=] call[name[rafcon].gui.clipboard.global_clipboard.get_semantic_dictionary_list, parameter[]]
if <ast.BoolOp object at 0x7da1b1a3c7f0> begin[:]
variable[dict_paths] assign[=] list[[<ast.List object at 0x7da1b1a90310>]]
for taget[name[target_dict_path_as_list]] in starred[name[dict_paths]] begin[:]
variable[prev_value] assign[=] name[self].model.state.semantic_data
variable[value] assign[=] name[self].model.state.semantic_data
for taget[name[path_element]] in starred[name[target_dict_path_as_list]] begin[:]
variable[prev_value] assign[=] name[value]
variable[value] assign[=] call[name[value]][name[path_element]]
if <ast.BoolOp object at 0x7da1b1a3c5b0> begin[:]
call[name[target_dict_path_as_list].pop, parameter[<ast.UnaryOp object at 0x7da1b1a3f8b0>]]
variable[value] assign[=] name[prev_value]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1a3c400>, <ast.Name object at 0x7da1b1a3c3d0>]]] in starred[name[selected_data_list]] begin[:]
call[name[self].model.state.add_semantic_data, parameter[name[target_dict_path_as_list], name[value_to_add], name[key_to_paste]]]
call[name[self].reload_tree_store_data, parameter[]] | keyword[def] identifier[paste_action_callback] ( identifier[self] ,* identifier[event] ):
literal[string]
keyword[if] identifier[react_to_event] ( identifier[self] . identifier[view] , identifier[self] . identifier[tree_view] , identifier[event] ) keyword[and] identifier[self] . identifier[active_entry_widget] keyword[is] keyword[None] :
identifier[_] , identifier[dict_paths] = identifier[self] . identifier[get_view_selection] ()
identifier[selected_data_list] = identifier[rafcon] . identifier[gui] . identifier[clipboard] . identifier[global_clipboard] . identifier[get_semantic_dictionary_list] ()
keyword[if] keyword[not] identifier[dict_paths] keyword[and] keyword[not] identifier[self] . identifier[model] . identifier[state] . identifier[semantic_data] :
identifier[dict_paths] =[[]]
keyword[for] identifier[target_dict_path_as_list] keyword[in] identifier[dict_paths] :
identifier[prev_value] = identifier[self] . identifier[model] . identifier[state] . identifier[semantic_data]
identifier[value] = identifier[self] . identifier[model] . identifier[state] . identifier[semantic_data]
keyword[for] identifier[path_element] keyword[in] identifier[target_dict_path_as_list] :
identifier[prev_value] = identifier[value]
identifier[value] = identifier[value] [ identifier[path_element] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] identifier[len] ( identifier[dict_paths] )<= literal[int] :
identifier[target_dict_path_as_list] . identifier[pop] (- literal[int] )
identifier[value] = identifier[prev_value]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[for] identifier[key_to_paste] , identifier[value_to_add] keyword[in] identifier[selected_data_list] :
identifier[self] . identifier[model] . identifier[state] . identifier[add_semantic_data] ( identifier[target_dict_path_as_list] , identifier[value_to_add] , identifier[key_to_paste] )
identifier[self] . identifier[reload_tree_store_data] () | def paste_action_callback(self, *event):
"""Add clipboard key value pairs into all selected sub-dictionary"""
if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
(_, dict_paths) = self.get_view_selection()
selected_data_list = rafcon.gui.clipboard.global_clipboard.get_semantic_dictionary_list()
# enforce paste on root level if semantic data empty or nothing is selected
if not dict_paths and (not self.model.state.semantic_data):
dict_paths = [[]] # depends on [control=['if'], data=[]]
for target_dict_path_as_list in dict_paths:
prev_value = self.model.state.semantic_data
value = self.model.state.semantic_data
for path_element in target_dict_path_as_list:
prev_value = value
value = value[path_element] # depends on [control=['for'], data=['path_element']]
if not isinstance(value, dict) and len(dict_paths) <= 1: # if one selection take parent
target_dict_path_as_list.pop(-1)
value = prev_value # depends on [control=['if'], data=[]]
if isinstance(value, dict):
for (key_to_paste, value_to_add) in selected_data_list:
self.model.state.add_semantic_data(target_dict_path_as_list, value_to_add, key_to_paste) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target_dict_path_as_list']]
self.reload_tree_store_data() # depends on [control=['if'], data=[]] |
def add_source_tag(self, id, tag_value, **kwargs):  # noqa: E501
        """Add a tag to a specific source  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_source_tag(id, tag_value, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: (required)
        :param str tag_value: (required)
        :return: ResponseContainer
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: always return the deserialized body only,
        # never the (data, status, headers) triple.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async: returns a thread; call .get() on it for the result.
            return self.add_source_tag_with_http_info(id, tag_value, **kwargs)  # noqa: E501
        else:
            (data) = self.add_source_tag_with_http_info(id, tag_value, **kwargs)  # noqa: E501
return data | def function[add_source_tag, parameter[self, id, tag_value]]:
constant[Add a tag to a specific source # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_source_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].add_source_tag_with_http_info, parameter[name[id], name[tag_value]]]] | keyword[def] identifier[add_source_tag] ( identifier[self] , identifier[id] , identifier[tag_value] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[add_source_tag_with_http_info] ( identifier[id] , identifier[tag_value] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[add_source_tag_with_http_info] ( identifier[id] , identifier[tag_value] ,** identifier[kwargs] )
keyword[return] identifier[data] | def add_source_tag(self, id, tag_value, **kwargs): # noqa: E501
'Add a tag to a specific source # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.add_source_tag(id, tag_value, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: (required)\n :param str tag_value: (required)\n :return: ResponseContainer\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_source_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.add_source_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
return data |
def _get_admin_change_url(self, model, context):
        """
        Returns the admin change url.
        """
        app_label = model._meta.app_label
        # URL name follows the Django admin convention:
        # '<site_name>:<app_label>_<model_name>_changelist'.
        return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context),
                                                app_label,
model.__name__.lower())) | def function[_get_admin_change_url, parameter[self, model, context]]:
constant[
Returns the admin change url.
]
variable[app_label] assign[=] name[model]._meta.app_label
return[call[name[reverse], parameter[binary_operation[constant[%s:%s_%s_changelist] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da2044c0f70>, <ast.Name object at 0x7da2044c1240>, <ast.Call object at 0x7da2044c17e0>]]]]]] | keyword[def] identifier[_get_admin_change_url] ( identifier[self] , identifier[model] , identifier[context] ):
literal[string]
identifier[app_label] = identifier[model] . identifier[_meta] . identifier[app_label]
keyword[return] identifier[reverse] ( literal[string] %( identifier[get_admin_site_name] ( identifier[context] ),
identifier[app_label] ,
identifier[model] . identifier[__name__] . identifier[lower] ())) | def _get_admin_change_url(self, model, context):
"""
Returns the admin change url.
"""
app_label = model._meta.app_label
return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context), app_label, model.__name__.lower())) |
def location_from_dictionary(d):
    """
    Builds a *Location* object out of a data dictionary. Only certain
    properties of the dictionary are used: if these properties are not
    found or cannot be read, an error is issued.

    :param d: a data dictionary
    :type d: dict
    :returns: a *Location* instance
    :raises: *KeyError* if it is impossible to find or read the data
        needed to build the instance
    """
    country = None
    if 'sys' in d and 'country' in d['sys']:
        country = d['sys']['country']
    # Forecast-style payloads nest the location under 'city'; observation
    # payloads carry it at the top level.
    if 'city' in d:
        data = d['city']
    else:
        data = d
    name = data['name'] if 'name' in data else None
    ID = int(data['id']) if 'id' in data else None
    if 'coord' in data:
        lon = data['coord'].get('lon', 0.0)
        lat = data['coord'].get('lat', 0.0)
    # Guard 'station' membership explicitly: previously a payload with no
    # coordinates at all raised a bare KeyError('station') instead of the
    # informative error below.
    elif 'station' in data and 'coord' in data['station']:
        station_coord = data['station']['coord']
        # Station payloads spell longitude either 'lon' or 'lng'.
        if 'lon' in station_coord:
            lon = station_coord.get('lon', 0.0)
        elif 'lng' in station_coord:
            lon = station_coord.get('lng', 0.0)
        else:
            lon = 0.0
        lat = station_coord.get('lat', 0.0)
    else:
        raise KeyError("Impossible to read geographical coordinates from JSON")
    if 'country' in data:
        country = data['country']
    return Location(name, lon, lat, ID, country)
constant[
Builds a *Location* object out of a data dictionary. Only certain
properties of the dictionary are used: if these properties are not
found or cannot be read, an error is issued.
:param d: a data dictionary
:type d: dict
:returns: a *Location* instance
:raises: *KeyError* if it is impossible to find or read the data
needed to build the instance
]
variable[country] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20e955c00> begin[:]
variable[country] assign[=] call[call[name[d]][constant[sys]]][constant[country]]
if compare[constant[city] in name[d]] begin[:]
variable[data] assign[=] call[name[d]][constant[city]]
if compare[constant[name] in name[data]] begin[:]
variable[name] assign[=] call[name[data]][constant[name]]
if compare[constant[id] in name[data]] begin[:]
variable[ID] assign[=] call[name[int], parameter[call[name[data]][constant[id]]]]
if compare[constant[coord] in name[data]] begin[:]
variable[lon] assign[=] call[call[name[data]][constant[coord]].get, parameter[constant[lon], constant[0.0]]]
variable[lat] assign[=] call[call[name[data]][constant[coord]].get, parameter[constant[lat], constant[0.0]]]
if compare[constant[country] in name[data]] begin[:]
variable[country] assign[=] call[name[data]][constant[country]]
return[call[name[Location], parameter[name[name], name[lon], name[lat], name[ID], name[country]]]] | keyword[def] identifier[location_from_dictionary] ( identifier[d] ):
literal[string]
identifier[country] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[d] keyword[and] literal[string] keyword[in] identifier[d] [ literal[string] ]:
identifier[country] = identifier[d] [ literal[string] ][ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[data] = identifier[d] [ literal[string] ]
keyword[else] :
identifier[data] = identifier[d]
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[name] = identifier[data] [ literal[string] ]
keyword[else] :
identifier[name] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[ID] = identifier[int] ( identifier[data] [ literal[string] ])
keyword[else] :
identifier[ID] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[lon] = identifier[data] [ literal[string] ]. identifier[get] ( literal[string] , literal[int] )
identifier[lat] = identifier[data] [ literal[string] ]. identifier[get] ( literal[string] , literal[int] )
keyword[elif] literal[string] keyword[in] identifier[data] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[data] [ literal[string] ][ literal[string] ]:
identifier[lon] = identifier[data] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] , literal[int] )
keyword[elif] literal[string] keyword[in] identifier[data] [ literal[string] ][ literal[string] ]:
identifier[lon] = identifier[data] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] , literal[int] )
keyword[else] :
identifier[lon] = literal[int]
identifier[lat] = identifier[data] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] , literal[int] )
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[country] = identifier[data] [ literal[string] ]
keyword[return] identifier[Location] ( identifier[name] , identifier[lon] , identifier[lat] , identifier[ID] , identifier[country] ) | def location_from_dictionary(d):
"""
Builds a *Location* object out of a data dictionary. Only certain
properties of the dictionary are used: if these properties are not
found or cannot be read, an error is issued.
:param d: a data dictionary
:type d: dict
:returns: a *Location* instance
:raises: *KeyError* if it is impossible to find or read the data
needed to build the instance
"""
country = None
if 'sys' in d and 'country' in d['sys']:
country = d['sys']['country'] # depends on [control=['if'], data=[]]
if 'city' in d:
data = d['city'] # depends on [control=['if'], data=['d']]
else:
data = d
if 'name' in data:
name = data['name'] # depends on [control=['if'], data=['data']]
else:
name = None
if 'id' in data:
ID = int(data['id']) # depends on [control=['if'], data=['data']]
else:
ID = None
if 'coord' in data:
lon = data['coord'].get('lon', 0.0)
lat = data['coord'].get('lat', 0.0) # depends on [control=['if'], data=['data']]
elif 'coord' in data['station']:
if 'lon' in data['station']['coord']:
lon = data['station']['coord'].get('lon', 0.0) # depends on [control=['if'], data=[]]
elif 'lng' in data['station']['coord']:
lon = data['station']['coord'].get('lng', 0.0) # depends on [control=['if'], data=[]]
else:
lon = 0.0
lat = data['station']['coord'].get('lat', 0.0) # depends on [control=['if'], data=[]]
else:
raise KeyError('Impossible to read geographical coordinates from JSON')
if 'country' in data:
country = data['country'] # depends on [control=['if'], data=['data']]
return Location(name, lon, lat, ID, country) |
def nStepsForParam(self, param):
    """Gets the number of steps *parameter* will yield

    :param param: parameter to get the expansion count for
    :type param: dict
    """
    if param['parameter'] == 'filename':
        return len(param['names'])
    step = param['step']
    if step > 0:
        span = abs(param['start'] - param['stop'])
        if span < step:
            return 0
        # Round the range to 4 decimals to sidestep float noise, then
        # count fence-post steps (both endpoints included).
        return int(np.ceil(np.around(span, 4) / float(step)) + 1)
    if param['start'] == param['stop']:
        return 1
    return 0
constant[Gets the number of steps *parameter* will yeild
:param param: parameter to get the expansion count for
:type param: dict
]
if compare[call[name[param]][constant[parameter]] equal[==] constant[filename]] begin[:]
return[call[name[len], parameter[call[name[param]][constant[names]]]]] | keyword[def] identifier[nStepsForParam] ( identifier[self] , identifier[param] ):
literal[string]
keyword[if] identifier[param] [ literal[string] ]== literal[string] :
keyword[return] identifier[len] ( identifier[param] [ literal[string] ])
keyword[else] :
keyword[if] identifier[param] [ literal[string] ]> literal[int] :
keyword[if] identifier[abs] ( identifier[param] [ literal[string] ]- identifier[param] [ literal[string] ])< identifier[param] [ literal[string] ]:
keyword[return] literal[int]
identifier[nsteps] = identifier[np] . identifier[around] ( identifier[abs] ( identifier[param] [ literal[string] ]- identifier[param] [ literal[string] ]), literal[int] )/ identifier[float] ( identifier[param] [ literal[string] ])
identifier[nsteps] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[nsteps] )+ literal[int] )
keyword[elif] identifier[param] [ literal[string] ]== identifier[param] [ literal[string] ]:
identifier[nsteps] = literal[int]
keyword[else] :
identifier[nsteps] = literal[int]
keyword[return] identifier[nsteps] | def nStepsForParam(self, param):
"""Gets the number of steps *parameter* will yeild
:param param: parameter to get the expansion count for
:type param: dict
"""
if param['parameter'] == 'filename':
return len(param['names']) # depends on [control=['if'], data=[]]
else:
if param['step'] > 0:
if abs(param['start'] - param['stop']) < param['step']:
return 0 # depends on [control=['if'], data=[]]
# print 'range', param['start'] - param['stop']
nsteps = np.around(abs(param['start'] - param['stop']), 4) / float(param['step'])
nsteps = int(np.ceil(nsteps) + 1) # depends on [control=['if'], data=[]]
elif param['start'] == param['stop']:
nsteps = 1 # depends on [control=['if'], data=[]]
else:
nsteps = 0
return nsteps |
def sync_all(name, **kwargs):
    '''
    Performs the same task as saltutil.sync_all module

    See :mod:`saltutil module for full list of options <salt.modules.saltutil>`

    .. code-block:: yaml

        sync_everything:
          saltutil.sync_all:
            - refresh: True
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    # In test mode, report what would happen without syncing anything.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = "saltutil.sync_all would have been run"
        return ret
    try:
        sync_status = __salt__['saltutil.sync_all'](**kwargs)
    except Exception as e:
        log.error("Failed to run saltutil.sync_all: %s", e)
        ret['result'] = False
        ret['comment'] = "Failed to run sync_all: {0}".format(e)
        return ret
    # Record only the module categories that actually synced something.
    changes = {key: value for key, value in sync_status.items() if value}
    ret['changes'] = changes
    ret['comment'] = "Sync performed" if changes else "No updates to sync"
    return ret
constant[
Performs the same task as saltutil.sync_all module
See :mod:`saltutil module for full list of options <salt.modules.saltutil>`
.. code-block:: yaml
sync_everything:
saltutil.sync_all:
- refresh: True
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18f7235b0>, <ast.Constant object at 0x7da18f723040>, <ast.Constant object at 0x7da18f722ef0>, <ast.Constant object at 0x7da18f722c50>], [<ast.Name object at 0x7da18f723f70>, <ast.Dict object at 0x7da18f722770>, <ast.Constant object at 0x7da18f7216c0>, <ast.Constant object at 0x7da18f721ff0>]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] constant[saltutil.sync_all would have been run]
return[name[ret]]
<ast.Try object at 0x7da18f723910>
if <ast.UnaryOp object at 0x7da18f7205b0> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[No updates to sync]
return[name[ret]] | keyword[def] identifier[sync_all] ( identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[True] , literal[string] : literal[string] }
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[try] :
identifier[sync_status] = identifier[__salt__] [ literal[string] ](** identifier[kwargs] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[sync_status] . identifier[items] ():
keyword[if] identifier[value] :
identifier[ret] [ literal[string] ][ identifier[key] ]= identifier[value]
identifier[ret] [ literal[string] ]= literal[string]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] , identifier[e] )
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[e] )
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def sync_all(name, **kwargs):
"""
Performs the same task as saltutil.sync_all module
See :mod:`saltutil module for full list of options <salt.modules.saltutil>`
.. code-block:: yaml
sync_everything:
saltutil.sync_all:
- refresh: True
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'saltutil.sync_all would have been run'
return ret # depends on [control=['if'], data=[]]
try:
sync_status = __salt__['saltutil.sync_all'](**kwargs)
for (key, value) in sync_status.items():
if value:
ret['changes'][key] = value
ret['comment'] = 'Sync performed' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
log.error('Failed to run saltutil.sync_all: %s', e)
ret['result'] = False
ret['comment'] = 'Failed to run sync_all: {0}'.format(e)
return ret # depends on [control=['except'], data=['e']]
if not ret['changes']:
ret['comment'] = 'No updates to sync' # depends on [control=['if'], data=[]]
return ret |
def prep_db_parallel(samples, parallel_fn):
    """Prepares gemini databases in parallel, handling jointly called populations.

    :param samples: pipeline samples, grouped via ``_group_by_batches`` into
        jointly-called batches, single samples and extra (no variant call)
        samples.
    :param parallel_fn: dispatcher that runs the named parallel task over a
        list of argument lists and returns ``(batch_id, out_file)`` pairs.
    :returns: list of single-item sample lists with each variant call
        annotated with its prepared gemini database, extras appended last.
    """
    batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
    # Build one "prep_gemini_db" work item per (batch, caller) group and per
    # single sample.  (A previously-tracked `has_batches` flag was unused and
    # has been removed.)
    to_process = []
    for (name, caller), info in batch_groups.items():
        fnames = [x[0] for x in info]
        to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
    for name, caller, data, fname in singles:
        to_process.append([[fname], (str(name), caller, False), [data], extras])
    output = parallel_fn("prep_gemini_db", to_process)
    # Map (batch name, variant caller) -> prepared database file.
    out_fetch = {}
    for batch_id, out_file in output:
        out_fetch[tuple(batch_id)] = out_file
    out = []
    for batch_name, data in out_retrieve:
        out_variants = []
        for vrn in data["variants"]:
            # A call can opt out of population annotation via its
            # "population" flag (defaults to using it).
            use_population = vrn.pop("population", True)
            if use_population:
                vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
            out_variants.append(vrn)
        data["variants"] = out_variants
        out.append([data])
    for x in extras:
        out.append([x])
    return out
constant[Prepares gemini databases in parallel, handling jointly called populations.
]
<ast.Tuple object at 0x7da1b19841c0> assign[=] call[name[_group_by_batches], parameter[name[samples], name[_has_variant_calls]]]
variable[to_process] assign[=] list[[]]
variable[has_batches] assign[=] constant[False]
for taget[tuple[[<ast.Tuple object at 0x7da1b19857e0>, <ast.Name object at 0x7da1b19845e0>]]] in starred[call[name[batch_groups].items, parameter[]]] begin[:]
variable[fnames] assign[=] <ast.ListComp object at 0x7da1b1986710>
call[name[to_process].append, parameter[list[[<ast.Name object at 0x7da1b1985120>, <ast.Tuple object at 0x7da1b1986830>, <ast.ListComp object at 0x7da1b1986620>, <ast.Name object at 0x7da1b1984cd0>]]]]
variable[has_batches] assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da1b1987100>, <ast.Name object at 0x7da1b1987010>, <ast.Name object at 0x7da1b1984070>, <ast.Name object at 0x7da1b1985ab0>]]] in starred[name[singles]] begin[:]
call[name[to_process].append, parameter[list[[<ast.List object at 0x7da1b19b87c0>, <ast.Tuple object at 0x7da1b19baa70>, <ast.List object at 0x7da1b19bbb20>, <ast.Name object at 0x7da1b19babc0>]]]]
variable[output] assign[=] call[name[parallel_fn], parameter[constant[prep_gemini_db], name[to_process]]]
variable[out_fetch] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b19bbf10>, <ast.Name object at 0x7da1b19b8340>]]] in starred[name[output]] begin[:]
call[name[out_fetch]][call[name[tuple], parameter[name[batch_id]]]] assign[=] name[out_file]
variable[out] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b19b98d0>, <ast.Name object at 0x7da1b19bafb0>]]] in starred[name[out_retrieve]] begin[:]
variable[out_variants] assign[=] list[[]]
for taget[name[vrn]] in starred[call[name[data]][constant[variants]]] begin[:]
variable[use_population] assign[=] call[name[vrn].pop, parameter[constant[population], constant[True]]]
if name[use_population] begin[:]
call[name[vrn]][constant[population]] assign[=] call[name[out_fetch]][tuple[[<ast.Name object at 0x7da1b1896560>, <ast.Subscript object at 0x7da1b18945e0>]]]
call[name[out_variants].append, parameter[name[vrn]]]
call[name[data]][constant[variants]] assign[=] name[out_variants]
call[name[out].append, parameter[list[[<ast.Name object at 0x7da1b18338b0>]]]]
for taget[name[x]] in starred[name[extras]] begin[:]
call[name[out].append, parameter[list[[<ast.Name object at 0x7da1b1830fd0>]]]]
return[name[out]] | keyword[def] identifier[prep_db_parallel] ( identifier[samples] , identifier[parallel_fn] ):
literal[string]
identifier[batch_groups] , identifier[singles] , identifier[out_retrieve] , identifier[extras] = identifier[_group_by_batches] ( identifier[samples] , identifier[_has_variant_calls] )
identifier[to_process] =[]
identifier[has_batches] = keyword[False]
keyword[for] ( identifier[name] , identifier[caller] ), identifier[info] keyword[in] identifier[batch_groups] . identifier[items] ():
identifier[fnames] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[info] ]
identifier[to_process] . identifier[append] ([ identifier[fnames] ,( identifier[str] ( identifier[name] ), identifier[caller] , keyword[True] ),[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[info] ], identifier[extras] ])
identifier[has_batches] = keyword[True]
keyword[for] identifier[name] , identifier[caller] , identifier[data] , identifier[fname] keyword[in] identifier[singles] :
identifier[to_process] . identifier[append] ([[ identifier[fname] ],( identifier[str] ( identifier[name] ), identifier[caller] , keyword[False] ),[ identifier[data] ], identifier[extras] ])
identifier[output] = identifier[parallel_fn] ( literal[string] , identifier[to_process] )
identifier[out_fetch] ={}
keyword[for] identifier[batch_id] , identifier[out_file] keyword[in] identifier[output] :
identifier[out_fetch] [ identifier[tuple] ( identifier[batch_id] )]= identifier[out_file]
identifier[out] =[]
keyword[for] identifier[batch_name] , identifier[data] keyword[in] identifier[out_retrieve] :
identifier[out_variants] =[]
keyword[for] identifier[vrn] keyword[in] identifier[data] [ literal[string] ]:
identifier[use_population] = identifier[vrn] . identifier[pop] ( literal[string] , keyword[True] )
keyword[if] identifier[use_population] :
identifier[vrn] [ literal[string] ]= identifier[out_fetch] [( identifier[batch_name] , identifier[vrn] [ literal[string] ])]
identifier[out_variants] . identifier[append] ( identifier[vrn] )
identifier[data] [ literal[string] ]= identifier[out_variants]
identifier[out] . identifier[append] ([ identifier[data] ])
keyword[for] identifier[x] keyword[in] identifier[extras] :
identifier[out] . identifier[append] ([ identifier[x] ])
keyword[return] identifier[out] | def prep_db_parallel(samples, parallel_fn):
"""Prepares gemini databases in parallel, handling jointly called populations.
"""
(batch_groups, singles, out_retrieve, extras) = _group_by_batches(samples, _has_variant_calls)
to_process = []
has_batches = False
for ((name, caller), info) in batch_groups.items():
fnames = [x[0] for x in info]
to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
has_batches = True # depends on [control=['for'], data=[]]
for (name, caller, data, fname) in singles:
to_process.append([[fname], (str(name), caller, False), [data], extras]) # depends on [control=['for'], data=[]]
output = parallel_fn('prep_gemini_db', to_process)
out_fetch = {}
for (batch_id, out_file) in output:
out_fetch[tuple(batch_id)] = out_file # depends on [control=['for'], data=[]]
out = []
for (batch_name, data) in out_retrieve:
out_variants = []
for vrn in data['variants']:
use_population = vrn.pop('population', True)
if use_population:
vrn['population'] = out_fetch[batch_name, vrn['variantcaller']] # depends on [control=['if'], data=[]]
out_variants.append(vrn) # depends on [control=['for'], data=['vrn']]
data['variants'] = out_variants
out.append([data]) # depends on [control=['for'], data=[]]
for x in extras:
out.append([x]) # depends on [control=['for'], data=['x']]
return out |
def _GetLink(self):
    """Retrieves the link.

    Returns:
      str: full path of the linked file entry.
    """
    # Lazily resolved and cached; reuse a previously computed value.
    if self._link is not None:
        return self._link
    self._link = ''
    # Only symbolic link entries carry link data.
    if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
        return self._link
    archive_file = self._file_system.GetCPIOArchiveFile()
    link_data = archive_file.ReadDataAtOffset(
        self._cpio_archive_file_entry.data_offset,
        self._cpio_archive_file_entry.data_size)
    # TODO: should this be ASCII?
    self._link = link_data.decode('ascii')
    return self._link
constant[Retrieves the link.
Returns:
str: full path of the linked file entry.
]
if compare[name[self]._link is constant[None]] begin[:]
name[self]._link assign[=] constant[]
if compare[name[self].entry_type not_equal[!=] name[definitions].FILE_ENTRY_TYPE_LINK] begin[:]
return[name[self]._link]
variable[cpio_archive_file] assign[=] call[name[self]._file_system.GetCPIOArchiveFile, parameter[]]
variable[link_data] assign[=] call[name[cpio_archive_file].ReadDataAtOffset, parameter[name[self]._cpio_archive_file_entry.data_offset, name[self]._cpio_archive_file_entry.data_size]]
name[self]._link assign[=] call[name[link_data].decode, parameter[constant[ascii]]]
return[name[self]._link] | keyword[def] identifier[_GetLink] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_link] keyword[is] keyword[None] :
identifier[self] . identifier[_link] = literal[string]
keyword[if] identifier[self] . identifier[entry_type] != identifier[definitions] . identifier[FILE_ENTRY_TYPE_LINK] :
keyword[return] identifier[self] . identifier[_link]
identifier[cpio_archive_file] = identifier[self] . identifier[_file_system] . identifier[GetCPIOArchiveFile] ()
identifier[link_data] = identifier[cpio_archive_file] . identifier[ReadDataAtOffset] (
identifier[self] . identifier[_cpio_archive_file_entry] . identifier[data_offset] ,
identifier[self] . identifier[_cpio_archive_file_entry] . identifier[data_size] )
identifier[self] . identifier[_link] = identifier[link_data] . identifier[decode] ( literal[string] )
keyword[return] identifier[self] . identifier[_link] | def _GetLink(self):
"""Retrieves the link.
Returns:
str: full path of the linked file entry.
"""
if self._link is None:
self._link = ''
if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
return self._link # depends on [control=['if'], data=[]]
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
link_data = cpio_archive_file.ReadDataAtOffset(self._cpio_archive_file_entry.data_offset, self._cpio_archive_file_entry.data_size)
# TODO: should this be ASCII?
self._link = link_data.decode('ascii') # depends on [control=['if'], data=[]]
return self._link |
def common_twig(self):
    """
    The twig that is common between all items in this ParameterSet.
    This twig gives a single string which can point back to this ParameterSet
    (but may include other entries as well)

    see also :meth:`uniquetwig`

    :return: twig (full) of this Parameter
    """
    parts = []
    for field in _meta_fields_twig:
        # Only include meta fields that are actually set on this set.
        if self.meta.get(field) is not None:
            parts.append(getattr(self, field))
    return "@".join(parts)
constant[
The twig that is common between all items in this ParameterSet.
This twig gives a single string which can point back to this ParameterSet
(but may include other entries as well)
see also :meth:`uniquetwig`
:return: twig (full) of this Parameter
]
return[call[constant[@].join, parameter[<ast.ListComp object at 0x7da2054a6710>]]] | keyword[def] identifier[common_twig] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[getattr] ( identifier[self] , identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[_meta_fields_twig] keyword[if] identifier[self] . identifier[meta] . identifier[get] ( identifier[k] ) keyword[is] keyword[not] keyword[None] ]) | def common_twig(self):
"""
The twig that is common between all items in this ParameterSet.
This twig gives a single string which can point back to this ParameterSet
(but may include other entries as well)
see also :meth:`uniquetwig`
:return: twig (full) of this Parameter
"""
return '@'.join([getattr(self, k) for k in _meta_fields_twig if self.meta.get(k) is not None]) |
def cv(row, col_name, arg, current_data_model, df, con):
    """
    Validate that row[col_name] contains only values from the appropriate
    controlled vocabulary.

    :param row: mapping (or pandas Series) holding the cell to validate
    :param col_name: name of the column being validated; also keys into the
        controlled vocabularies
    :param arg: label used in the error message
    :param current_data_model: unused; kept for validator-signature compatibility
    :param df: unused; kept for validator-signature compatibility
    :param con: object exposing controlled vocabularies at
        ``con.vocab.vocabularies``
    :returns: None if every value is valid, otherwise an error string naming
        the first offending value
    """
    vocabulary = con.vocab.vocabularies
    cell_value = str(row[col_name])
    if not cell_value or cell_value == "None":
        return None
    # Cells may hold multiple colon-delimited values; validate each one.
    cell_values = [c.strip() for c in cell_value.split(":")]
    # Lower-case the vocabulary for case-insensitive comparison, skipping
    # entries that cannot be represented as str (weird unicode).
    possible_values = []
    for val in vocabulary[col_name]:
        try:
            possible_values.append(str(val).lower())
        except UnicodeEncodeError as ex:
            print(val, ex)
    for value in cell_values:
        low = str(value).lower()
        if low in ("nan", "none"):
            continue
        if low in possible_values:
            continue
        # Numeric vocabularies may be stored as floats ("1" should match "1.0").
        # Narrowed from a bare except: only conversion failures are expected.
        try:
            if str(float(value)) in possible_values:
                continue
        except (TypeError, ValueError):
            pass
        return '"{}" is not in controlled vocabulary for {}'.format(value, arg)
    return None
constant[
row[col_name] must contain only values from the appropriate controlled vocabulary
]
variable[vocabulary] assign[=] name[con].vocab.vocabularies
variable[cell_value] assign[=] call[name[str], parameter[call[name[row]][name[col_name]]]]
if <ast.UnaryOp object at 0x7da1b042f460> begin[:]
return[constant[None]]
variable[cell_values] assign[=] call[name[cell_value].split, parameter[constant[:]]]
variable[cell_values] assign[=] <ast.ListComp object at 0x7da1b042fca0>
variable[possible_values] assign[=] list[[]]
for taget[name[val]] in starred[call[name[vocabulary]][name[col_name]]] begin[:]
<ast.Try object at 0x7da1b042ef80>
for taget[name[value]] in starred[name[cell_values]] begin[:]
if compare[call[call[name[str], parameter[name[value]]].lower, parameter[]] equal[==] constant[nan]] begin[:]
continue
return[constant[None]] | keyword[def] identifier[cv] ( identifier[row] , identifier[col_name] , identifier[arg] , identifier[current_data_model] , identifier[df] , identifier[con] ):
literal[string]
identifier[vocabulary] = identifier[con] . identifier[vocab] . identifier[vocabularies]
identifier[cell_value] = identifier[str] ( identifier[row] [ identifier[col_name] ])
keyword[if] keyword[not] identifier[cell_value] :
keyword[return] keyword[None]
keyword[elif] identifier[cell_value] == literal[string] :
keyword[return] keyword[None]
identifier[cell_values] = identifier[cell_value] . identifier[split] ( literal[string] )
identifier[cell_values] =[ identifier[c] . identifier[strip] () keyword[for] identifier[c] keyword[in] identifier[cell_values] ]
identifier[possible_values] =[]
keyword[for] identifier[val] keyword[in] identifier[vocabulary] [ identifier[col_name] ]:
keyword[try] :
identifier[possible_values] . identifier[append] ( identifier[str] ( identifier[val] ). identifier[lower] ())
keyword[except] identifier[UnicodeEncodeError] keyword[as] identifier[ex] :
identifier[print] ( identifier[val] , identifier[ex] )
keyword[for] identifier[value] keyword[in] identifier[cell_values] :
keyword[if] identifier[str] ( identifier[value] ). identifier[lower] ()== literal[string] :
keyword[continue]
keyword[elif] identifier[str] ( identifier[value] ). identifier[lower] () keyword[in] identifier[possible_values] :
keyword[continue]
keyword[elif] identifier[value] . identifier[lower] ()== literal[string] :
keyword[continue]
keyword[else] :
keyword[try] :
keyword[if] identifier[str] ( identifier[float] ( identifier[value] )) keyword[in] identifier[possible_values] :
keyword[continue]
keyword[except] :
keyword[pass]
keyword[return] literal[string] . identifier[format] ( identifier[value] , identifier[arg] )
keyword[return] keyword[None] | def cv(row, col_name, arg, current_data_model, df, con):
"""
row[col_name] must contain only values from the appropriate controlled vocabulary
"""
vocabulary = con.vocab.vocabularies
cell_value = str(row[col_name])
if not cell_value:
return None # depends on [control=['if'], data=[]]
elif cell_value == 'None':
return None # depends on [control=['if'], data=[]]
cell_values = cell_value.split(':')
cell_values = [c.strip() for c in cell_values]
# get possible values for controlled vocabulary
# exclude weird unicode
possible_values = []
for val in vocabulary[col_name]:
try:
possible_values.append(str(val).lower()) # depends on [control=['try'], data=[]]
except UnicodeEncodeError as ex:
print(val, ex) # depends on [control=['except'], data=['ex']] # depends on [control=['for'], data=['val']]
for value in cell_values:
if str(value).lower() == 'nan':
continue # depends on [control=['if'], data=[]]
elif str(value).lower() in possible_values:
continue # depends on [control=['if'], data=[]]
elif value.lower() == 'none':
continue # depends on [control=['if'], data=[]]
else:
try:
if str(float(value)) in possible_values:
continue # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
return '"{}" is not in controlled vocabulary for {}'.format(value, arg) # depends on [control=['for'], data=['value']]
return None |
def _set_portfast(self, v, load=False):
    """
    Setter method for portfast, mapped from YANG variable /interface/port_channel/spanning_tree/portfast (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_portfast is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_portfast() directly.

    NOTE(review): this reads like pyangbind-generated binding code; the
    ``load`` parameter is unused in the body but presumably kept for the
    generated setter-signature convention — confirm before changing.
    """
    # Typed wrappers expose ``_utype``; unwrap to the underlying value
    # before re-wrapping it in the modelled container type below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap ``v`` as the YANG 'portfast' container. YANGDynClass raises
      # TypeError/ValueError for values incompatible with the model.
      t = YANGDynClass(v,base=portfast.portfast, is_container='container', presence=False, yang_name="portfast", rest_name="portfast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable an interface to move directly to forwarding on link up', u'display-when': u' ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/pvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected type.
      raise ValueError({
          'error-string': """portfast must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=portfast.portfast, is_container='container', presence=False, yang_name="portfast", rest_name="portfast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable an interface to move directly to forwarding on link up', u'display-when': u' ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/pvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
        })
    # Name-mangled private storage (_<ClassName>__portfast).
    self.__portfast = t
    # Notify the owner of the change when it supports change notification.
    if hasattr(self, '_set'):
      self._set()
constant[
Setter method for portfast, mapped from YANG variable /interface/port_channel/spanning_tree/portfast (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_portfast is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_portfast() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18bccb460>
name[self].__portfast assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_portfast] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[portfast] . identifier[portfast] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__portfast] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_portfast(self, v, load=False):
"""
Setter method for portfast, mapped from YANG variable /interface/port_channel/spanning_tree/portfast (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_portfast is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_portfast() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=portfast.portfast, is_container='container', presence=False, yang_name='portfast', rest_name='portfast', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable an interface to move directly to forwarding on link up', u'display-when': u' ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/pvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'portfast must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=portfast.portfast, is_container=\'container\', presence=False, yang_name="portfast", rest_name="portfast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Enable an interface to move directly to forwarding on link up\', u\'display-when\': u\' ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/pvst))\'}}, namespace=\'urn:brocade.com:mgmt:brocade-xstp\', defining_module=\'brocade-xstp\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__portfast = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def _get_variant_region(self):
    """Categorize variant by location in transcript (5'utr, exon, intron, 3'utr)

    :return "exon", "intron", "five_utr", "three_utr", "whole_gene"
    :rtype str
    """
    start = self._var_c.posedit.pos.start
    end = self._var_c.posedit.pos.end
    # Both endpoints anchored past the CDS end -> 3' UTR.
    if start.datum == Datum.CDS_END and end.datum == Datum.CDS_END:
        return self.T_UTR
    # Both endpoints upstream of the CDS start -> 5' UTR.
    if start.base < 0 and end.base < 0:
        return self.F_UTR
    # Spans from upstream of the CDS to past its end -> whole gene.
    if start.base < 0 and end.datum == Datum.CDS_END:
        return self.WHOLE_GENE
    # A nonzero intron offset on either endpoint -> intronic
    # (anything intronic is left out for now).
    if start.offset != 0 or end.offset != 0:
        return self.INTRON
    # Anything else that contains an exon.
    return self.EXON
constant[Categorize variant by location in transcript (5'utr, exon, intron, 3'utr)
:return "exon", "intron", "five_utr", "three_utr", "whole_gene"
:rtype str
]
if <ast.BoolOp object at 0x7da20c6a8e20> begin[:]
variable[result] assign[=] name[self].T_UTR
return[name[result]] | keyword[def] identifier[_get_variant_region] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[start] . identifier[datum] == identifier[Datum] . identifier[CDS_END] keyword[and] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[end] . identifier[datum] == identifier[Datum] . identifier[CDS_END] :
identifier[result] = identifier[self] . identifier[T_UTR]
keyword[elif] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[start] . identifier[base] < literal[int] keyword[and] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[end] . identifier[base] < literal[int] :
identifier[result] = identifier[self] . identifier[F_UTR]
keyword[elif] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[start] . identifier[base] < literal[int] keyword[and] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[end] . identifier[datum] == identifier[Datum] . identifier[CDS_END] :
identifier[result] = identifier[self] . identifier[WHOLE_GENE]
keyword[elif] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[start] . identifier[offset] != literal[int] keyword[or] identifier[self] . identifier[_var_c] . identifier[posedit] . identifier[pos] . identifier[end] . identifier[offset] != literal[int] :
identifier[result] = identifier[self] . identifier[INTRON]
keyword[else] :
identifier[result] = identifier[self] . identifier[EXON]
keyword[return] identifier[result] | def _get_variant_region(self):
"""Categorize variant by location in transcript (5'utr, exon, intron, 3'utr)
:return "exon", "intron", "five_utr", "three_utr", "whole_gene"
:rtype str
"""
if self._var_c.posedit.pos.start.datum == Datum.CDS_END and self._var_c.posedit.pos.end.datum == Datum.CDS_END:
result = self.T_UTR # depends on [control=['if'], data=[]]
elif self._var_c.posedit.pos.start.base < 0 and self._var_c.posedit.pos.end.base < 0:
result = self.F_UTR # depends on [control=['if'], data=[]]
elif self._var_c.posedit.pos.start.base < 0 and self._var_c.posedit.pos.end.datum == Datum.CDS_END:
result = self.WHOLE_GENE # depends on [control=['if'], data=[]]
elif self._var_c.posedit.pos.start.offset != 0 or self._var_c.posedit.pos.end.offset != 0:
# leave out anything intronic for now
result = self.INTRON # depends on [control=['if'], data=[]]
else: # anything else that contains an exon
result = self.EXON
return result |
def sync_remote_to_local(force="no"):
    """
    Replace your local database with the remote one.

    The remote database is exported with wp-cli to a timestamped dump in
    /tmp, downloaded, imported into the local WordPress install, and both
    temporary dump files are removed afterwards.

    Example:
        sync_remote_to_local:force=yes
    """
    assert "local_wp_dir" in env, "Missing local_wp_dir in env"
    # Unless forced, ask for confirmation before clobbering the local db.
    if force != "yes":
        message = "This will replace your local database with your "\
            "remote, are you sure [y/n]"
        answer = prompt(message, "y")
        if answer != "y":
            logger.info("Sync stopped")
            return
    init_tasks()  # Bootstrap fabrik
    # Millisecond timestamp keeps dump filenames unique per run.
    remote_file = "sync_%s.sql" % int(time.time()*1000)
    remote_path = "/tmp/%s" % remote_file
    # Export the remote database from the current release directory.
    with env.cd(paths.get_current_path()):
        env.run("wp db export %s" % remote_path)
    local_wp_dir = env.local_wp_dir
    local_path = "/tmp/%s" % remote_file
    # Download sync file
    get(remote_path, local_path)
    # Import the dump into the local WordPress install.
    with lcd(local_wp_dir):
        elocal("wp db import %s" % local_path)
    # Cleanup
    env.run("rm %s" % remote_path)
    elocal("rm %s" % local_path)
constant[
Replace your remote db with your local
Example:
sync_remote_to_local:force=yes
]
assert[compare[constant[local_wp_dir] in name[env]]]
if compare[name[force] not_equal[!=] constant[yes]] begin[:]
variable[message] assign[=] constant[This will replace your local database with your remote, are you sure [y/n]]
variable[answer] assign[=] call[name[prompt], parameter[name[message], constant[y]]]
if compare[name[answer] not_equal[!=] constant[y]] begin[:]
call[name[logger].info, parameter[constant[Sync stopped]]]
return[None]
call[name[init_tasks], parameter[]]
variable[remote_file] assign[=] binary_operation[constant[sync_%s.sql] <ast.Mod object at 0x7da2590d6920> call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000]]]]]
variable[remote_path] assign[=] binary_operation[constant[/tmp/%s] <ast.Mod object at 0x7da2590d6920> name[remote_file]]
with call[name[env].cd, parameter[call[name[paths].get_current_path, parameter[]]]] begin[:]
call[name[env].run, parameter[binary_operation[constant[wp db export %s] <ast.Mod object at 0x7da2590d6920> name[remote_path]]]]
variable[local_wp_dir] assign[=] name[env].local_wp_dir
variable[local_path] assign[=] binary_operation[constant[/tmp/%s] <ast.Mod object at 0x7da2590d6920> name[remote_file]]
call[name[get], parameter[name[remote_path], name[local_path]]]
with call[name[lcd], parameter[name[local_wp_dir]]] begin[:]
call[name[elocal], parameter[binary_operation[constant[wp db import %s] <ast.Mod object at 0x7da2590d6920> name[local_path]]]]
call[name[env].run, parameter[binary_operation[constant[rm %s] <ast.Mod object at 0x7da2590d6920> name[remote_path]]]]
call[name[elocal], parameter[binary_operation[constant[rm %s] <ast.Mod object at 0x7da2590d6920> name[local_path]]]] | keyword[def] identifier[sync_remote_to_local] ( identifier[force] = literal[string] ):
literal[string]
keyword[assert] literal[string] keyword[in] identifier[env] , literal[string]
keyword[if] identifier[force] != literal[string] :
identifier[message] = literal[string] literal[string]
identifier[answer] = identifier[prompt] ( identifier[message] , literal[string] )
keyword[if] identifier[answer] != literal[string] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[return]
identifier[init_tasks] ()
identifier[remote_file] = literal[string] % identifier[int] ( identifier[time] . identifier[time] ()* literal[int] )
identifier[remote_path] = literal[string] % identifier[remote_file]
keyword[with] identifier[env] . identifier[cd] ( identifier[paths] . identifier[get_current_path] ()):
identifier[env] . identifier[run] ( literal[string] % identifier[remote_path] )
identifier[local_wp_dir] = identifier[env] . identifier[local_wp_dir]
identifier[local_path] = literal[string] % identifier[remote_file]
identifier[get] ( identifier[remote_path] , identifier[local_path] )
keyword[with] identifier[lcd] ( identifier[local_wp_dir] ):
identifier[elocal] ( literal[string] % identifier[local_path] )
identifier[env] . identifier[run] ( literal[string] % identifier[remote_path] )
identifier[elocal] ( literal[string] % identifier[local_path] ) | def sync_remote_to_local(force='no'):
"""
Replace your remote db with your local
Example:
sync_remote_to_local:force=yes
"""
assert 'local_wp_dir' in env, 'Missing local_wp_dir in env'
if force != 'yes':
message = 'This will replace your local database with your remote, are you sure [y/n]'
answer = prompt(message, 'y')
if answer != 'y':
logger.info('Sync stopped')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
init_tasks() # Bootstrap fabrik
remote_file = 'sync_%s.sql' % int(time.time() * 1000)
remote_path = '/tmp/%s' % remote_file
with env.cd(paths.get_current_path()):
env.run('wp db export %s' % remote_path) # depends on [control=['with'], data=[]]
local_wp_dir = env.local_wp_dir
local_path = '/tmp/%s' % remote_file
# Download sync file
get(remote_path, local_path)
with lcd(local_wp_dir):
elocal('wp db import %s' % local_path) # depends on [control=['with'], data=[]]
# Cleanup
env.run('rm %s' % remote_path)
elocal('rm %s' % local_path) |
def create_geoms(self, plot):
    """
    This guide has no geoms of its own.

    Returns self when at least one layer will use this guide (so the
    colorbar should be drawn), and None when no layer does.
    """
    for layer in plot.layers:
        hidden = set()
        show = layer.show_legend
        if isinstance(show, dict):
            # Normalise aesthetic names, then collect those switched off.
            layer.show_legend = rename_aesthetics(show)
            hidden = {
                ae for ae, flag in layer.show_legend.items()
                if not flag
            }
        elif show not in (None, True):
            # Legend explicitly disabled for this layer.
            continue
        # Any matched aesthetic that is not hidden means the layer uses
        # this guide.
        if set(self.legend_aesthetics(layer, plot)) - hidden:
            return self
    # No layer uses this guide.
    return None
constant[
This guide is not geom based
Return self if colorbar will be drawn and None if not.
]
for taget[name[l]] in starred[name[plot].layers] begin[:]
variable[exclude] assign[=] call[name[set], parameter[]]
if call[name[isinstance], parameter[name[l].show_legend, name[dict]]] begin[:]
name[l].show_legend assign[=] call[name[rename_aesthetics], parameter[name[l].show_legend]]
variable[exclude] assign[=] <ast.SetComp object at 0x7da18ede5c00>
variable[matched] assign[=] call[name[self].legend_aesthetics, parameter[name[l], name[plot]]]
if binary_operation[call[name[set], parameter[name[matched]]] - name[exclude]] begin[:]
break
return[name[self]] | keyword[def] identifier[create_geoms] ( identifier[self] , identifier[plot] ):
literal[string]
keyword[for] identifier[l] keyword[in] identifier[plot] . identifier[layers] :
identifier[exclude] = identifier[set] ()
keyword[if] identifier[isinstance] ( identifier[l] . identifier[show_legend] , identifier[dict] ):
identifier[l] . identifier[show_legend] = identifier[rename_aesthetics] ( identifier[l] . identifier[show_legend] )
identifier[exclude] ={ identifier[ae] keyword[for] identifier[ae] , identifier[val] keyword[in] identifier[l] . identifier[show_legend] . identifier[items] ()
keyword[if] keyword[not] identifier[val] }
keyword[elif] identifier[l] . identifier[show_legend] keyword[not] keyword[in] ( keyword[None] , keyword[True] ):
keyword[continue]
identifier[matched] = identifier[self] . identifier[legend_aesthetics] ( identifier[l] , identifier[plot] )
keyword[if] identifier[set] ( identifier[matched] )- identifier[exclude] :
keyword[break]
keyword[else] :
keyword[return] keyword[None]
keyword[return] identifier[self] | def create_geoms(self, plot):
"""
This guide is not geom based
Return self if colorbar will be drawn and None if not.
"""
for l in plot.layers:
exclude = set()
if isinstance(l.show_legend, dict):
l.show_legend = rename_aesthetics(l.show_legend)
exclude = {ae for (ae, val) in l.show_legend.items() if not val} # depends on [control=['if'], data=[]]
elif l.show_legend not in (None, True):
continue # depends on [control=['if'], data=[]]
matched = self.legend_aesthetics(l, plot)
# layer uses guide
if set(matched) - exclude:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['l']]
else:
# no break, no layer uses this guide
return None
return self |
def setup(config):
    """
    Prepare the global state for an example run.

    Builds the configured formatter, wraps it with verbose/colored
    decorators when requested, then installs a fresh ExampleResult on
    both the ivoire module and its manager.
    """
    wrapped = config.Formatter()
    if config.verbose:
        wrapped = result.Verbose(wrapped)
    if config.color:
        wrapped = result.Colored(wrapped)
    outcome = result.ExampleResult(wrapped)
    ivoire._manager.result = outcome
    ivoire.current_result = outcome
constant[
Setup the environment for an example run.
]
variable[formatter] assign[=] call[name[config].Formatter, parameter[]]
if name[config].verbose begin[:]
variable[formatter] assign[=] call[name[result].Verbose, parameter[name[formatter]]]
if name[config].color begin[:]
variable[formatter] assign[=] call[name[result].Colored, parameter[name[formatter]]]
variable[current_result] assign[=] call[name[result].ExampleResult, parameter[name[formatter]]]
name[ivoire].current_result assign[=] name[current_result] | keyword[def] identifier[setup] ( identifier[config] ):
literal[string]
identifier[formatter] = identifier[config] . identifier[Formatter] ()
keyword[if] identifier[config] . identifier[verbose] :
identifier[formatter] = identifier[result] . identifier[Verbose] ( identifier[formatter] )
keyword[if] identifier[config] . identifier[color] :
identifier[formatter] = identifier[result] . identifier[Colored] ( identifier[formatter] )
identifier[current_result] = identifier[result] . identifier[ExampleResult] ( identifier[formatter] )
identifier[ivoire] . identifier[current_result] = identifier[ivoire] . identifier[_manager] . identifier[result] = identifier[current_result] | def setup(config):
"""
Setup the environment for an example run.
"""
formatter = config.Formatter()
if config.verbose:
formatter = result.Verbose(formatter) # depends on [control=['if'], data=[]]
if config.color:
formatter = result.Colored(formatter) # depends on [control=['if'], data=[]]
current_result = result.ExampleResult(formatter)
ivoire.current_result = ivoire._manager.result = current_result |
def remove_nan_normals(self):
    """Drop normal vectors whose magnitude is not finite.

    Note
    ----
    This returns nothing and updates the NormalCloud in-place.
    """
    # Column-wise vector magnitudes; keep only the finite ones.
    magnitudes = np.linalg.norm(self._data, axis=0)
    finite_mask = np.isfinite(magnitudes)
    self._data = self._data[:, finite_mask]
constant[Removes normal vectors with nan magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
]
variable[points_of_interest] assign[=] call[call[name[np].where, parameter[call[name[np].isfinite, parameter[call[name[np].linalg.norm, parameter[name[self]._data]]]]]]][constant[0]]
name[self]._data assign[=] call[name[self]._data][tuple[[<ast.Slice object at 0x7da1b1241150>, <ast.Name object at 0x7da1b1242fb0>]]] | keyword[def] identifier[remove_nan_normals] ( identifier[self] ):
literal[string]
identifier[points_of_interest] = identifier[np] . identifier[where] ( identifier[np] . identifier[isfinite] ( identifier[np] . identifier[linalg] . identifier[norm] ( identifier[self] . identifier[_data] , identifier[axis] = literal[int] )))[ literal[int] ]
identifier[self] . identifier[_data] = identifier[self] . identifier[_data] [:, identifier[points_of_interest] ] | def remove_nan_normals(self):
"""Removes normal vectors with nan magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.isfinite(np.linalg.norm(self._data, axis=0)))[0]
self._data = self._data[:, points_of_interest] |
def random_string(self, length, alphabet):
    """Return string of `length` elements chosen from `alphabet`."""
    picks = [self.choice(alphabet) for _ in range(length)]
    return ''.join(picks)
constant[Return string of `length` elements chosen from `alphabet`.]
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b01c3a00>]]] | keyword[def] identifier[random_string] ( identifier[self] , identifier[length] , identifier[alphabet] ):
literal[string]
keyword[return] literal[string] . identifier[join] (
identifier[self] . identifier[choice] ( identifier[alphabet] ) keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[length] )
) | def random_string(self, length, alphabet):
"""Return string of `length` elements chosen from `alphabet`."""
return ''.join((self.choice(alphabet) for n in range(length))) |
def received(self, data):
    """
    API for the connection to forward
    information to this subscription instance.

    :param data: The JSON data which was received.
    :type data: Message
    """
    self.logger.debug('Data received: {}'.format(data))
    kind = None
    if 'type' in data:
        kind = data['type']
    # Subscription lifecycle messages are handled internally.
    if kind == 'confirm_subscription':
        self._subscribed()
        return
    if kind == 'reject_subscription':
        self._rejected()
        return
    # Everything else carrying a payload goes to the registered callback.
    if self.receive_callback is not None and 'message' in data:
        self.receive_callback(data['message'])
        return
    self.logger.warning('Message type unknown. ({})'.format(kind))
constant[
API for the connection to forward
information to this subscription instance.
:param data: The JSON data which was received.
:type data: Message
]
call[name[self].logger.debug, parameter[call[constant[Data received: {}].format, parameter[name[data]]]]]
variable[message_type] assign[=] constant[None]
if compare[constant[type] in name[data]] begin[:]
variable[message_type] assign[=] call[name[data]][constant[type]]
if compare[name[message_type] equal[==] constant[confirm_subscription]] begin[:]
call[name[self]._subscribed, parameter[]] | keyword[def] identifier[received] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[data] ))
identifier[message_type] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[message_type] = identifier[data] [ literal[string] ]
keyword[if] identifier[message_type] == literal[string] :
identifier[self] . identifier[_subscribed] ()
keyword[elif] identifier[message_type] == literal[string] :
identifier[self] . identifier[_rejected] ()
keyword[elif] identifier[self] . identifier[receive_callback] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] keyword[in] identifier[data] :
identifier[self] . identifier[receive_callback] ( identifier[data] [ literal[string] ])
keyword[else] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[message_type] )) | def received(self, data):
"""
API for the connection to forward
information to this subscription instance.
:param data: The JSON data which was received.
:type data: Message
"""
self.logger.debug('Data received: {}'.format(data))
message_type = None
if 'type' in data:
message_type = data['type'] # depends on [control=['if'], data=['data']]
if message_type == 'confirm_subscription':
self._subscribed() # depends on [control=['if'], data=[]]
elif message_type == 'reject_subscription':
self._rejected() # depends on [control=['if'], data=[]]
elif self.receive_callback is not None and 'message' in data:
self.receive_callback(data['message']) # depends on [control=['if'], data=[]]
else:
self.logger.warning('Message type unknown. ({})'.format(message_type)) |
def render_table(request,
                 table,
                 links=None,
                 context=None,
                 template='tri_table/list.html',
                 blank_on_empty=False,
                 paginate_by=40,  # pragma: no mutate
                 page=None,
                 paginator=None,
                 show_hits=False,
                 hit_label='Items',
                 post_bulk_edit=lambda table, queryset, updates: None):
    """
    Render a table. This automatically handles pagination, sorting, filtering and bulk operations.

    :param request: the request object. This is set on the table object so that it is available for lambda expressions.
    :param table: an instance of Table
    :param links: a list of instances of Link
    :param context: dict of extra context parameters
    :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
    :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
    :param paginate_by: page size passed through to the paginator
    :param page: page number passed through to the paginator
    :param paginator: supply your own paginator instance instead of the default
    :param show_hits: Display how many items there are total in the paginator.
    :param hit_label: Label for the show_hits display.
    :param post_bulk_edit: hook called with (table, queryset, updates) after a bulk edit has been applied
    :return: a string with the rendered HTML table
    """
    if not context:
        context = {}
    # A Namespace is a table factory: instantiate it to get the Table.
    if isinstance(table, Namespace):
        table = table()
    assert isinstance(table, Table), table
    table.request = request
    # Give handle_dispatch a chance to answer this request directly
    # (e.g. form dispatch targeted at this table); if it produces a
    # result, return it without rendering the table.
    should_return, dispatch_result = handle_dispatch(request=request, obj=table)
    if should_return:
        return dispatch_result
    context['bulk_form'] = table.bulk_form
    context['query_form'] = table.query_form
    context['tri_query_error'] = table.query_error
    # Bulk edit on POST: every bulk-form field with a non-empty value and
    # a bound attribute is written to all rows in the bulk queryset.
    if table.bulk_form and request.method == 'POST':
        if table.bulk_form.is_valid():
            queryset = table.bulk_queryset()
            updates = {
                field.name: field.value
                for field in table.bulk_form.fields
                if field.value is not None and field.value != '' and field.attr is not None
            }
            queryset.update(**updates)
            post_bulk_edit(table=table, queryset=queryset, updates=updates)
            # Redirect back to the referring page after the edit.
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
    # Assemble the full template context (pagination, links, hits, ...).
    table.context = table_context(
        request,
        table=table,
        links=links,
        paginate_by=paginate_by,
        page=page,
        extra_context=context,
        paginator=paginator,
        show_hits=show_hits,
        hit_label=hit_label,
    )
    # Optionally render nothing at all for an empty table.
    if not table.data and blank_on_empty:
        return ''
    # Invalid filter/query form: show the "invalid" marker instead of rows.
    if table.query_form and not table.query_form.is_valid():
        table.data = None
        table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>')
    return render_template(request, template, table.context)
constant[
Render a table. This automatically handles pagination, sorting, filtering and bulk operations.
:param request: the request object. This is set on the table object so that it is available for lambda expressions.
:param table: an instance of Table
:param links: a list of instances of Link
:param context: dict of extra context parameters
:param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
:param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
:param show_hits: Display how many items there are total in the paginator.
:param hit_label: Label for the show_hits display.
:return: a string with the rendered HTML table
]
if <ast.UnaryOp object at 0x7da18f58d9f0> begin[:]
variable[context] assign[=] dictionary[[], []]
if call[name[isinstance], parameter[name[table], name[Namespace]]] begin[:]
variable[table] assign[=] call[name[table], parameter[]]
assert[call[name[isinstance], parameter[name[table], name[Table]]]]
name[table].request assign[=] name[request]
<ast.Tuple object at 0x7da18f58f910> assign[=] call[name[handle_dispatch], parameter[]]
if name[should_return] begin[:]
return[name[dispatch_result]]
call[name[context]][constant[bulk_form]] assign[=] name[table].bulk_form
call[name[context]][constant[query_form]] assign[=] name[table].query_form
call[name[context]][constant[tri_query_error]] assign[=] name[table].query_error
if <ast.BoolOp object at 0x7da18f58c850> begin[:]
if call[name[table].bulk_form.is_valid, parameter[]] begin[:]
variable[queryset] assign[=] call[name[table].bulk_queryset, parameter[]]
variable[updates] assign[=] <ast.DictComp object at 0x7da18f58de10>
call[name[queryset].update, parameter[]]
call[name[post_bulk_edit], parameter[]]
return[call[name[HttpResponseRedirect], parameter[call[name[request].META][constant[HTTP_REFERER]]]]]
name[table].context assign[=] call[name[table_context], parameter[name[request]]]
if <ast.BoolOp object at 0x7da212db4ca0> begin[:]
return[constant[]]
if <ast.BoolOp object at 0x7da204566530> begin[:]
name[table].data assign[=] constant[None]
call[name[table].context][constant[invalid_form_message]] assign[=] call[name[mark_safe], parameter[constant[<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>]]]
return[call[name[render_template], parameter[name[request], name[template], name[table].context]]] | keyword[def] identifier[render_table] ( identifier[request] ,
identifier[table] ,
identifier[links] = keyword[None] ,
identifier[context] = keyword[None] ,
identifier[template] = literal[string] ,
identifier[blank_on_empty] = keyword[False] ,
identifier[paginate_by] = literal[int] ,
identifier[page] = keyword[None] ,
identifier[paginator] = keyword[None] ,
identifier[show_hits] = keyword[False] ,
identifier[hit_label] = literal[string] ,
identifier[post_bulk_edit] = keyword[lambda] identifier[table] , identifier[queryset] , identifier[updates] : keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[context] :
identifier[context] ={}
keyword[if] identifier[isinstance] ( identifier[table] , identifier[Namespace] ):
identifier[table] = identifier[table] ()
keyword[assert] identifier[isinstance] ( identifier[table] , identifier[Table] ), identifier[table]
identifier[table] . identifier[request] = identifier[request]
identifier[should_return] , identifier[dispatch_result] = identifier[handle_dispatch] ( identifier[request] = identifier[request] , identifier[obj] = identifier[table] )
keyword[if] identifier[should_return] :
keyword[return] identifier[dispatch_result]
identifier[context] [ literal[string] ]= identifier[table] . identifier[bulk_form]
identifier[context] [ literal[string] ]= identifier[table] . identifier[query_form]
identifier[context] [ literal[string] ]= identifier[table] . identifier[query_error]
keyword[if] identifier[table] . identifier[bulk_form] keyword[and] identifier[request] . identifier[method] == literal[string] :
keyword[if] identifier[table] . identifier[bulk_form] . identifier[is_valid] ():
identifier[queryset] = identifier[table] . identifier[bulk_queryset] ()
identifier[updates] ={
identifier[field] . identifier[name] : identifier[field] . identifier[value]
keyword[for] identifier[field] keyword[in] identifier[table] . identifier[bulk_form] . identifier[fields]
keyword[if] identifier[field] . identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[field] . identifier[value] != literal[string] keyword[and] identifier[field] . identifier[attr] keyword[is] keyword[not] keyword[None]
}
identifier[queryset] . identifier[update] (** identifier[updates] )
identifier[post_bulk_edit] ( identifier[table] = identifier[table] , identifier[queryset] = identifier[queryset] , identifier[updates] = identifier[updates] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[request] . identifier[META] [ literal[string] ])
identifier[table] . identifier[context] = identifier[table_context] (
identifier[request] ,
identifier[table] = identifier[table] ,
identifier[links] = identifier[links] ,
identifier[paginate_by] = identifier[paginate_by] ,
identifier[page] = identifier[page] ,
identifier[extra_context] = identifier[context] ,
identifier[paginator] = identifier[paginator] ,
identifier[show_hits] = identifier[show_hits] ,
identifier[hit_label] = identifier[hit_label] ,
)
keyword[if] keyword[not] identifier[table] . identifier[data] keyword[and] identifier[blank_on_empty] :
keyword[return] literal[string]
keyword[if] identifier[table] . identifier[query_form] keyword[and] keyword[not] identifier[table] . identifier[query_form] . identifier[is_valid] ():
identifier[table] . identifier[data] = keyword[None]
identifier[table] . identifier[context] [ literal[string] ]= identifier[mark_safe] ( literal[string] )
keyword[return] identifier[render_template] ( identifier[request] , identifier[template] , identifier[table] . identifier[context] ) | def render_table(request, table, links=None, context=None, template='tri_table/list.html', blank_on_empty=False, paginate_by=40, page=None, paginator=None, show_hits=False, hit_label='Items', post_bulk_edit=lambda table, queryset, updates: None): # pragma: no mutate
'\n Render a table. This automatically handles pagination, sorting, filtering and bulk operations.\n\n :param request: the request object. This is set on the table object so that it is available for lambda expressions.\n :param table: an instance of Table\n :param links: a list of instances of Link\n :param context: dict of extra context parameters\n :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.\n :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty\n :param show_hits: Display how many items there are total in the paginator.\n :param hit_label: Label for the show_hits display.\n :return: a string with the rendered HTML table\n '
if not context:
context = {} # depends on [control=['if'], data=[]]
if isinstance(table, Namespace):
table = table() # depends on [control=['if'], data=[]]
assert isinstance(table, Table), table
table.request = request
(should_return, dispatch_result) = handle_dispatch(request=request, obj=table)
if should_return:
return dispatch_result # depends on [control=['if'], data=[]]
context['bulk_form'] = table.bulk_form
context['query_form'] = table.query_form
context['tri_query_error'] = table.query_error
if table.bulk_form and request.method == 'POST':
if table.bulk_form.is_valid():
queryset = table.bulk_queryset()
updates = {field.name: field.value for field in table.bulk_form.fields if field.value is not None and field.value != '' and (field.attr is not None)}
queryset.update(**updates)
post_bulk_edit(table=table, queryset=queryset, updates=updates)
return HttpResponseRedirect(request.META['HTTP_REFERER']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
table.context = table_context(request, table=table, links=links, paginate_by=paginate_by, page=page, extra_context=context, paginator=paginator, show_hits=show_hits, hit_label=hit_label)
if not table.data and blank_on_empty:
return '' # depends on [control=['if'], data=[]]
if table.query_form and (not table.query_form.is_valid()):
table.data = None
table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>') # depends on [control=['if'], data=[]]
return render_template(request, template, table.context) |
def discrepancy_plot(
data, name='discrepancy', report_p=True, format='png', suffix='-gof',
path='./', fontmap=None, verbose=1):
'''
Generate goodness-of-fit deviate scatter plot.
:Arguments:
data: list
List (or list of lists for vector-valued variables) of discrepancy values, output
from the `pymc.diagnostics.discrepancy` function .
name: string
The name of the plot.
report_p: bool
Flag for annotating the p-value to the plot.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix (defaults to "-gof").
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
'''
if verbose > 0:
print_('Plotting', name + suffix)
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Generate new scatter plot
figure()
try:
x, y = transpose(data)
except ValueError:
x, y = data
scatter(x, y)
# Plot x=y line
lo = nmin(ravel(data))
hi = nmax(ravel(data))
datarange = hi - lo
lo -= 0.1 * datarange
hi += 0.1 * datarange
pyplot((lo, hi), (lo, hi))
# Plot options
xlabel('Observed deviates', fontsize='x-small')
ylabel('Simulated deviates', fontsize='x-small')
if report_p:
# Put p-value in legend
count = sum(s > o for o, s in zip(x, y))
text(lo + 0.1 * datarange, hi - 0.1 * datarange,
'p=%.3f' % (count / len(x)), horizontalalignment='center',
fontsize=10)
# Save to file
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
savefig("%s%s%s.%s" % (path, name, suffix, format)) | def function[discrepancy_plot, parameter[data, name, report_p, format, suffix, path, fontmap, verbose]]:
constant[
Generate goodness-of-fit deviate scatter plot.
:Arguments:
data: list
List (or list of lists for vector-valued variables) of discrepancy values, output
from the `pymc.diagnostics.discrepancy` function .
name: string
The name of the plot.
report_p: bool
Flag for annotating the p-value to the plot.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix (defaults to "-gof").
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
]
if compare[name[verbose] greater[>] constant[0]] begin[:]
call[name[print_], parameter[constant[Plotting], binary_operation[name[name] + name[suffix]]]]
if compare[name[fontmap] is constant[None]] begin[:]
variable[fontmap] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cb760>, <ast.Constant object at 0x7da20c7cbfd0>, <ast.Constant object at 0x7da20c7c8250>, <ast.Constant object at 0x7da20c7c8730>, <ast.Constant object at 0x7da20c7cb820>], [<ast.Constant object at 0x7da20c7cbd00>, <ast.Constant object at 0x7da20c7c8f40>, <ast.Constant object at 0x7da20c7ca6b0>, <ast.Constant object at 0x7da20c7c9060>, <ast.Constant object at 0x7da20c7ca1a0>]]
call[name[figure], parameter[]]
<ast.Try object at 0x7da20c7c9ff0>
call[name[scatter], parameter[name[x], name[y]]]
variable[lo] assign[=] call[name[nmin], parameter[call[name[ravel], parameter[name[data]]]]]
variable[hi] assign[=] call[name[nmax], parameter[call[name[ravel], parameter[name[data]]]]]
variable[datarange] assign[=] binary_operation[name[hi] - name[lo]]
<ast.AugAssign object at 0x7da20c7cae90>
<ast.AugAssign object at 0x7da20c7c8700>
call[name[pyplot], parameter[tuple[[<ast.Name object at 0x7da20c7c8580>, <ast.Name object at 0x7da20c7cb670>]], tuple[[<ast.Name object at 0x7da20c7c94e0>, <ast.Name object at 0x7da20c7ca140>]]]]
call[name[xlabel], parameter[constant[Observed deviates]]]
call[name[ylabel], parameter[constant[Simulated deviates]]]
if name[report_p] begin[:]
variable[count] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da20c7c87f0>]]
call[name[text], parameter[binary_operation[name[lo] + binary_operation[constant[0.1] * name[datarange]]], binary_operation[name[hi] - binary_operation[constant[0.1] * name[datarange]]], binary_operation[constant[p=%.3f] <ast.Mod object at 0x7da2590d6920> binary_operation[name[count] / call[name[len], parameter[name[x]]]]]]]
if <ast.UnaryOp object at 0x7da20c7cbf40> begin[:]
call[name[os].mkdir, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da20c7c91e0> begin[:]
<ast.AugAssign object at 0x7da20c7c96c0>
call[name[savefig], parameter[binary_operation[constant[%s%s%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7c96f0>, <ast.Name object at 0x7da20c7c9900>, <ast.Name object at 0x7da20c7c9c30>, <ast.Name object at 0x7da20c7c81c0>]]]]] | keyword[def] identifier[discrepancy_plot] (
identifier[data] , identifier[name] = literal[string] , identifier[report_p] = keyword[True] , identifier[format] = literal[string] , identifier[suffix] = literal[string] ,
identifier[path] = literal[string] , identifier[fontmap] = keyword[None] , identifier[verbose] = literal[int] ):
literal[string]
keyword[if] identifier[verbose] > literal[int] :
identifier[print_] ( literal[string] , identifier[name] + identifier[suffix] )
keyword[if] identifier[fontmap] keyword[is] keyword[None] :
identifier[fontmap] ={ literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] }
identifier[figure] ()
keyword[try] :
identifier[x] , identifier[y] = identifier[transpose] ( identifier[data] )
keyword[except] identifier[ValueError] :
identifier[x] , identifier[y] = identifier[data]
identifier[scatter] ( identifier[x] , identifier[y] )
identifier[lo] = identifier[nmin] ( identifier[ravel] ( identifier[data] ))
identifier[hi] = identifier[nmax] ( identifier[ravel] ( identifier[data] ))
identifier[datarange] = identifier[hi] - identifier[lo]
identifier[lo] -= literal[int] * identifier[datarange]
identifier[hi] += literal[int] * identifier[datarange]
identifier[pyplot] (( identifier[lo] , identifier[hi] ),( identifier[lo] , identifier[hi] ))
identifier[xlabel] ( literal[string] , identifier[fontsize] = literal[string] )
identifier[ylabel] ( literal[string] , identifier[fontsize] = literal[string] )
keyword[if] identifier[report_p] :
identifier[count] = identifier[sum] ( identifier[s] > identifier[o] keyword[for] identifier[o] , identifier[s] keyword[in] identifier[zip] ( identifier[x] , identifier[y] ))
identifier[text] ( identifier[lo] + literal[int] * identifier[datarange] , identifier[hi] - literal[int] * identifier[datarange] ,
literal[string] %( identifier[count] / identifier[len] ( identifier[x] )), identifier[horizontalalignment] = literal[string] ,
identifier[fontsize] = literal[int] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[os] . identifier[mkdir] ( identifier[path] )
keyword[if] keyword[not] identifier[path] . identifier[endswith] ( literal[string] ):
identifier[path] += literal[string]
identifier[savefig] ( literal[string] %( identifier[path] , identifier[name] , identifier[suffix] , identifier[format] )) | def discrepancy_plot(data, name='discrepancy', report_p=True, format='png', suffix='-gof', path='./', fontmap=None, verbose=1):
"""
Generate goodness-of-fit deviate scatter plot.
:Arguments:
data: list
List (or list of lists for vector-valued variables) of discrepancy values, output
from the `pymc.diagnostics.discrepancy` function .
name: string
The name of the plot.
report_p: bool
Flag for annotating the p-value to the plot.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix (defaults to "-gof").
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if verbose > 0:
print_('Plotting', name + suffix) # depends on [control=['if'], data=[]]
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4} # depends on [control=['if'], data=['fontmap']]
# Generate new scatter plot
figure()
try:
(x, y) = transpose(data) # depends on [control=['try'], data=[]]
except ValueError:
(x, y) = data # depends on [control=['except'], data=[]]
scatter(x, y)
# Plot x=y line
lo = nmin(ravel(data))
hi = nmax(ravel(data))
datarange = hi - lo
lo -= 0.1 * datarange
hi += 0.1 * datarange
pyplot((lo, hi), (lo, hi))
# Plot options
xlabel('Observed deviates', fontsize='x-small')
ylabel('Simulated deviates', fontsize='x-small')
if report_p:
# Put p-value in legend
count = sum((s > o for (o, s) in zip(x, y)))
text(lo + 0.1 * datarange, hi - 0.1 * datarange, 'p=%.3f' % (count / len(x)), horizontalalignment='center', fontsize=10) # depends on [control=['if'], data=[]]
# Save to file
if not os.path.exists(path):
os.mkdir(path) # depends on [control=['if'], data=[]]
if not path.endswith('/'):
path += '/' # depends on [control=['if'], data=[]]
savefig('%s%s%s.%s' % (path, name, suffix, format)) |
def reduce_formula(sym_amt, iupac_ordering=False):
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actanides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(), key=lambda x: [get_el_sp(x).X, x])
syms = list(filter(
lambda x: abs(sym_amt[x]) > Composition.amount_tolerance, syms))
factor = 1
# Enforce integers for doing gcd.
if all((int(i) == i for i in sym_amt.values())):
factor = abs(gcd(*(int(i) for i in sym_amt.values())))
polyanion = []
# if the composition contains a poly anion
if len(syms) >= 3 and get_el_sp(syms[-1]).X - get_el_sp(syms[-2]).X < 1.65:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor
for i in [-2, -1]}
(poly_form, poly_factor) = reduce_formula(
poly_sym_amt, iupac_ordering=iupac_ordering)
if poly_factor != 1:
polyanion.append("({}){}".format(poly_form, int(poly_factor)))
syms = syms[:len(syms) - 2 if polyanion else len(syms)]
if iupac_ordering:
syms = sorted(syms,
key=lambda x: [get_el_sp(x).iupac_ordering, x])
reduced_form = []
for s in syms:
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(formula_double_format(normamt))
reduced_form = "".join(reduced_form + polyanion)
return reduced_form, factor | def function[reduce_formula, parameter[sym_amt, iupac_ordering]]:
constant[
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actanides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
]
variable[syms] assign[=] call[name[sorted], parameter[call[name[sym_amt].keys, parameter[]]]]
variable[syms] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da20c6abbb0>, name[syms]]]]]
variable[factor] assign[=] constant[1]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da20c6a9ab0>]] begin[:]
variable[factor] assign[=] call[name[abs], parameter[call[name[gcd], parameter[<ast.Starred object at 0x7da20c6abac0>]]]]
variable[polyanion] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c6a9ba0> begin[:]
variable[poly_sym_amt] assign[=] <ast.DictComp object at 0x7da2047eb370>
<ast.Tuple object at 0x7da20c6a8d00> assign[=] call[name[reduce_formula], parameter[name[poly_sym_amt]]]
if compare[name[poly_factor] not_equal[!=] constant[1]] begin[:]
call[name[polyanion].append, parameter[call[constant[({}){}].format, parameter[name[poly_form], call[name[int], parameter[name[poly_factor]]]]]]]
variable[syms] assign[=] call[name[syms]][<ast.Slice object at 0x7da20c6a8850>]
if name[iupac_ordering] begin[:]
variable[syms] assign[=] call[name[sorted], parameter[name[syms]]]
variable[reduced_form] assign[=] list[[]]
for taget[name[s]] in starred[name[syms]] begin[:]
variable[normamt] assign[=] binary_operation[binary_operation[call[name[sym_amt]][name[s]] * constant[1.0]] / name[factor]]
call[name[reduced_form].append, parameter[name[s]]]
call[name[reduced_form].append, parameter[call[name[formula_double_format], parameter[name[normamt]]]]]
variable[reduced_form] assign[=] call[constant[].join, parameter[binary_operation[name[reduced_form] + name[polyanion]]]]
return[tuple[[<ast.Name object at 0x7da20c6aa200>, <ast.Name object at 0x7da20c6aa110>]]] | keyword[def] identifier[reduce_formula] ( identifier[sym_amt] , identifier[iupac_ordering] = keyword[False] ):
literal[string]
identifier[syms] = identifier[sorted] ( identifier[sym_amt] . identifier[keys] (), identifier[key] = keyword[lambda] identifier[x] :[ identifier[get_el_sp] ( identifier[x] ). identifier[X] , identifier[x] ])
identifier[syms] = identifier[list] ( identifier[filter] (
keyword[lambda] identifier[x] : identifier[abs] ( identifier[sym_amt] [ identifier[x] ])> identifier[Composition] . identifier[amount_tolerance] , identifier[syms] ))
identifier[factor] = literal[int]
keyword[if] identifier[all] (( identifier[int] ( identifier[i] )== identifier[i] keyword[for] identifier[i] keyword[in] identifier[sym_amt] . identifier[values] ())):
identifier[factor] = identifier[abs] ( identifier[gcd] (*( identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[sym_amt] . identifier[values] ())))
identifier[polyanion] =[]
keyword[if] identifier[len] ( identifier[syms] )>= literal[int] keyword[and] identifier[get_el_sp] ( identifier[syms] [- literal[int] ]). identifier[X] - identifier[get_el_sp] ( identifier[syms] [- literal[int] ]). identifier[X] < literal[int] :
identifier[poly_sym_amt] ={ identifier[syms] [ identifier[i] ]: identifier[sym_amt] [ identifier[syms] [ identifier[i] ]]/ identifier[factor]
keyword[for] identifier[i] keyword[in] [- literal[int] ,- literal[int] ]}
( identifier[poly_form] , identifier[poly_factor] )= identifier[reduce_formula] (
identifier[poly_sym_amt] , identifier[iupac_ordering] = identifier[iupac_ordering] )
keyword[if] identifier[poly_factor] != literal[int] :
identifier[polyanion] . identifier[append] ( literal[string] . identifier[format] ( identifier[poly_form] , identifier[int] ( identifier[poly_factor] )))
identifier[syms] = identifier[syms] [: identifier[len] ( identifier[syms] )- literal[int] keyword[if] identifier[polyanion] keyword[else] identifier[len] ( identifier[syms] )]
keyword[if] identifier[iupac_ordering] :
identifier[syms] = identifier[sorted] ( identifier[syms] ,
identifier[key] = keyword[lambda] identifier[x] :[ identifier[get_el_sp] ( identifier[x] ). identifier[iupac_ordering] , identifier[x] ])
identifier[reduced_form] =[]
keyword[for] identifier[s] keyword[in] identifier[syms] :
identifier[normamt] = identifier[sym_amt] [ identifier[s] ]* literal[int] / identifier[factor]
identifier[reduced_form] . identifier[append] ( identifier[s] )
identifier[reduced_form] . identifier[append] ( identifier[formula_double_format] ( identifier[normamt] ))
identifier[reduced_form] = literal[string] . identifier[join] ( identifier[reduced_form] + identifier[polyanion] )
keyword[return] identifier[reduced_form] , identifier[factor] | def reduce_formula(sym_amt, iupac_ordering=False):
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actanides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(), key=lambda x: [get_el_sp(x).X, x])
syms = list(filter(lambda x: abs(sym_amt[x]) > Composition.amount_tolerance, syms))
factor = 1
# Enforce integers for doing gcd.
if all((int(i) == i for i in sym_amt.values())):
factor = abs(gcd(*(int(i) for i in sym_amt.values()))) # depends on [control=['if'], data=[]]
polyanion = []
# if the composition contains a poly anion
if len(syms) >= 3 and get_el_sp(syms[-1]).X - get_el_sp(syms[-2]).X < 1.65:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor for i in [-2, -1]}
(poly_form, poly_factor) = reduce_formula(poly_sym_amt, iupac_ordering=iupac_ordering)
if poly_factor != 1:
polyanion.append('({}){}'.format(poly_form, int(poly_factor))) # depends on [control=['if'], data=['poly_factor']] # depends on [control=['if'], data=[]]
syms = syms[:len(syms) - 2 if polyanion else len(syms)]
if iupac_ordering:
syms = sorted(syms, key=lambda x: [get_el_sp(x).iupac_ordering, x]) # depends on [control=['if'], data=[]]
reduced_form = []
for s in syms:
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(formula_double_format(normamt)) # depends on [control=['for'], data=['s']]
reduced_form = ''.join(reduced_form + polyanion)
return (reduced_form, factor) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.