| code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k) |
|---|---|---|---|
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
# Only try to remove uploaded zip if we're running a command that has loaded credentials
if self.load_credentials:
self.remove_uploaded_zip()
self.remove_local_zip()
|
def function[on_exit, parameter[self]]:
constant[
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
]
if name[self].zip_path begin[:]
if name[self].load_credentials begin[:]
call[name[self].remove_uploaded_zip, parameter[]]
call[name[self].remove_local_zip, parameter[]]
|
keyword[def] identifier[on_exit] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[zip_path] :
keyword[if] identifier[self] . identifier[load_credentials] :
identifier[self] . identifier[remove_uploaded_zip] ()
identifier[self] . identifier[remove_local_zip] ()
|
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
# Only try to remove uploaded zip if we're running a command that has loaded credentials
if self.load_credentials:
self.remove_uploaded_zip() # depends on [control=['if'], data=[]]
self.remove_local_zip() # depends on [control=['if'], data=[]]
|
def best_hits(self):
"""
returns a dict with query => best mapped position
"""
self.quality_sort()
best_hits = dict((query, next(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
self.ref_sort()
return best_hits
|
def function[best_hits, parameter[self]]:
constant[
returns a dict with query => best mapped position
]
call[name[self].quality_sort, parameter[]]
variable[best_hits] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18f00fb50>]]
call[name[self].ref_sort, parameter[]]
return[name[best_hits]]
|
keyword[def] identifier[best_hits] ( identifier[self] ):
literal[string]
identifier[self] . identifier[quality_sort] ()
identifier[best_hits] = identifier[dict] (( identifier[query] , identifier[next] ( identifier[blines] )) keyword[for] ( identifier[query] , identifier[blines] ) keyword[in] identifier[groupby] ( identifier[self] , keyword[lambda] identifier[x] : identifier[x] . identifier[query] ))
identifier[self] . identifier[ref_sort] ()
keyword[return] identifier[best_hits]
|
def best_hits(self):
"""
returns a dict with query => best mapped position
"""
self.quality_sort()
best_hits = dict(((query, next(blines)) for (query, blines) in groupby(self, lambda x: x.query)))
self.ref_sort()
return best_hits
|
def findNextSection(self, scope, name):
""" Starts with given par (scope+name) and looks further down the list
of parameters until one of a different non-null scope is found. Upon
success, returns the (scope, name) tuple, otherwise (None, None). """
# first find index of starting point
plist = self._taskParsObj.getParList()
start = 0
for i in range(len(plist)):
if scope == plist[i].scope and name == plist[i].name:
start = i
break
else:
print('WARNING: could not find starting par: '+scope+'.'+name)
return (None, None)
# now find first different (non-null) scope in a par, after start
for i in range(start, len(plist)):
if len(plist[i].scope) > 0 and plist[i].scope != scope:
return (plist[i].scope, plist[i].name)
# else didn't find it
return (None, None)
|
def function[findNextSection, parameter[self, scope, name]]:
constant[ Starts with given par (scope+name) and looks further down the list
of parameters until one of a different non-null scope is found. Upon
success, returns the (scope, name) tuple, otherwise (None, None). ]
variable[plist] assign[=] call[name[self]._taskParsObj.getParList, parameter[]]
variable[start] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[plist]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0ef4190> begin[:]
variable[start] assign[=] name[i]
break
for taget[name[i]] in starred[call[name[range], parameter[name[start], call[name[len], parameter[name[plist]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0ef4400> begin[:]
return[tuple[[<ast.Attribute object at 0x7da1b0ef6a40>, <ast.Attribute object at 0x7da1b0ef7a60>]]]
return[tuple[[<ast.Constant object at 0x7da1b0ef4a00>, <ast.Constant object at 0x7da1b0ef7dc0>]]]
|
keyword[def] identifier[findNextSection] ( identifier[self] , identifier[scope] , identifier[name] ):
literal[string]
identifier[plist] = identifier[self] . identifier[_taskParsObj] . identifier[getParList] ()
identifier[start] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[plist] )):
keyword[if] identifier[scope] == identifier[plist] [ identifier[i] ]. identifier[scope] keyword[and] identifier[name] == identifier[plist] [ identifier[i] ]. identifier[name] :
identifier[start] = identifier[i]
keyword[break]
keyword[else] :
identifier[print] ( literal[string] + identifier[scope] + literal[string] + identifier[name] )
keyword[return] ( keyword[None] , keyword[None] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[start] , identifier[len] ( identifier[plist] )):
keyword[if] identifier[len] ( identifier[plist] [ identifier[i] ]. identifier[scope] )> literal[int] keyword[and] identifier[plist] [ identifier[i] ]. identifier[scope] != identifier[scope] :
keyword[return] ( identifier[plist] [ identifier[i] ]. identifier[scope] , identifier[plist] [ identifier[i] ]. identifier[name] )
keyword[return] ( keyword[None] , keyword[None] )
|
def findNextSection(self, scope, name):
""" Starts with given par (scope+name) and looks further down the list
of parameters until one of a different non-null scope is found. Upon
success, returns the (scope, name) tuple, otherwise (None, None). """
# first find index of starting point
plist = self._taskParsObj.getParList()
start = 0
for i in range(len(plist)):
if scope == plist[i].scope and name == plist[i].name:
start = i
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
else:
print('WARNING: could not find starting par: ' + scope + '.' + name)
return (None, None)
# now find first different (non-null) scope in a par, after start
for i in range(start, len(plist)):
if len(plist[i].scope) > 0 and plist[i].scope != scope:
return (plist[i].scope, plist[i].name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# else didn't find it
return (None, None)
|
def before_sample(analysis_request):
"""Method triggered before "sample" transition for the Analysis Request
passed in is performed
"""
if not analysis_request.getDateSampled():
analysis_request.setDateSampled(DateTime())
if not analysis_request.getSampler():
analysis_request.setSampler(api.get_current_user().id)
|
def function[before_sample, parameter[analysis_request]]:
constant[Method triggered before "sample" transition for the Analysis Request
passed in is performed
]
if <ast.UnaryOp object at 0x7da1b1d3b340> begin[:]
call[name[analysis_request].setDateSampled, parameter[call[name[DateTime], parameter[]]]]
if <ast.UnaryOp object at 0x7da1b23c4610> begin[:]
call[name[analysis_request].setSampler, parameter[call[name[api].get_current_user, parameter[]].id]]
|
keyword[def] identifier[before_sample] ( identifier[analysis_request] ):
literal[string]
keyword[if] keyword[not] identifier[analysis_request] . identifier[getDateSampled] ():
identifier[analysis_request] . identifier[setDateSampled] ( identifier[DateTime] ())
keyword[if] keyword[not] identifier[analysis_request] . identifier[getSampler] ():
identifier[analysis_request] . identifier[setSampler] ( identifier[api] . identifier[get_current_user] (). identifier[id] )
|
def before_sample(analysis_request):
"""Method triggered before "sample" transition for the Analysis Request
passed in is performed
"""
if not analysis_request.getDateSampled():
analysis_request.setDateSampled(DateTime()) # depends on [control=['if'], data=[]]
if not analysis_request.getSampler():
analysis_request.setSampler(api.get_current_user().id) # depends on [control=['if'], data=[]]
|
def run(cmd, background=False):
"""
Executes the given command
If background flag is True the command will run in background
and this method will return a :class:`Popen` object
If background is False (default) the command will run in this thread
and this method will return stdout.
A CommandException will be raised if command fails
"""
logger.debug('Running command: %s' % cmd)
if background:
return subprocess.Popen(cmd, shell=True, close_fds=True)
else:
(status, output) = commands.getstatusoutput(cmd)
if status != 0:
logger.error("Command failed: %s" % cmd)
if output:
logger.debug('OUTPUT:\n' + output)
if status != 0:
raise CommandException(status, output)
return output
|
def function[run, parameter[cmd, background]]:
constant[
Executes the given command
If background flag is True the command will run in background
and this method will return a :class:`Popen` object
If background is False (default) the command will run in this thread
and this method will return stdout.
A CommandException will be raised if command fails
]
call[name[logger].debug, parameter[binary_operation[constant[Running command: %s] <ast.Mod object at 0x7da2590d6920> name[cmd]]]]
if name[background] begin[:]
return[call[name[subprocess].Popen, parameter[name[cmd]]]]
|
keyword[def] identifier[run] ( identifier[cmd] , identifier[background] = keyword[False] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] % identifier[cmd] )
keyword[if] identifier[background] :
keyword[return] identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[close_fds] = keyword[True] )
keyword[else] :
( identifier[status] , identifier[output] )= identifier[commands] . identifier[getstatusoutput] ( identifier[cmd] )
keyword[if] identifier[status] != literal[int] :
identifier[logger] . identifier[error] ( literal[string] % identifier[cmd] )
keyword[if] identifier[output] :
identifier[logger] . identifier[debug] ( literal[string] + identifier[output] )
keyword[if] identifier[status] != literal[int] :
keyword[raise] identifier[CommandException] ( identifier[status] , identifier[output] )
keyword[return] identifier[output]
|
def run(cmd, background=False):
"""
Executes the given command
If background flag is True the command will run in background
and this method will return a :class:`Popen` object
If background is False (default) the command will run in this thread
and this method will return stdout.
A CommandException will be raised if command fails
"""
logger.debug('Running command: %s' % cmd)
if background:
return subprocess.Popen(cmd, shell=True, close_fds=True) # depends on [control=['if'], data=[]]
else:
(status, output) = commands.getstatusoutput(cmd)
if status != 0:
logger.error('Command failed: %s' % cmd) # depends on [control=['if'], data=[]]
if output:
logger.debug('OUTPUT:\n' + output) # depends on [control=['if'], data=[]]
if status != 0:
raise CommandException(status, output) # depends on [control=['if'], data=['status']]
return output
|
def Task(entry): # pylint: disable=invalid-name
"""
Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is
being decorated, it becomes a child of `LaneTask`.
Parameters
----------
entry: The name of the task's "main" method, i.e. the method which is executed when task is run
Returns
-------
wrapper (function): The actual decorator function
"""
if not isinstance(entry, string_types):
# In the event that no argument is supplied to the decorator, python passes the decorated
# class itself as an argument. That way, we can detect if no argument (or an argument of
# invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and
# as an arg. Isn't neat, but for now it suffices.
raise TypeError('When decorating a class with `Task`, a single string argument must be '
'supplied, which specifies the "main" task method, i.e. the class\'s entry '
'point to the task.')
else:
def wrapper(cls):
"""The actual decorator function"""
if isclass(cls):
if not hasattr(cls, entry): # Check if cls has the specified entry method
raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__))
# We will have to inspect the task class's `__init__` method later (by inspecting
# the arg signature, before it is instantiated). In various circumstances, classes
# will not have an unbound `__init__` method. Let's deal with that now already, by
# assigning an empty, unbound `__init__` method manually, in order to prevent
# errors later on during method inspection (not an issue in Python 3):
# - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a
# sub-class of object, and it does not have a `__init__` method definition, the
# class will not have an attribute `__init__`
# - If a class misses a `__init__` method definition, but is defined as a
# new-style class, attribute `__init__` will be of type `slot wrapper`, which
# cannot be inspected (and it also doesn't seem possible to check if a method is of
# type `slot wrapper`, which is why we manually define one).
if not hasattr(cls, '__init__') or cls.__init__ == object.__init__:
init = MethodType(lambda self: None, None, cls) \
if PY2 else MethodType(lambda self: None, cls)
setattr(cls, '__init__', init)
# Check for attributes that will be overwritten, in order to warn the user
reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache',
'clear_cache', '_log_lock')
for attr in dir(cls):
if attr in reserved_attributes:
make_default_logger(INTERNAL_LOGGER_NAME).warning(
'Attribute `%s` of class `%s` will be overwritten when decorated with '
'`sparklanes.Task`! Avoid assigning any of the following attributes '
'`%s`', attr, cls.__name__, str(reserved_attributes)
)
assignments = {'_entry_mtd': entry,
'__getattr__': lambda self, name: TaskCache.get(name),
'__init__': cls.__init__,
'_log_lock': Lock()}
for attr in WRAPPER_ASSIGNMENTS:
try:
assignments[attr] = getattr(cls, attr)
except AttributeError:
pass
# Build task as a subclass of LaneTask
return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments)
else:
raise TypeError('Only classes can be decorated with `Task`')
return wrapper
|
def function[Task, parameter[entry]]:
constant[
Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is
being decorated, it becomes a child of `LaneTask`.
Parameters
----------
entry: The name of the task's "main" method, i.e. the method which is executed when task is run
Returns
-------
wrapper (function): The actual decorator function
]
if <ast.UnaryOp object at 0x7da18eb54280> begin[:]
<ast.Raise object at 0x7da18eb561d0>
|
keyword[def] identifier[Task] ( identifier[entry] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[entry] , identifier[string_types] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string] )
keyword[else] :
keyword[def] identifier[wrapper] ( identifier[cls] ):
literal[string]
keyword[if] identifier[isclass] ( identifier[cls] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[cls] , identifier[entry] ):
keyword[raise] identifier[TypeError] ( literal[string] %( identifier[entry] , identifier[cls] . identifier[__name__] ))
keyword[if] keyword[not] identifier[hasattr] ( identifier[cls] , literal[string] ) keyword[or] identifier[cls] . identifier[__init__] == identifier[object] . identifier[__init__] :
identifier[init] = identifier[MethodType] ( keyword[lambda] identifier[self] : keyword[None] , keyword[None] , identifier[cls] ) keyword[if] identifier[PY2] keyword[else] identifier[MethodType] ( keyword[lambda] identifier[self] : keyword[None] , identifier[cls] )
identifier[setattr] ( identifier[cls] , literal[string] , identifier[init] )
identifier[reserved_attributes] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] )
keyword[for] identifier[attr] keyword[in] identifier[dir] ( identifier[cls] ):
keyword[if] identifier[attr] keyword[in] identifier[reserved_attributes] :
identifier[make_default_logger] ( identifier[INTERNAL_LOGGER_NAME] ). identifier[warning] (
literal[string]
literal[string]
literal[string] , identifier[attr] , identifier[cls] . identifier[__name__] , identifier[str] ( identifier[reserved_attributes] )
)
identifier[assignments] ={ literal[string] : identifier[entry] ,
literal[string] : keyword[lambda] identifier[self] , identifier[name] : identifier[TaskCache] . identifier[get] ( identifier[name] ),
literal[string] : identifier[cls] . identifier[__init__] ,
literal[string] : identifier[Lock] ()}
keyword[for] identifier[attr] keyword[in] identifier[WRAPPER_ASSIGNMENTS] :
keyword[try] :
identifier[assignments] [ identifier[attr] ]= identifier[getattr] ( identifier[cls] , identifier[attr] )
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] identifier[type] ( literal[string] % identifier[cls] . identifier[__name__] ,( identifier[LaneTask] , identifier[cls] , identifier[object] ), identifier[assignments] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[wrapper]
|
def Task(entry): # pylint: disable=invalid-name
'\n Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is\n being decorated, it becomes a child of `LaneTask`.\n\n Parameters\n ----------\n entry: The name of the task\'s "main" method, i.e. the method which is executed when task is run\n\n Returns\n -------\n wrapper (function): The actual decorator function\n '
if not isinstance(entry, string_types):
# In the event that no argument is supplied to the decorator, python passes the decorated
# class itself as an argument. That way, we can detect if no argument (or an argument of
# invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and
# as an arg. Isn't neat, but for now it suffices.
raise TypeError('When decorating a class with `Task`, a single string argument must be supplied, which specifies the "main" task method, i.e. the class\'s entry point to the task.') # depends on [control=['if'], data=[]]
else:
def wrapper(cls):
"""The actual decorator function"""
if isclass(cls):
if not hasattr(cls, entry): # Check if cls has the specified entry method
raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__)) # depends on [control=['if'], data=[]]
# We will have to inspect the task class's `__init__` method later (by inspecting
# the arg signature, before it is instantiated). In various circumstances, classes
# will not have an unbound `__init__` method. Let's deal with that now already, by
# assigning an empty, unbound `__init__` method manually, in order to prevent
# errors later on during method inspection (not an issue in Python 3):
# - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a
# sub-class of object, and it does not have a `__init__` method definition, the
# class will not have an attribute `__init__`
# - If a class misses a `__init__` method definition, but is defined as a
# new-style class, attribute `__init__` will be of type `slot wrapper`, which
# cannot be inspected (and it also doesn't seem possible to check if a method is of
# type `slot wrapper`, which is why we manually define one).
if not hasattr(cls, '__init__') or cls.__init__ == object.__init__:
init = MethodType(lambda self: None, None, cls) if PY2 else MethodType(lambda self: None, cls)
setattr(cls, '__init__', init) # depends on [control=['if'], data=[]]
# Check for attributes that will be overwritten, in order to warn the user
reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache', 'clear_cache', '_log_lock')
for attr in dir(cls):
if attr in reserved_attributes:
make_default_logger(INTERNAL_LOGGER_NAME).warning('Attribute `%s` of class `%s` will be overwritten when decorated with `sparklanes.Task`! Avoid assigning any of the following attributes `%s`', attr, cls.__name__, str(reserved_attributes)) # depends on [control=['if'], data=['attr', 'reserved_attributes']] # depends on [control=['for'], data=['attr']]
assignments = {'_entry_mtd': entry, '__getattr__': lambda self, name: TaskCache.get(name), '__init__': cls.__init__, '_log_lock': Lock()}
for attr in WRAPPER_ASSIGNMENTS:
try:
assignments[attr] = getattr(cls, attr) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['attr']]
# Build task as a subclass of LaneTask
return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments) # depends on [control=['if'], data=[]]
else:
raise TypeError('Only classes can be decorated with `Task`')
return wrapper
|
def required_unique(objects, key):
"""
A pyrsistent invariant which requires all objects in the given iterable to
have a unique key.
:param objects: The objects to check.
:param key: A one-argument callable to compute the key of an object.
:return: An invariant failure if any two or more objects have the same key
computed. An invariant success otherwise.
"""
keys = {}
duplicate = set()
for k in map(key, objects):
keys[k] = keys.get(k, 0) + 1
if keys[k] > 1:
duplicate.add(k)
if duplicate:
return (False, u"Duplicate object keys: {}".format(duplicate))
return (True, u"")
|
def function[required_unique, parameter[objects, key]]:
constant[
A pyrsistent invariant which requires all objects in the given iterable to
have a unique key.
:param objects: The objects to check.
:param key: A one-argument callable to compute the key of an object.
:return: An invariant failure if any two or more objects have the same key
computed. An invariant success otherwise.
]
variable[keys] assign[=] dictionary[[], []]
variable[duplicate] assign[=] call[name[set], parameter[]]
for taget[name[k]] in starred[call[name[map], parameter[name[key], name[objects]]]] begin[:]
call[name[keys]][name[k]] assign[=] binary_operation[call[name[keys].get, parameter[name[k], constant[0]]] + constant[1]]
if compare[call[name[keys]][name[k]] greater[>] constant[1]] begin[:]
call[name[duplicate].add, parameter[name[k]]]
if name[duplicate] begin[:]
return[tuple[[<ast.Constant object at 0x7da18fe93430>, <ast.Call object at 0x7da18fe92b90>]]]
return[tuple[[<ast.Constant object at 0x7da18fe93e20>, <ast.Constant object at 0x7da18fe90400>]]]
|
keyword[def] identifier[required_unique] ( identifier[objects] , identifier[key] ):
literal[string]
identifier[keys] ={}
identifier[duplicate] = identifier[set] ()
keyword[for] identifier[k] keyword[in] identifier[map] ( identifier[key] , identifier[objects] ):
identifier[keys] [ identifier[k] ]= identifier[keys] . identifier[get] ( identifier[k] , literal[int] )+ literal[int]
keyword[if] identifier[keys] [ identifier[k] ]> literal[int] :
identifier[duplicate] . identifier[add] ( identifier[k] )
keyword[if] identifier[duplicate] :
keyword[return] ( keyword[False] , literal[string] . identifier[format] ( identifier[duplicate] ))
keyword[return] ( keyword[True] , literal[string] )
|
def required_unique(objects, key):
"""
A pyrsistent invariant which requires all objects in the given iterable to
have a unique key.
:param objects: The objects to check.
:param key: A one-argument callable to compute the key of an object.
:return: An invariant failure if any two or more objects have the same key
computed. An invariant success otherwise.
"""
keys = {}
duplicate = set()
for k in map(key, objects):
keys[k] = keys.get(k, 0) + 1
if keys[k] > 1:
duplicate.add(k) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
if duplicate:
return (False, u'Duplicate object keys: {}'.format(duplicate)) # depends on [control=['if'], data=[]]
return (True, u'')
|
def _record_score(self, history, current_score):
"""Record the current store and, if applicable, if it's the best score
yet.
"""
history.record(self.name_, current_score)
is_best = self._is_best_score(current_score)
if is_best is None:
return
history.record(self.name_ + '_best', bool(is_best))
if is_best:
self.best_score_ = current_score
|
def function[_record_score, parameter[self, history, current_score]]:
constant[Record the current store and, if applicable, if it's the best score
yet.
]
call[name[history].record, parameter[name[self].name_, name[current_score]]]
variable[is_best] assign[=] call[name[self]._is_best_score, parameter[name[current_score]]]
if compare[name[is_best] is constant[None]] begin[:]
return[None]
call[name[history].record, parameter[binary_operation[name[self].name_ + constant[_best]], call[name[bool], parameter[name[is_best]]]]]
if name[is_best] begin[:]
name[self].best_score_ assign[=] name[current_score]
|
keyword[def] identifier[_record_score] ( identifier[self] , identifier[history] , identifier[current_score] ):
literal[string]
identifier[history] . identifier[record] ( identifier[self] . identifier[name_] , identifier[current_score] )
identifier[is_best] = identifier[self] . identifier[_is_best_score] ( identifier[current_score] )
keyword[if] identifier[is_best] keyword[is] keyword[None] :
keyword[return]
identifier[history] . identifier[record] ( identifier[self] . identifier[name_] + literal[string] , identifier[bool] ( identifier[is_best] ))
keyword[if] identifier[is_best] :
identifier[self] . identifier[best_score_] = identifier[current_score]
|
def _record_score(self, history, current_score):
"""Record the current store and, if applicable, if it's the best score
yet.
"""
history.record(self.name_, current_score)
is_best = self._is_best_score(current_score)
if is_best is None:
return # depends on [control=['if'], data=[]]
history.record(self.name_ + '_best', bool(is_best))
if is_best:
self.best_score_ = current_score # depends on [control=['if'], data=[]]
|
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help()
source_url = self.actual_arguments[0]
output_file_path = self.actual_arguments[1]
download = not self.has_option("--list")
# largest_audio = True by default or if explicitly given
if self.has_option("--largest-audio"):
largest_audio = True
else:
largest_audio = not self.has_option("--smallest-audio")
download_format = self.has_option_with_value("--format")
try:
if download:
self.print_info(u"Downloading audio stream from '%s' ..." % source_url)
downloader = Downloader(logger=self.logger)
result = downloader.audio_from_youtube(
source_url,
download=True,
output_file_path=output_file_path,
download_format=download_format,
largest_audio=largest_audio,
)
self.print_info(u"Downloading audio stream from '%s' ... done" % source_url)
self.print_success(u"Downloaded file '%s'" % result)
else:
self.print_info(u"Downloading stream info from '%s' ..." % source_url)
downloader = Downloader(logger=self.logger)
result = downloader.audio_from_youtube(
source_url,
download=False
)
self.print_info(u"Downloading stream info from '%s' ... done" % source_url)
msg = []
msg.append(u"%s\t%s\t%s\t%s" % ("Format", "Extension", "Bitrate", "Size"))
for r in result:
filesize = gf.human_readable_number(r["filesize"])
msg.append(u"%s\t%s\t%s\t%s" % (r["format"], r["ext"], r["abr"], filesize))
self.print_generic(u"Available audio streams:")
self.print_generic(u"\n".join(msg))
return self.NO_ERROR_EXIT_CODE
except ImportError:
self.print_no_dependency_error()
except Exception as exc:
self.print_error(u"An unexpected error occurred while downloading audio from YouTube:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
|
def function[perform_command, parameter[self]]:
constant[
Perform command and return the appropriate exit code.
:rtype: int
]
if compare[call[name[len], parameter[name[self].actual_arguments]] less[<] constant[2]] begin[:]
return[call[name[self].print_help, parameter[]]]
variable[source_url] assign[=] call[name[self].actual_arguments][constant[0]]
variable[output_file_path] assign[=] call[name[self].actual_arguments][constant[1]]
variable[download] assign[=] <ast.UnaryOp object at 0x7da207f03bb0>
if call[name[self].has_option, parameter[constant[--largest-audio]]] begin[:]
variable[largest_audio] assign[=] constant[True]
variable[download_format] assign[=] call[name[self].has_option_with_value, parameter[constant[--format]]]
<ast.Try object at 0x7da207f039a0>
return[name[self].ERROR_EXIT_CODE]
|
keyword[def] identifier[perform_command] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[actual_arguments] )< literal[int] :
keyword[return] identifier[self] . identifier[print_help] ()
identifier[source_url] = identifier[self] . identifier[actual_arguments] [ literal[int] ]
identifier[output_file_path] = identifier[self] . identifier[actual_arguments] [ literal[int] ]
identifier[download] = keyword[not] identifier[self] . identifier[has_option] ( literal[string] )
keyword[if] identifier[self] . identifier[has_option] ( literal[string] ):
identifier[largest_audio] = keyword[True]
keyword[else] :
identifier[largest_audio] = keyword[not] identifier[self] . identifier[has_option] ( literal[string] )
identifier[download_format] = identifier[self] . identifier[has_option_with_value] ( literal[string] )
keyword[try] :
keyword[if] identifier[download] :
identifier[self] . identifier[print_info] ( literal[string] % identifier[source_url] )
identifier[downloader] = identifier[Downloader] ( identifier[logger] = identifier[self] . identifier[logger] )
identifier[result] = identifier[downloader] . identifier[audio_from_youtube] (
identifier[source_url] ,
identifier[download] = keyword[True] ,
identifier[output_file_path] = identifier[output_file_path] ,
identifier[download_format] = identifier[download_format] ,
identifier[largest_audio] = identifier[largest_audio] ,
)
identifier[self] . identifier[print_info] ( literal[string] % identifier[source_url] )
identifier[self] . identifier[print_success] ( literal[string] % identifier[result] )
keyword[else] :
identifier[self] . identifier[print_info] ( literal[string] % identifier[source_url] )
identifier[downloader] = identifier[Downloader] ( identifier[logger] = identifier[self] . identifier[logger] )
identifier[result] = identifier[downloader] . identifier[audio_from_youtube] (
identifier[source_url] ,
identifier[download] = keyword[False]
)
identifier[self] . identifier[print_info] ( literal[string] % identifier[source_url] )
identifier[msg] =[]
identifier[msg] . identifier[append] ( literal[string] %( literal[string] , literal[string] , literal[string] , literal[string] ))
keyword[for] identifier[r] keyword[in] identifier[result] :
identifier[filesize] = identifier[gf] . identifier[human_readable_number] ( identifier[r] [ literal[string] ])
identifier[msg] . identifier[append] ( literal[string] %( identifier[r] [ literal[string] ], identifier[r] [ literal[string] ], identifier[r] [ literal[string] ], identifier[filesize] ))
identifier[self] . identifier[print_generic] ( literal[string] )
identifier[self] . identifier[print_generic] ( literal[string] . identifier[join] ( identifier[msg] ))
keyword[return] identifier[self] . identifier[NO_ERROR_EXIT_CODE]
keyword[except] identifier[ImportError] :
identifier[self] . identifier[print_no_dependency_error] ()
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[self] . identifier[print_error] ( literal[string] )
identifier[self] . identifier[print_error] ( literal[string] % identifier[exc] )
keyword[return] identifier[self] . identifier[ERROR_EXIT_CODE]
|
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help() # depends on [control=['if'], data=[]]
source_url = self.actual_arguments[0]
output_file_path = self.actual_arguments[1]
download = not self.has_option('--list')
# largest_audio = True by default or if explicitly given
if self.has_option('--largest-audio'):
largest_audio = True # depends on [control=['if'], data=[]]
else:
largest_audio = not self.has_option('--smallest-audio')
download_format = self.has_option_with_value('--format')
try:
if download:
self.print_info(u"Downloading audio stream from '%s' ..." % source_url)
downloader = Downloader(logger=self.logger)
result = downloader.audio_from_youtube(source_url, download=True, output_file_path=output_file_path, download_format=download_format, largest_audio=largest_audio)
self.print_info(u"Downloading audio stream from '%s' ... done" % source_url)
self.print_success(u"Downloaded file '%s'" % result) # depends on [control=['if'], data=[]]
else:
self.print_info(u"Downloading stream info from '%s' ..." % source_url)
downloader = Downloader(logger=self.logger)
result = downloader.audio_from_youtube(source_url, download=False)
self.print_info(u"Downloading stream info from '%s' ... done" % source_url)
msg = []
msg.append(u'%s\t%s\t%s\t%s' % ('Format', 'Extension', 'Bitrate', 'Size'))
for r in result:
filesize = gf.human_readable_number(r['filesize'])
msg.append(u'%s\t%s\t%s\t%s' % (r['format'], r['ext'], r['abr'], filesize)) # depends on [control=['for'], data=['r']]
self.print_generic(u'Available audio streams:')
self.print_generic(u'\n'.join(msg))
return self.NO_ERROR_EXIT_CODE # depends on [control=['try'], data=[]]
except ImportError:
self.print_no_dependency_error() # depends on [control=['except'], data=[]]
except Exception as exc:
self.print_error(u'An unexpected error occurred while downloading audio from YouTube:')
self.print_error(u'%s' % exc) # depends on [control=['except'], data=['exc']]
return self.ERROR_EXIT_CODE
|
def worker(self, fifo):
'''Worker thread continuously filtering and converting data when data becomes available.
'''
logging.debug('Starting worker thread for %s', fifo)
self._fifo_conditions[fifo].acquire()
while True:
try:
data_tuple = self._fifo_data_deque[fifo].popleft()
except IndexError:
self._fifo_conditions[fifo].wait(self.readout_interval) # sleep a little bit, reducing CPU usage
else:
if data_tuple is None: # if None then exit
break
else:
for index, (filter_func, converter_func, fifo_select) in enumerate(izip(self.filter_func, self.converter_func, self.fifo_select)):
if fifo_select is None or fifo_select == fifo:
# filter and do the conversion
converted_data_tuple = convert_data_iterable((data_tuple,), filter_func=filter_func, converter_func=converter_func)[0]
n_data_words = converted_data_tuple[0].shape[0]
with self.data_words_per_second_lock:
self._words_per_read[index].append((n_data_words, converted_data_tuple[1], converted_data_tuple[2]))
self._data_deque[index].append(converted_data_tuple)
with self._data_conditions[index]:
self._data_conditions[index].notify_all()
for index, fifo_select in enumerate(self.fifo_select):
if fifo_select is None or fifo_select == fifo:
self._data_deque[index].append(None)
with self._data_conditions[index]:
self._data_conditions[index].notify_all()
self._fifo_conditions[fifo].release()
logging.debug('Stopping worker thread for %s', fifo)
|
def function[worker, parameter[self, fifo]]:
constant[Worker thread continuously filtering and converting data when data becomes available.
]
call[name[logging].debug, parameter[constant[Starting worker thread for %s], name[fifo]]]
call[call[name[self]._fifo_conditions][name[fifo]].acquire, parameter[]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b11e1600>
for taget[tuple[[<ast.Name object at 0x7da1b11e3460>, <ast.Name object at 0x7da1b11e0130>]]] in starred[call[name[enumerate], parameter[name[self].fifo_select]]] begin[:]
if <ast.BoolOp object at 0x7da1b11e1b40> begin[:]
call[call[name[self]._data_deque][name[index]].append, parameter[constant[None]]]
with call[name[self]._data_conditions][name[index]] begin[:]
call[call[name[self]._data_conditions][name[index]].notify_all, parameter[]]
call[call[name[self]._fifo_conditions][name[fifo]].release, parameter[]]
call[name[logging].debug, parameter[constant[Stopping worker thread for %s], name[fifo]]]
|
keyword[def] identifier[worker] ( identifier[self] , identifier[fifo] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] , identifier[fifo] )
identifier[self] . identifier[_fifo_conditions] [ identifier[fifo] ]. identifier[acquire] ()
keyword[while] keyword[True] :
keyword[try] :
identifier[data_tuple] = identifier[self] . identifier[_fifo_data_deque] [ identifier[fifo] ]. identifier[popleft] ()
keyword[except] identifier[IndexError] :
identifier[self] . identifier[_fifo_conditions] [ identifier[fifo] ]. identifier[wait] ( identifier[self] . identifier[readout_interval] )
keyword[else] :
keyword[if] identifier[data_tuple] keyword[is] keyword[None] :
keyword[break]
keyword[else] :
keyword[for] identifier[index] ,( identifier[filter_func] , identifier[converter_func] , identifier[fifo_select] ) keyword[in] identifier[enumerate] ( identifier[izip] ( identifier[self] . identifier[filter_func] , identifier[self] . identifier[converter_func] , identifier[self] . identifier[fifo_select] )):
keyword[if] identifier[fifo_select] keyword[is] keyword[None] keyword[or] identifier[fifo_select] == identifier[fifo] :
identifier[converted_data_tuple] = identifier[convert_data_iterable] (( identifier[data_tuple] ,), identifier[filter_func] = identifier[filter_func] , identifier[converter_func] = identifier[converter_func] )[ literal[int] ]
identifier[n_data_words] = identifier[converted_data_tuple] [ literal[int] ]. identifier[shape] [ literal[int] ]
keyword[with] identifier[self] . identifier[data_words_per_second_lock] :
identifier[self] . identifier[_words_per_read] [ identifier[index] ]. identifier[append] (( identifier[n_data_words] , identifier[converted_data_tuple] [ literal[int] ], identifier[converted_data_tuple] [ literal[int] ]))
identifier[self] . identifier[_data_deque] [ identifier[index] ]. identifier[append] ( identifier[converted_data_tuple] )
keyword[with] identifier[self] . identifier[_data_conditions] [ identifier[index] ]:
identifier[self] . identifier[_data_conditions] [ identifier[index] ]. identifier[notify_all] ()
keyword[for] identifier[index] , identifier[fifo_select] keyword[in] identifier[enumerate] ( identifier[self] . identifier[fifo_select] ):
keyword[if] identifier[fifo_select] keyword[is] keyword[None] keyword[or] identifier[fifo_select] == identifier[fifo] :
identifier[self] . identifier[_data_deque] [ identifier[index] ]. identifier[append] ( keyword[None] )
keyword[with] identifier[self] . identifier[_data_conditions] [ identifier[index] ]:
identifier[self] . identifier[_data_conditions] [ identifier[index] ]. identifier[notify_all] ()
identifier[self] . identifier[_fifo_conditions] [ identifier[fifo] ]. identifier[release] ()
identifier[logging] . identifier[debug] ( literal[string] , identifier[fifo] )
|
def worker(self, fifo):
"""Worker thread continuously filtering and converting data when data becomes available.
"""
logging.debug('Starting worker thread for %s', fifo)
self._fifo_conditions[fifo].acquire()
while True:
try:
data_tuple = self._fifo_data_deque[fifo].popleft() # depends on [control=['try'], data=[]]
except IndexError:
self._fifo_conditions[fifo].wait(self.readout_interval) # sleep a little bit, reducing CPU usage # depends on [control=['except'], data=[]]
else:
if data_tuple is None: # if None then exit
break # depends on [control=['if'], data=[]]
else:
for (index, (filter_func, converter_func, fifo_select)) in enumerate(izip(self.filter_func, self.converter_func, self.fifo_select)):
if fifo_select is None or fifo_select == fifo: # filter and do the conversion
converted_data_tuple = convert_data_iterable((data_tuple,), filter_func=filter_func, converter_func=converter_func)[0]
n_data_words = converted_data_tuple[0].shape[0]
with self.data_words_per_second_lock:
self._words_per_read[index].append((n_data_words, converted_data_tuple[1], converted_data_tuple[2])) # depends on [control=['with'], data=[]]
self._data_deque[index].append(converted_data_tuple)
with self._data_conditions[index]:
self._data_conditions[index].notify_all() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['while'], data=[]]
for (index, fifo_select) in enumerate(self.fifo_select):
if fifo_select is None or fifo_select == fifo:
self._data_deque[index].append(None)
with self._data_conditions[index]:
self._data_conditions[index].notify_all() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self._fifo_conditions[fifo].release()
logging.debug('Stopping worker thread for %s', fifo)
|
def from_config(cls, filename):
"""
Create a SlackObserver from a given configuration file.
The file can be in any format supported by Sacred
(.json, .pickle, [.yaml]).
It has to specify a ``webhook_url`` and can optionally set
``bot_name``, ``icon``, ``completed_text``, ``interrupted_text``, and
``failed_text``.
"""
d = load_config_file(filename)
obs = None
if 'webhook_url' in d:
obs = cls(d['webhook_url'])
else:
raise ValueError("Slack configuration file must contain "
"an entry for 'webhook_url'!")
for k in ['completed_text', 'interrupted_text', 'failed_text',
'bot_name', 'icon']:
if k in d:
setattr(obs, k, d[k])
return obs
|
def function[from_config, parameter[cls, filename]]:
constant[
Create a SlackObserver from a given configuration file.
The file can be in any format supported by Sacred
(.json, .pickle, [.yaml]).
It has to specify a ``webhook_url`` and can optionally set
``bot_name``, ``icon``, ``completed_text``, ``interrupted_text``, and
``failed_text``.
]
variable[d] assign[=] call[name[load_config_file], parameter[name[filename]]]
variable[obs] assign[=] constant[None]
if compare[constant[webhook_url] in name[d]] begin[:]
variable[obs] assign[=] call[name[cls], parameter[call[name[d]][constant[webhook_url]]]]
for taget[name[k]] in starred[list[[<ast.Constant object at 0x7da1b18faf20>, <ast.Constant object at 0x7da1b18fa260>, <ast.Constant object at 0x7da1b18fa200>, <ast.Constant object at 0x7da1b18fa650>, <ast.Constant object at 0x7da1b18fbdc0>]]] begin[:]
if compare[name[k] in name[d]] begin[:]
call[name[setattr], parameter[name[obs], name[k], call[name[d]][name[k]]]]
return[name[obs]]
|
keyword[def] identifier[from_config] ( identifier[cls] , identifier[filename] ):
literal[string]
identifier[d] = identifier[load_config_file] ( identifier[filename] )
identifier[obs] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[obs] = identifier[cls] ( identifier[d] [ literal[string] ])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[for] identifier[k] keyword[in] [ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ]:
keyword[if] identifier[k] keyword[in] identifier[d] :
identifier[setattr] ( identifier[obs] , identifier[k] , identifier[d] [ identifier[k] ])
keyword[return] identifier[obs]
|
def from_config(cls, filename):
"""
Create a SlackObserver from a given configuration file.
The file can be in any format supported by Sacred
(.json, .pickle, [.yaml]).
It has to specify a ``webhook_url`` and can optionally set
``bot_name``, ``icon``, ``completed_text``, ``interrupted_text``, and
``failed_text``.
"""
d = load_config_file(filename)
obs = None
if 'webhook_url' in d:
obs = cls(d['webhook_url']) # depends on [control=['if'], data=['d']]
else:
raise ValueError("Slack configuration file must contain an entry for 'webhook_url'!")
for k in ['completed_text', 'interrupted_text', 'failed_text', 'bot_name', 'icon']:
if k in d:
setattr(obs, k, d[k]) # depends on [control=['if'], data=['k', 'd']] # depends on [control=['for'], data=['k']]
return obs
|
def receive_data(socket):
"""Receive an answer from the daemon and return the response.
Args:
socket (socket.socket): A socket that is connected to the daemon.
Returns:
dir or string: The unpickled answer.
"""
answer = b""
while True:
packet = socket.recv(4096)
if not packet: break
answer += packet
response = pickle.loads(answer)
socket.close()
return response
|
def function[receive_data, parameter[socket]]:
constant[Receive an answer from the daemon and return the response.
Args:
socket (socket.socket): A socket that is connected to the daemon.
Returns:
dir or string: The unpickled answer.
]
variable[answer] assign[=] constant[b'']
while constant[True] begin[:]
variable[packet] assign[=] call[name[socket].recv, parameter[constant[4096]]]
if <ast.UnaryOp object at 0x7da1b0e78700> begin[:]
break
<ast.AugAssign object at 0x7da1b0e79ae0>
variable[response] assign[=] call[name[pickle].loads, parameter[name[answer]]]
call[name[socket].close, parameter[]]
return[name[response]]
|
keyword[def] identifier[receive_data] ( identifier[socket] ):
literal[string]
identifier[answer] = literal[string]
keyword[while] keyword[True] :
identifier[packet] = identifier[socket] . identifier[recv] ( literal[int] )
keyword[if] keyword[not] identifier[packet] : keyword[break]
identifier[answer] += identifier[packet]
identifier[response] = identifier[pickle] . identifier[loads] ( identifier[answer] )
identifier[socket] . identifier[close] ()
keyword[return] identifier[response]
|
def receive_data(socket):
"""Receive an answer from the daemon and return the response.
Args:
socket (socket.socket): A socket that is connected to the daemon.
Returns:
dir or string: The unpickled answer.
"""
answer = b''
while True:
packet = socket.recv(4096)
if not packet:
break # depends on [control=['if'], data=[]]
answer += packet # depends on [control=['while'], data=[]]
response = pickle.loads(answer)
socket.close()
return response
|
def _check_success(self):
"""
Returns True if task has been completed.
"""
# remember objects that are on the correct pegs
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
for i in range(len(self.ob_inits)):
obj_str = str(self.item_names[i]) + "0"
obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
dist = np.linalg.norm(gripper_site_pos - obj_pos)
r_reach = 1 - np.tanh(10.0 * dist)
self.objects_on_pegs[i] = int(self.on_peg(obj_pos, i) and r_reach < 0.6)
if self.single_object_mode > 0:
return np.sum(self.objects_on_pegs) > 0 # need one object on peg
# returns True if all objects are on correct pegs
return np.sum(self.objects_on_pegs) == len(self.ob_inits)
|
def function[_check_success, parameter[self]]:
constant[
Returns True if task has been completed.
]
variable[gripper_site_pos] assign[=] call[name[self].sim.data.site_xpos][name[self].eef_site_id]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].ob_inits]]]]] begin[:]
variable[obj_str] assign[=] binary_operation[call[name[str], parameter[call[name[self].item_names][name[i]]]] + constant[0]]
variable[obj_pos] assign[=] call[name[self].sim.data.body_xpos][call[name[self].obj_body_id][name[obj_str]]]
variable[dist] assign[=] call[name[np].linalg.norm, parameter[binary_operation[name[gripper_site_pos] - name[obj_pos]]]]
variable[r_reach] assign[=] binary_operation[constant[1] - call[name[np].tanh, parameter[binary_operation[constant[10.0] * name[dist]]]]]
call[name[self].objects_on_pegs][name[i]] assign[=] call[name[int], parameter[<ast.BoolOp object at 0x7da18eb57400>]]
if compare[name[self].single_object_mode greater[>] constant[0]] begin[:]
return[compare[call[name[np].sum, parameter[name[self].objects_on_pegs]] greater[>] constant[0]]]
return[compare[call[name[np].sum, parameter[name[self].objects_on_pegs]] equal[==] call[name[len], parameter[name[self].ob_inits]]]]
|
keyword[def] identifier[_check_success] ( identifier[self] ):
literal[string]
identifier[gripper_site_pos] = identifier[self] . identifier[sim] . identifier[data] . identifier[site_xpos] [ identifier[self] . identifier[eef_site_id] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[ob_inits] )):
identifier[obj_str] = identifier[str] ( identifier[self] . identifier[item_names] [ identifier[i] ])+ literal[string]
identifier[obj_pos] = identifier[self] . identifier[sim] . identifier[data] . identifier[body_xpos] [ identifier[self] . identifier[obj_body_id] [ identifier[obj_str] ]]
identifier[dist] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[gripper_site_pos] - identifier[obj_pos] )
identifier[r_reach] = literal[int] - identifier[np] . identifier[tanh] ( literal[int] * identifier[dist] )
identifier[self] . identifier[objects_on_pegs] [ identifier[i] ]= identifier[int] ( identifier[self] . identifier[on_peg] ( identifier[obj_pos] , identifier[i] ) keyword[and] identifier[r_reach] < literal[int] )
keyword[if] identifier[self] . identifier[single_object_mode] > literal[int] :
keyword[return] identifier[np] . identifier[sum] ( identifier[self] . identifier[objects_on_pegs] )> literal[int]
keyword[return] identifier[np] . identifier[sum] ( identifier[self] . identifier[objects_on_pegs] )== identifier[len] ( identifier[self] . identifier[ob_inits] )
|
def _check_success(self):
"""
Returns True if task has been completed.
"""
# remember objects that are on the correct pegs
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
for i in range(len(self.ob_inits)):
obj_str = str(self.item_names[i]) + '0'
obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
dist = np.linalg.norm(gripper_site_pos - obj_pos)
r_reach = 1 - np.tanh(10.0 * dist)
self.objects_on_pegs[i] = int(self.on_peg(obj_pos, i) and r_reach < 0.6) # depends on [control=['for'], data=['i']]
if self.single_object_mode > 0:
return np.sum(self.objects_on_pegs) > 0 # need one object on peg # depends on [control=['if'], data=[]]
# returns True if all objects are on correct pegs
return np.sum(self.objects_on_pegs) == len(self.ob_inits)
|
def _parse (self):
"""Parse the BDF mime structure and record the locations of the binary
blobs. Sets up various data fields in the BDFData object."""
feedparser = FeedParser (Message)
binarychunks = {}
sizeinfo = None
headxml = None
self.fp.seek (0, 0)
while True:
data = self.fp.readline ()
if not data:
break
feedparser.feed (data)
skip = (data == '\n' and
len (feedparser._msgstack) == 3 and
feedparser._msgstack[-1].get_content_type () in ('application/octet-stream',
'binary/octet-stream'))
if skip:
# We just finished reading the headers for a huge binary blob.
# Time to remember where the data chunk is and pretend it doesn't
# exist.
msg = feedparser._msgstack[-1]
ident = msg['Content-Location']
assert ident.endswith ('.bin'), 'confusion #1 in hacky MIME parsing!'
binarychunks[ident] = self.fp.tell ()
if sizeinfo is None:
headxml, sizeinfo, tagpfx = _extract_size_info (feedparser)
kind = ident.split ('/')[-1]
assert kind in sizeinfo, 'no size info for binary chunk kind %s in MIME!' % kind
self.fp.seek (sizeinfo[kind] + 1, 1) # skip ahead by data chunk size
sample = self.fp.read (16)
assert sample.startswith ('--MIME'), 'crap, unexpected chunk size in MIME parsing: %r' % sample
self.fp.seek (-16, 1) # go back
# check that two major kinds of data are read at least once
if any([k.split('/')[3] == '3' for k in binarychunks.iterkeys()]):
break
if headxml is None:
raise RuntimeError ('never found any binary data')
self.mimemsg = feedparser.close ()
self.headxml = headxml
self.sizeinfo = sizeinfo
self.binarychunks = binarychunks
headsize, intsize = self.calc_intsize()
# Compute some miscellaneous parameters that we'll need.
# self.n_integrations = len (self.mimemsg.get_payload ()) - 1
self.n_integrations = os.stat(self.fp.name).st_size/intsize
self.n_antennas = int (headxml.find (tagpfx + nanttag).text)
self.n_baselines = (self.n_antennas * (self.n_antennas - 1)) // 2
ds = headxml.find (tagpfx + dstag)
nbb = 0
nspw = 0
nchan = 0
crosspolstr = None
for bb in ds.findall (tagpfx + basebandtag):
nbb += 1
for spw in bb.getchildren ():
nspw += 1
nchan += int (spw.get ('numSpectralPoint'))
if crosspolstr is None:
crosspolstr = spw.get ('crossPolProducts')
elif spw.get ('crossPolProducts') != crosspolstr:
raise Exception ('can only handle spectral windows with identical cross pol products')
self.n_basebands = nbb
self.n_spws = nspw
self.n_channels = nchan
self.crosspols = crosspolstr.split ()
self.n_pols = len(self.crosspols)
# if bdf info pkl not present, write it
if os.path.exists(os.path.dirname(self.pklname)) and self.pklname and (not os.path.exists(self.pklname)):
logger.info('Writing bdf pkl info to %s...' % (self.pklname))
with open(self.pklname,'wb') as pkl:
# Compute some miscellaneous parameters that we'll need.
pickle.dump( (self.mimemsg, self.headxml, self.sizeinfo, self.binarychunks, self.n_integrations, self.n_antennas, self.n_baselines, self.n_basebands, self.n_spws, self.n_channels, self.crosspols), pkl)
return self
|
def function[_parse, parameter[self]]:
constant[Parse the BDF mime structure and record the locations of the binary
blobs. Sets up various data fields in the BDFData object.]
variable[feedparser] assign[=] call[name[FeedParser], parameter[name[Message]]]
variable[binarychunks] assign[=] dictionary[[], []]
variable[sizeinfo] assign[=] constant[None]
variable[headxml] assign[=] constant[None]
call[name[self].fp.seek, parameter[constant[0], constant[0]]]
while constant[True] begin[:]
variable[data] assign[=] call[name[self].fp.readline, parameter[]]
if <ast.UnaryOp object at 0x7da18f721db0> begin[:]
break
call[name[feedparser].feed, parameter[name[data]]]
variable[skip] assign[=] <ast.BoolOp object at 0x7da18f720d60>
if name[skip] begin[:]
variable[msg] assign[=] call[name[feedparser]._msgstack][<ast.UnaryOp object at 0x7da18f720bb0>]
variable[ident] assign[=] call[name[msg]][constant[Content-Location]]
assert[call[name[ident].endswith, parameter[constant[.bin]]]]
call[name[binarychunks]][name[ident]] assign[=] call[name[self].fp.tell, parameter[]]
if compare[name[sizeinfo] is constant[None]] begin[:]
<ast.Tuple object at 0x7da18f720c70> assign[=] call[name[_extract_size_info], parameter[name[feedparser]]]
variable[kind] assign[=] call[call[name[ident].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da18f723dc0>]
assert[compare[name[kind] in name[sizeinfo]]]
call[name[self].fp.seek, parameter[binary_operation[call[name[sizeinfo]][name[kind]] + constant[1]], constant[1]]]
variable[sample] assign[=] call[name[self].fp.read, parameter[constant[16]]]
assert[call[name[sample].startswith, parameter[constant[--MIME]]]]
call[name[self].fp.seek, parameter[<ast.UnaryOp object at 0x7da18f723fa0>, constant[1]]]
if call[name[any], parameter[<ast.ListComp object at 0x7da18f7239d0>]] begin[:]
break
if compare[name[headxml] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f722dd0>
name[self].mimemsg assign[=] call[name[feedparser].close, parameter[]]
name[self].headxml assign[=] name[headxml]
name[self].sizeinfo assign[=] name[sizeinfo]
name[self].binarychunks assign[=] name[binarychunks]
<ast.Tuple object at 0x7da18f7215d0> assign[=] call[name[self].calc_intsize, parameter[]]
name[self].n_integrations assign[=] binary_operation[call[name[os].stat, parameter[name[self].fp.name]].st_size / name[intsize]]
name[self].n_antennas assign[=] call[name[int], parameter[call[name[headxml].find, parameter[binary_operation[name[tagpfx] + name[nanttag]]]].text]]
name[self].n_baselines assign[=] binary_operation[binary_operation[name[self].n_antennas * binary_operation[name[self].n_antennas - constant[1]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[ds] assign[=] call[name[headxml].find, parameter[binary_operation[name[tagpfx] + name[dstag]]]]
variable[nbb] assign[=] constant[0]
variable[nspw] assign[=] constant[0]
variable[nchan] assign[=] constant[0]
variable[crosspolstr] assign[=] constant[None]
for taget[name[bb]] in starred[call[name[ds].findall, parameter[binary_operation[name[tagpfx] + name[basebandtag]]]]] begin[:]
<ast.AugAssign object at 0x7da18f723790>
for taget[name[spw]] in starred[call[name[bb].getchildren, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da18f721090>
<ast.AugAssign object at 0x7da18f7217e0>
if compare[name[crosspolstr] is constant[None]] begin[:]
variable[crosspolstr] assign[=] call[name[spw].get, parameter[constant[crossPolProducts]]]
name[self].n_basebands assign[=] name[nbb]
name[self].n_spws assign[=] name[nspw]
name[self].n_channels assign[=] name[nchan]
name[self].crosspols assign[=] call[name[crosspolstr].split, parameter[]]
name[self].n_pols assign[=] call[name[len], parameter[name[self].crosspols]]
if <ast.BoolOp object at 0x7da20cabd7e0> begin[:]
call[name[logger].info, parameter[binary_operation[constant[Writing bdf pkl info to %s...] <ast.Mod object at 0x7da2590d6920> name[self].pklname]]]
with call[name[open], parameter[name[self].pklname, constant[wb]]] begin[:]
call[name[pickle].dump, parameter[tuple[[<ast.Attribute object at 0x7da20cabfb20>, <ast.Attribute object at 0x7da20cabf460>, <ast.Attribute object at 0x7da20cabe590>, <ast.Attribute object at 0x7da20cabce80>, <ast.Attribute object at 0x7da20cabc640>, <ast.Attribute object at 0x7da20cabcd00>, <ast.Attribute object at 0x7da20cabe0b0>, <ast.Attribute object at 0x7da20cabf2b0>, <ast.Attribute object at 0x7da20cabc940>, <ast.Attribute object at 0x7da20cabee60>, <ast.Attribute object at 0x7da20cabeb30>]], name[pkl]]]
return[name[self]]
|
keyword[def] identifier[_parse] ( identifier[self] ):
literal[string]
identifier[feedparser] = identifier[FeedParser] ( identifier[Message] )
identifier[binarychunks] ={}
identifier[sizeinfo] = keyword[None]
identifier[headxml] = keyword[None]
identifier[self] . identifier[fp] . identifier[seek] ( literal[int] , literal[int] )
keyword[while] keyword[True] :
identifier[data] = identifier[self] . identifier[fp] . identifier[readline] ()
keyword[if] keyword[not] identifier[data] :
keyword[break]
identifier[feedparser] . identifier[feed] ( identifier[data] )
identifier[skip] =( identifier[data] == literal[string] keyword[and]
identifier[len] ( identifier[feedparser] . identifier[_msgstack] )== literal[int] keyword[and]
identifier[feedparser] . identifier[_msgstack] [- literal[int] ]. identifier[get_content_type] () keyword[in] ( literal[string] ,
literal[string] ))
keyword[if] identifier[skip] :
identifier[msg] = identifier[feedparser] . identifier[_msgstack] [- literal[int] ]
identifier[ident] = identifier[msg] [ literal[string] ]
keyword[assert] identifier[ident] . identifier[endswith] ( literal[string] ), literal[string]
identifier[binarychunks] [ identifier[ident] ]= identifier[self] . identifier[fp] . identifier[tell] ()
keyword[if] identifier[sizeinfo] keyword[is] keyword[None] :
identifier[headxml] , identifier[sizeinfo] , identifier[tagpfx] = identifier[_extract_size_info] ( identifier[feedparser] )
identifier[kind] = identifier[ident] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[assert] identifier[kind] keyword[in] identifier[sizeinfo] , literal[string] % identifier[kind]
identifier[self] . identifier[fp] . identifier[seek] ( identifier[sizeinfo] [ identifier[kind] ]+ literal[int] , literal[int] )
identifier[sample] = identifier[self] . identifier[fp] . identifier[read] ( literal[int] )
keyword[assert] identifier[sample] . identifier[startswith] ( literal[string] ), literal[string] % identifier[sample]
identifier[self] . identifier[fp] . identifier[seek] (- literal[int] , literal[int] )
keyword[if] identifier[any] ([ identifier[k] . identifier[split] ( literal[string] )[ literal[int] ]== literal[string] keyword[for] identifier[k] keyword[in] identifier[binarychunks] . identifier[iterkeys] ()]):
keyword[break]
keyword[if] identifier[headxml] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[mimemsg] = identifier[feedparser] . identifier[close] ()
identifier[self] . identifier[headxml] = identifier[headxml]
identifier[self] . identifier[sizeinfo] = identifier[sizeinfo]
identifier[self] . identifier[binarychunks] = identifier[binarychunks]
identifier[headsize] , identifier[intsize] = identifier[self] . identifier[calc_intsize] ()
identifier[self] . identifier[n_integrations] = identifier[os] . identifier[stat] ( identifier[self] . identifier[fp] . identifier[name] ). identifier[st_size] / identifier[intsize]
identifier[self] . identifier[n_antennas] = identifier[int] ( identifier[headxml] . identifier[find] ( identifier[tagpfx] + identifier[nanttag] ). identifier[text] )
identifier[self] . identifier[n_baselines] =( identifier[self] . identifier[n_antennas] *( identifier[self] . identifier[n_antennas] - literal[int] ))// literal[int]
identifier[ds] = identifier[headxml] . identifier[find] ( identifier[tagpfx] + identifier[dstag] )
identifier[nbb] = literal[int]
identifier[nspw] = literal[int]
identifier[nchan] = literal[int]
identifier[crosspolstr] = keyword[None]
keyword[for] identifier[bb] keyword[in] identifier[ds] . identifier[findall] ( identifier[tagpfx] + identifier[basebandtag] ):
identifier[nbb] += literal[int]
keyword[for] identifier[spw] keyword[in] identifier[bb] . identifier[getchildren] ():
identifier[nspw] += literal[int]
identifier[nchan] += identifier[int] ( identifier[spw] . identifier[get] ( literal[string] ))
keyword[if] identifier[crosspolstr] keyword[is] keyword[None] :
identifier[crosspolstr] = identifier[spw] . identifier[get] ( literal[string] )
keyword[elif] identifier[spw] . identifier[get] ( literal[string] )!= identifier[crosspolstr] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[n_basebands] = identifier[nbb]
identifier[self] . identifier[n_spws] = identifier[nspw]
identifier[self] . identifier[n_channels] = identifier[nchan]
identifier[self] . identifier[crosspols] = identifier[crosspolstr] . identifier[split] ()
identifier[self] . identifier[n_pols] = identifier[len] ( identifier[self] . identifier[crosspols] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[pklname] )) keyword[and] identifier[self] . identifier[pklname] keyword[and] ( keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[pklname] )):
identifier[logger] . identifier[info] ( literal[string] %( identifier[self] . identifier[pklname] ))
keyword[with] identifier[open] ( identifier[self] . identifier[pklname] , literal[string] ) keyword[as] identifier[pkl] :
identifier[pickle] . identifier[dump] (( identifier[self] . identifier[mimemsg] , identifier[self] . identifier[headxml] , identifier[self] . identifier[sizeinfo] , identifier[self] . identifier[binarychunks] , identifier[self] . identifier[n_integrations] , identifier[self] . identifier[n_antennas] , identifier[self] . identifier[n_baselines] , identifier[self] . identifier[n_basebands] , identifier[self] . identifier[n_spws] , identifier[self] . identifier[n_channels] , identifier[self] . identifier[crosspols] ), identifier[pkl] )
keyword[return] identifier[self]
|
def _parse(self):
"""Parse the BDF mime structure and record the locations of the binary
blobs. Sets up various data fields in the BDFData object."""
feedparser = FeedParser(Message)
binarychunks = {}
sizeinfo = None
headxml = None
self.fp.seek(0, 0)
while True:
data = self.fp.readline()
if not data:
break # depends on [control=['if'], data=[]]
feedparser.feed(data)
skip = data == '\n' and len(feedparser._msgstack) == 3 and (feedparser._msgstack[-1].get_content_type() in ('application/octet-stream', 'binary/octet-stream'))
if skip: # We just finished reading the headers for a huge binary blob.
# Time to remember where the data chunk is and pretend it doesn't
# exist.
msg = feedparser._msgstack[-1]
ident = msg['Content-Location']
assert ident.endswith('.bin'), 'confusion #1 in hacky MIME parsing!'
binarychunks[ident] = self.fp.tell()
if sizeinfo is None:
(headxml, sizeinfo, tagpfx) = _extract_size_info(feedparser) # depends on [control=['if'], data=['sizeinfo']]
kind = ident.split('/')[-1]
assert kind in sizeinfo, 'no size info for binary chunk kind %s in MIME!' % kind
self.fp.seek(sizeinfo[kind] + 1, 1) # skip ahead by data chunk size
sample = self.fp.read(16)
assert sample.startswith('--MIME'), 'crap, unexpected chunk size in MIME parsing: %r' % sample
self.fp.seek(-16, 1) # go back # depends on [control=['if'], data=[]] # check that two major kinds of data are read at least once
if any([k.split('/')[3] == '3' for k in binarychunks.iterkeys()]):
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if headxml is None:
raise RuntimeError('never found any binary data') # depends on [control=['if'], data=[]]
self.mimemsg = feedparser.close()
self.headxml = headxml
self.sizeinfo = sizeinfo
self.binarychunks = binarychunks
(headsize, intsize) = self.calc_intsize() # Compute some miscellaneous parameters that we'll need.
# self.n_integrations = len (self.mimemsg.get_payload ()) - 1
self.n_integrations = os.stat(self.fp.name).st_size / intsize
self.n_antennas = int(headxml.find(tagpfx + nanttag).text)
self.n_baselines = self.n_antennas * (self.n_antennas - 1) // 2
ds = headxml.find(tagpfx + dstag)
nbb = 0
nspw = 0
nchan = 0
crosspolstr = None
for bb in ds.findall(tagpfx + basebandtag):
nbb += 1
for spw in bb.getchildren():
nspw += 1
nchan += int(spw.get('numSpectralPoint'))
if crosspolstr is None:
crosspolstr = spw.get('crossPolProducts') # depends on [control=['if'], data=['crosspolstr']]
elif spw.get('crossPolProducts') != crosspolstr:
raise Exception('can only handle spectral windows with identical cross pol products') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['spw']] # depends on [control=['for'], data=['bb']]
self.n_basebands = nbb
self.n_spws = nspw
self.n_channels = nchan
self.crosspols = crosspolstr.split()
self.n_pols = len(self.crosspols) # if bdf info pkl not present, write it
if os.path.exists(os.path.dirname(self.pklname)) and self.pklname and (not os.path.exists(self.pklname)):
logger.info('Writing bdf pkl info to %s...' % self.pklname)
        with open(self.pklname, 'wb') as pkl:  # cache the parsed metadata for reuse
pickle.dump((self.mimemsg, self.headxml, self.sizeinfo, self.binarychunks, self.n_integrations, self.n_antennas, self.n_baselines, self.n_basebands, self.n_spws, self.n_channels, self.crosspols), pkl) # depends on [control=['with'], data=['pkl']] # depends on [control=['if'], data=[]]
return self
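# Illustrative aside (an addition for clarity, not part of the parsed source):
# the baseline count computed in _parse above follows the usual
# cross-correlation formula, sketched here as a standalone helper.
def _n_baselines_sketch(n_antennas):
    # Number of unique antenna pairs among n_antennas antennas.
    return n_antennas * (n_antennas - 1) // 2

# e.g. _n_baselines_sketch(27) == 351 for a 27-antenna VLA-style array.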
|
def settings(self, key=None, section=None):
"""The settings overridden from the wandb/settings file.
Args:
        key (str, optional): If provided, only this setting is returned
        section (str, optional): If provided, this section of the settings file is
            used; defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
}
"""
if not self._settings:
self._settings = self.default_settings.copy()
section = section or self._settings['section']
try:
if section in self.settings_parser.sections():
for option in self.settings_parser.options(section):
self._settings[option] = self.settings_parser.get(
section, option)
except configparser.InterpolationSyntaxError:
print("WARNING: Unable to parse settings file")
self._settings["project"] = env.get_project(
self._settings.get("project"))
self._settings["entity"] = env.get_entity(
self._settings.get("entity"))
self._settings["base_url"] = env.get_base_url(
self._settings.get("base_url"))
self._settings["ignore_globs"] = env.get_ignore(
self._settings.get("ignore_globs")
)
return self._settings if key is None else self._settings[key]
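# Usage sketch (hedged; the Api object name below is illustrative): given an
# instance exposing this method together with `default_settings` and a
# configparser-backed `settings_parser`, the merged settings can be read
# whole or by key.
#
# api = Api()                      # hypothetical constructor
# api.settings()                   # -> full dict, with env overrides applied
# api.settings('base_url')         # -> e.g. "https://api.wandb.ai"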
|
def function[settings, parameter[self, key, section]]:
constant[The settings overridden from the wandb/settings file.
Args:
        key (str, optional): If provided, only this setting is returned
        section (str, optional): If provided, this section of the settings file is
            used; defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
}
]
if <ast.UnaryOp object at 0x7da207f02d70> begin[:]
name[self]._settings assign[=] call[name[self].default_settings.copy, parameter[]]
variable[section] assign[=] <ast.BoolOp object at 0x7da207f03c70>
<ast.Try object at 0x7da207f02e60>
call[name[self]._settings][constant[project]] assign[=] call[name[env].get_project, parameter[call[name[self]._settings.get, parameter[constant[project]]]]]
call[name[self]._settings][constant[entity]] assign[=] call[name[env].get_entity, parameter[call[name[self]._settings.get, parameter[constant[entity]]]]]
call[name[self]._settings][constant[base_url]] assign[=] call[name[env].get_base_url, parameter[call[name[self]._settings.get, parameter[constant[base_url]]]]]
call[name[self]._settings][constant[ignore_globs]] assign[=] call[name[env].get_ignore, parameter[call[name[self]._settings.get, parameter[constant[ignore_globs]]]]]
return[<ast.IfExp object at 0x7da1b08a5ea0>]
|
keyword[def] identifier[settings] ( identifier[self] , identifier[key] = keyword[None] , identifier[section] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_settings] :
identifier[self] . identifier[_settings] = identifier[self] . identifier[default_settings] . identifier[copy] ()
identifier[section] = identifier[section] keyword[or] identifier[self] . identifier[_settings] [ literal[string] ]
keyword[try] :
keyword[if] identifier[section] keyword[in] identifier[self] . identifier[settings_parser] . identifier[sections] ():
keyword[for] identifier[option] keyword[in] identifier[self] . identifier[settings_parser] . identifier[options] ( identifier[section] ):
identifier[self] . identifier[_settings] [ identifier[option] ]= identifier[self] . identifier[settings_parser] . identifier[get] (
identifier[section] , identifier[option] )
keyword[except] identifier[configparser] . identifier[InterpolationSyntaxError] :
identifier[print] ( literal[string] )
identifier[self] . identifier[_settings] [ literal[string] ]= identifier[env] . identifier[get_project] (
identifier[self] . identifier[_settings] . identifier[get] ( literal[string] ))
identifier[self] . identifier[_settings] [ literal[string] ]= identifier[env] . identifier[get_entity] (
identifier[self] . identifier[_settings] . identifier[get] ( literal[string] ))
identifier[self] . identifier[_settings] [ literal[string] ]= identifier[env] . identifier[get_base_url] (
identifier[self] . identifier[_settings] . identifier[get] ( literal[string] ))
identifier[self] . identifier[_settings] [ literal[string] ]= identifier[env] . identifier[get_ignore] (
identifier[self] . identifier[_settings] . identifier[get] ( literal[string] )
)
keyword[return] identifier[self] . identifier[_settings] keyword[if] identifier[key] keyword[is] keyword[None] keyword[else] identifier[self] . identifier[_settings] [ identifier[key] ]
|
def settings(self, key=None, section=None):
"""The settings overridden from the wandb/settings file.
Args:
        key (str, optional): If provided, only this setting is returned
        section (str, optional): If provided, this section of the settings file is
            used; defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
}
"""
if not self._settings:
self._settings = self.default_settings.copy()
section = section or self._settings['section']
try:
if section in self.settings_parser.sections():
for option in self.settings_parser.options(section):
self._settings[option] = self.settings_parser.get(section, option) # depends on [control=['for'], data=['option']] # depends on [control=['if'], data=['section']] # depends on [control=['try'], data=[]]
except configparser.InterpolationSyntaxError:
print('WARNING: Unable to parse settings file') # depends on [control=['except'], data=[]]
self._settings['project'] = env.get_project(self._settings.get('project'))
self._settings['entity'] = env.get_entity(self._settings.get('entity'))
self._settings['base_url'] = env.get_base_url(self._settings.get('base_url'))
self._settings['ignore_globs'] = env.get_ignore(self._settings.get('ignore_globs')) # depends on [control=['if'], data=[]]
return self._settings if key is None else self._settings[key]
|
def calc_distribution_stats(x):
"""Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
        Series containing the mean, median, std and IQR, as well as the 5, 25,
        75 and 95 percentiles of the passed-in values.
"""
return pd.Series({'mean': np.mean(x),
'median': np.median(x),
'std': np.std(x),
'5%': np.percentile(x, 5),
'25%': np.percentile(x, 25),
'75%': np.percentile(x, 75),
'95%': np.percentile(x, 95),
'IQR': np.subtract.reduce(
np.percentile(x, [75, 25])),
})
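# Example usage (a sketch; assumes numpy is imported as np at module level,
# as the function body above already requires):
#
# stats = calc_distribution_stats(np.random.randn(1000))
# stats['mean']   # ~0.0 for standard-normal draws
# stats['IQR']    # ~1.35, the interquartile range of a standard normal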
|
def function[calc_distribution_stats, parameter[x]]:
constant[Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
        Series containing the mean, median, std and IQR, as well as the 5, 25,
        75 and 95 percentiles of the passed-in values.
]
return[call[name[pd].Series, parameter[dictionary[[<ast.Constant object at 0x7da1b02940d0>, <ast.Constant object at 0x7da1b0294460>, <ast.Constant object at 0x7da1b0294cd0>, <ast.Constant object at 0x7da1b0294430>, <ast.Constant object at 0x7da1b0294400>, <ast.Constant object at 0x7da1b0294a90>, <ast.Constant object at 0x7da1b0294130>, <ast.Constant object at 0x7da1b0294490>], [<ast.Call object at 0x7da1b0294c40>, <ast.Call object at 0x7da1b0294910>, <ast.Call object at 0x7da1b0294ac0>, <ast.Call object at 0x7da1b003c850>, <ast.Call object at 0x7da1b003f7f0>, <ast.Call object at 0x7da1b003f820>, <ast.Call object at 0x7da1b003f880>, <ast.Call object at 0x7da1b003cb80>]]]]]
|
keyword[def] identifier[calc_distribution_stats] ( identifier[x] ):
literal[string]
keyword[return] identifier[pd] . identifier[Series] ({ literal[string] : identifier[np] . identifier[mean] ( identifier[x] ),
literal[string] : identifier[np] . identifier[median] ( identifier[x] ),
literal[string] : identifier[np] . identifier[std] ( identifier[x] ),
literal[string] : identifier[np] . identifier[percentile] ( identifier[x] , literal[int] ),
literal[string] : identifier[np] . identifier[percentile] ( identifier[x] , literal[int] ),
literal[string] : identifier[np] . identifier[percentile] ( identifier[x] , literal[int] ),
literal[string] : identifier[np] . identifier[percentile] ( identifier[x] , literal[int] ),
literal[string] : identifier[np] . identifier[subtract] . identifier[reduce] (
identifier[np] . identifier[percentile] ( identifier[x] ,[ literal[int] , literal[int] ])),
})
|
def calc_distribution_stats(x):
"""Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
        Series containing the mean, median, std and IQR, as well as the 5, 25,
        75 and 95 percentiles of the passed-in values.
"""
return pd.Series({'mean': np.mean(x), 'median': np.median(x), 'std': np.std(x), '5%': np.percentile(x, 5), '25%': np.percentile(x, 25), '75%': np.percentile(x, 75), '95%': np.percentile(x, 95), 'IQR': np.subtract.reduce(np.percentile(x, [75, 25]))})
|
def get_error(self, errstr):
'''
Parse out an error and return a targeted error string
'''
for line in errstr.split('\n'):
if line.startswith('ssh:'):
return line
if line.startswith('Pseudo-terminal'):
continue
if 'to the list of known hosts.' in line:
continue
return line
return errstr
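# Usage sketch with illustrative stderr text: noise lines are skipped and the
# first meaningful line is returned (`shell` stands for a hypothetical
# instance of the enclosing class).
#
# err = ('Pseudo-terminal will not be allocated because stdin is not a terminal.\n'
#        "Warning: Permanently added 'host' (ECDSA) to the list of known hosts.\n"
#        'ssh: connect to host example.com port 22: Connection refused')
# shell.get_error(err)
# # -> 'ssh: connect to host example.com port 22: Connection refused'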
|
def function[get_error, parameter[self, errstr]]:
constant[
Parse out an error and return a targeted error string
]
for taget[name[line]] in starred[call[name[errstr].split, parameter[constant[
]]]] begin[:]
if call[name[line].startswith, parameter[constant[ssh:]]] begin[:]
return[name[line]]
if call[name[line].startswith, parameter[constant[Pseudo-terminal]]] begin[:]
continue
if compare[constant[to the list of known hosts.] in name[line]] begin[:]
continue
return[name[line]]
return[name[errstr]]
|
keyword[def] identifier[get_error] ( identifier[self] , identifier[errstr] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[errstr] . identifier[split] ( literal[string] ):
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[line]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
keyword[continue]
keyword[return] identifier[line]
keyword[return] identifier[errstr]
|
def get_error(self, errstr):
"""
Parse out an error and return a targeted error string
"""
for line in errstr.split('\n'):
if line.startswith('ssh:'):
return line # depends on [control=['if'], data=[]]
if line.startswith('Pseudo-terminal'):
continue # depends on [control=['if'], data=[]]
if 'to the list of known hosts.' in line:
continue # depends on [control=['if'], data=[]]
return line # depends on [control=['for'], data=['line']]
return errstr
|
def _set_interface_dynamic_bypass_name_prefix(self, v, load=False):
"""
Setter method for interface_dynamic_bypass_name_prefix, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/interface_dynamic_bypass_name_prefix (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_dynamic_bypass_name_prefix must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
})
self.__interface_dynamic_bypass_name_prefix = t
if hasattr(self, '_set'):
self._set()
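# Usage sketch (hedged; `cfg` is a hypothetical instance of the generated
# pyangbind config class): backends populate the leaf by calling the setter
# directly, and values violating the 1..21 length restriction should raise
# the ValueError constructed above.
#
# cfg._set_interface_dynamic_bypass_name_prefix(u'dbyp-te1')  # accepted
# cfg._set_interface_dynamic_bypass_name_prefix(u'')          # ValueError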
|
def function[_set_interface_dynamic_bypass_name_prefix, parameter[self, v, load]]:
constant[
Setter method for interface_dynamic_bypass_name_prefix, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/interface_dynamic_bypass_name_prefix (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c6c6ec0>
name[self].__interface_dynamic_bypass_name_prefix assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_interface_dynamic_bypass_name_prefix] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}), identifier[default] = identifier[unicode] ( literal[string] ), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__interface_dynamic_bypass_name_prefix] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_interface_dynamic_bypass_name_prefix(self, v, load=False):
"""
Setter method for interface_dynamic_bypass_name_prefix, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/interface_dynamic_bypass_name_prefix (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass_name_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass_name_prefix() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), default=unicode('dbyp'), is_leaf=True, yang_name='interface-dynamic-bypass-name-prefix', rest_name='name-prefix', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'interface level dynamic bypass name prefix', u'alt-name': u'name-prefix', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'interface_dynamic_bypass_name_prefix must be of a type compatible with string', 'defined-type': 'string', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'length\': [u\'1..21\']}), default=unicode("dbyp"), is_leaf=True, yang_name="interface-dynamic-bypass-name-prefix", rest_name="name-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'info\': u\'interface level dynamic bypass name prefix\', u\'alt-name\': u\'name-prefix\', u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'string\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__interface_dynamic_bypass_name_prefix = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def get_version(name, dirs=None, config=None):
"""Retrieve the current version of the given program from cached names.
"""
if dirs:
p = _get_program_file(dirs)
else:
p = tz.get_in(["resources", "program_versions"], config)
if p:
with open(p) as in_handle:
for line in in_handle:
prog, version = line.rstrip().split(",")
if prog == name and version:
return version
raise KeyError("Version information not found for %s in %s" % (name, p))
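# Usage sketch: the cached file holds comma-separated "name,version" lines,
# e.g.
#
#   bwa,0.7.17
#   samtools,1.9
#
# so with an illustrative path in the config dict:
#
# get_version('bwa', config={'resources': {'program_versions': '/path/programs.txt'}})
# # -> '0.7.17'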
|
def function[get_version, parameter[name, dirs, config]]:
    constant[Retrieve the current version of the given program from the cached version file.
]
if name[dirs] begin[:]
variable[p] assign[=] call[name[_get_program_file], parameter[name[dirs]]]
if name[p] begin[:]
with call[name[open], parameter[name[p]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
<ast.Tuple object at 0x7da20c76fd00> assign[=] call[call[name[line].rstrip, parameter[]].split, parameter[constant[,]]]
if <ast.BoolOp object at 0x7da1b2345540> begin[:]
return[name[version]]
<ast.Raise object at 0x7da1b2347490>
|
keyword[def] identifier[get_version] ( identifier[name] , identifier[dirs] = keyword[None] , identifier[config] = keyword[None] ):
literal[string]
keyword[if] identifier[dirs] :
identifier[p] = identifier[_get_program_file] ( identifier[dirs] )
keyword[else] :
identifier[p] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] ], identifier[config] )
keyword[if] identifier[p] :
keyword[with] identifier[open] ( identifier[p] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
identifier[prog] , identifier[version] = identifier[line] . identifier[rstrip] (). identifier[split] ( literal[string] )
keyword[if] identifier[prog] == identifier[name] keyword[and] identifier[version] :
keyword[return] identifier[version]
keyword[raise] identifier[KeyError] ( literal[string] %( identifier[name] , identifier[p] ))
|
def get_version(name, dirs=None, config=None):
"""Retrieve the current version of the given program from cached names.
"""
if dirs:
p = _get_program_file(dirs) # depends on [control=['if'], data=[]]
else:
p = tz.get_in(['resources', 'program_versions'], config)
if p:
with open(p) as in_handle:
for line in in_handle:
(prog, version) = line.rstrip().split(',')
if prog == name and version:
return version # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']]
raise KeyError('Version information not found for %s in %s' % (name, p)) # depends on [control=['if'], data=[]]
|
def cache_cluster_present(name, wait=900, security_groups=None, region=None, key=None,
keyid=None, profile=None, **args):
'''
Ensure a given cache cluster exists.
name
Name of the cache cluster (cache cluster id).
wait
Integer describing how long, in seconds, to wait for confirmation from AWS that the
        resource is in the desired state. A value of zero returns success or failure
        immediately. Note that waiting for the cluster to become available is generally the
better course, as failure to do so will often lead to subsequent failures when managing
dependent resources.
security_groups
One or more VPC security groups (names and/or IDs) associated with the cache cluster.
.. note::
This is additive with any sec groups provided via the
SecurityGroupIds parameter below. Use this parameter ONLY when you
are creating a cluster in a VPC.
CacheClusterId
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
- A name must contain from 1 to 20 alphanumeric characters or hyphens.
- The first character must be a letter.
- A name cannot end with a hyphen or contain two consecutive hyphens.
.. note::
In general this parameter is not needed, as 'name' is used if it's
not provided.
ReplicationGroupId
The ID of the replication group to which this cache cluster should belong. If this
parameter is specified, the cache cluster is added to the specified replication
group as a read replica; otherwise, the cache cluster is a standalone primary that
is not part of any replication group. If the specified replication group is
Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is
created in Availability Zones that provide the best spread of read replicas across
Availability Zones.
        .. note::
This parameter is ONLY valid if the Engine parameter is redis. Due
to current limitations on Redis (cluster mode disabled), this
parameter is not supported on Redis (cluster mode enabled)
replication groups.
AZMode
Specifies whether the nodes in this Memcached cluster are created in a single
Availability Zone or created across multiple Availability Zones in the cluster's
region. If the AZMode and PreferredAvailabilityZones are not specified,
ElastiCache assumes single-az mode.
.. note::
This parameter is ONLY supported for Memcached cache clusters.
PreferredAvailabilityZone
The EC2 Availability Zone in which the cache cluster is created. All nodes
belonging to this Memcached cache cluster are placed in the preferred Availability
Zone. If you want to create your nodes across multiple Availability Zones, use
PreferredAvailabilityZones.
Default: System chosen Availability Zone.
PreferredAvailabilityZones
A list of the Availability Zones in which cache nodes are created. The order of
the zones in the list is not important. The number of Availability Zones listed
must equal the value of NumCacheNodes. If you want all the nodes in the same
Availability Zone, use PreferredAvailabilityZone instead, or repeat the
Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
.. note::
This option is ONLY supported on Memcached.
If you are creating your cache cluster in an Amazon VPC
(recommended) you can only locate nodes in Availability Zones that
are associated with the subnets in the selected subnet group.
NumCacheNodes
The initial (integer) number of cache nodes that the cache cluster has.
.. note::
For clusters running Redis, this value must be 1.
For clusters running Memcached, this value must be between 1 and 20.
CacheNodeType
The compute and memory capacity of the nodes in the node group (shard).
Valid node types (and pricing for them) are exhaustively described at
https://aws.amazon.com/elasticache/pricing/
.. note::
All T2 instances must be created in a VPC
Redis backup/restore is not supported for Redis (cluster mode
disabled) T1 and T2 instances. Backup/restore is supported on Redis
(cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1
or T2 instances.
Engine
The name of the cache engine to be used for this cache cluster. Valid values for
this parameter are: memcached | redis
EngineVersion
The version number of the cache engine to be used for this cache cluster. To view
the supported cache engine versions, use the DescribeCacheEngineVersions operation.
.. note::
You can upgrade to a newer engine version but you cannot downgrade
to an earlier engine version. If you want to use an earlier engine
version, you must delete the existing cache cluster or replication
group and create it anew with the earlier engine version.
CacheParameterGroupName
The name of the parameter group to associate with this cache cluster. If this
argument is omitted, the default parameter group for the specified engine is used.
You cannot use any parameter group which has cluster-enabled='yes' when creating
a cluster.
CacheSubnetGroupName
The name of the Cache Subnet Group to be used for the cache cluster. Use this
parameter ONLY when you are creating a cache cluster within a VPC.
.. note::
If you're going to launch your cluster in an Amazon VPC, you need
to create a subnet group before you start creating a cluster.
CacheSecurityGroupNames
A list of Cache Security Group names to associate with this cache cluster. Use
this parameter ONLY when you are creating a cache cluster outside of a VPC.
SecurityGroupIds
One or more VPC security groups associated with the cache cluster. Use this
parameter ONLY when you are creating a cache cluster within a VPC.
Tags
A list of tags to be added to this resource. Note that due to shortcomings in the
AWS API for Elasticache, these can only be set during resource creation - later
modification is not (currently) supported.
SnapshotArns
A single-element string list containing an Amazon Resource Name (ARN) that
uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
file is used to populate the node group (shard). The Amazon S3 object name in
the ARN cannot contain any commas.
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
SnapshotName
The name of a Redis snapshot from which to restore data into the new node group
(shard). The snapshot status changes to restoring while the new node group (shard)
is being created.
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
PreferredMaintenanceWindow
Specifies the weekly time range during which maintenance on the cache cluster is
permitted. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
(24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat
Example: sun:23:00-mon:01:30
Port
The port number on which each of the cache nodes accepts connections.
Default: 6379
NotificationTopicArn
The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS)
topic to which notifications are sent.
.. note::
The Amazon SNS topic owner must be the same as the cache cluster
owner.
AutoMinorVersionUpgrade
This (boolean) parameter is currently disabled.
SnapshotRetentionLimit
The number of days for which ElastiCache retains automatic snapshots before
deleting them.
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
SnapshotWindow
The daily time range (in UTC) during which ElastiCache begins taking a daily
snapshot of your node group (shard). If you do not specify this parameter,
ElastiCache automatically chooses an appropriate time range.
Example: 05:00-09:00
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
AuthToken
The password used to access a password protected server.
Password constraints:
- Must be only printable ASCII characters.
- Must be at least 16 characters and no more than 128 characters in length.
- Cannot contain any of the following characters: '/', '"', or "@".
CacheNodeIdsToRemove
A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002,
etc.). This parameter is only valid when NumCacheNodes is less than the existing number of
cache nodes. The number of cache node IDs supplied in this parameter must match the
difference between the existing number of cache nodes in the cluster or pending cache nodes,
whichever is greater, and the value of NumCacheNodes in the request.
NewAvailabilityZones
The list of Availability Zones where the new Memcached cache nodes are created.
This parameter is only valid when NumCacheNodes in the request is greater than the sum of
the number of active cache nodes and the number of cache nodes pending creation (which may
be zero). The number of Availability Zones supplied in this list must match the cache nodes
being added in this request.
Note: This option is only supported on Memcached clusters.
NotificationTopicStatus
The status of the SNS notification topic. Notifications are sent only if the status is active.
Valid values: active | inactive
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
current = __salt__['boto3_elasticache.'
'describe_cache_clusters'](name, region=region, key=key,
keyid=keyid, profile=profile)
if current:
check_update = True
else:
check_update = False
only_on_modify = [
'CacheNodeIdsToRemove',
'NewAvailabilityZones',
'NotificationTopicStatus'
]
create_args = {}
for k, v in args.items():
if k in only_on_modify:
check_update = True
else:
create_args[k] = v
if __opts__['test']:
ret['comment'] = 'Cache cluster {0} would be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto3_elasticache.'
'create_cache_cluster'](name, wait=wait, security_groups=security_groups,
region=region, key=key, keyid=keyid,
profile=profile, **create_args)
if created:
new = __salt__['boto3_elasticache.'
'describe_cache_clusters'](name, region=region, key=key,
keyid=keyid, profile=profile)
ret['comment'] = 'Cache cluster {0} was created.'.format(name)
ret['changes']['old'] = None
ret['changes']['new'] = new[0]
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} cache cluster.'.format(name)
if check_update:
# Refresh this in case we're updating from 'only_on_modify' above...
updated = __salt__['boto3_elasticache.'
'describe_cache_clusters'](name, region=region, key=key,
keyid=keyid, profile=profile)
need_update = _diff_cache_cluster(updated['CacheClusters'][0], args)
if need_update:
if __opts__['test']:
ret['comment'] = 'Cache cluster {0} would be modified.'.format(name)
ret['result'] = None
return ret
modified = __salt__['boto3_elasticache.'
'modify_cache_cluster'](name, wait=wait,
security_groups=security_groups,
region=region, key=key, keyid=keyid,
profile=profile, **need_update)
if modified:
new = __salt__['boto3_elasticache.'
'describe_cache_clusters'](name, region=region, key=key,
keyid=keyid, profile=profile)
if ret['comment']: # 'create' just ran...
ret['comment'] += ' ... and then immediately modified.'
else:
ret['comment'] = 'Cache cluster {0} was modified.'.format(name)
ret['changes']['old'] = current
ret['changes']['new'] = new[0]
else:
ret['result'] = False
ret['comment'] = 'Failed to modify cache cluster {0}.'.format(name)
else:
ret['comment'] = 'Cache cluster {0} is in the desired state.'.format(name)
return ret
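# Usage sketch (hedged): assuming this function is exposed as a Salt state
# module named boto3_elasticache, it would typically be driven from an SLS
# file along these illustrative lines:
#
# my-memcached:
#   boto3_elasticache.cache_cluster_present:
#     - wait: 900
#     - Engine: memcached
#     - CacheNodeType: cache.t2.micro
#     - NumCacheNodes: 2
#     - region: us-east-1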
|
def function[cache_cluster_present, parameter[name, wait, security_groups, region, key, keyid, profile]]:
constant[
Ensure a given cache cluster exists.
name
Name of the cache cluster (cache cluster id).
wait
Integer describing how long, in seconds, to wait for confirmation from AWS that the
resource is in the desired state. Zero meaning to return success or failure immediately
of course. Note that waiting for the cluster to become available is generally the
better course, as failure to do so will often lead to subsequent failures when managing
dependent resources.
security_groups
One or more VPC security groups (names and/or IDs) associated with the cache cluster.
.. note::
This is additive with any sec groups provided via the
SecurityGroupIds parameter below. Use this parameter ONLY when you
are creating a cluster in a VPC.
CacheClusterId
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
- A name must contain from 1 to 20 alphanumeric characters or hyphens.
- The first character must be a letter.
- A name cannot end with a hyphen or contain two consecutive hyphens.
.. note::
In general this parameter is not needed, as 'name' is used if it's
not provided.
ReplicationGroupId
The ID of the replication group to which this cache cluster should belong. If this
parameter is specified, the cache cluster is added to the specified replication
group as a read replica; otherwise, the cache cluster is a standalone primary that
is not part of any replication group. If the specified replication group is
Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is
created in Availability Zones that provide the best spread of read replicas across
Availability Zones.
        .. note::
This parameter is ONLY valid if the Engine parameter is redis. Due
to current limitations on Redis (cluster mode disabled), this
parameter is not supported on Redis (cluster mode enabled)
replication groups.
AZMode
Specifies whether the nodes in this Memcached cluster are created in a single
Availability Zone or created across multiple Availability Zones in the cluster's
region. If the AZMode and PreferredAvailabilityZones are not specified,
ElastiCache assumes single-az mode.
.. note::
This parameter is ONLY supported for Memcached cache clusters.
PreferredAvailabilityZone
The EC2 Availability Zone in which the cache cluster is created. All nodes
belonging to this Memcached cache cluster are placed in the preferred Availability
Zone. If you want to create your nodes across multiple Availability Zones, use
PreferredAvailabilityZones.
Default: System chosen Availability Zone.
PreferredAvailabilityZones
A list of the Availability Zones in which cache nodes are created. The order of
the zones in the list is not important. The number of Availability Zones listed
must equal the value of NumCacheNodes. If you want all the nodes in the same
Availability Zone, use PreferredAvailabilityZone instead, or repeat the
Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
.. note::
This option is ONLY supported on Memcached.
If you are creating your cache cluster in an Amazon VPC
(recommended) you can only locate nodes in Availability Zones that
are associated with the subnets in the selected subnet group.
NumCacheNodes
The initial (integer) number of cache nodes that the cache cluster has.
.. note::
For clusters running Redis, this value must be 1.
For clusters running Memcached, this value must be between 1 and 20.
CacheNodeType
The compute and memory capacity of the nodes in the node group (shard).
Valid node types (and pricing for them) are exhaustively described at
https://aws.amazon.com/elasticache/pricing/
.. note::
All T2 instances must be created in a VPC
Redis backup/restore is not supported for Redis (cluster mode
disabled) T1 and T2 instances. Backup/restore is supported on Redis
(cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1
or T2 instances.
Engine
The name of the cache engine to be used for this cache cluster. Valid values for
this parameter are: memcached | redis
EngineVersion
The version number of the cache engine to be used for this cache cluster. To view
the supported cache engine versions, use the DescribeCacheEngineVersions operation.
.. note::
You can upgrade to a newer engine version but you cannot downgrade
to an earlier engine version. If you want to use an earlier engine
version, you must delete the existing cache cluster or replication
group and create it anew with the earlier engine version.
CacheParameterGroupName
The name of the parameter group to associate with this cache cluster. If this
argument is omitted, the default parameter group for the specified engine is used.
You cannot use any parameter group which has cluster-enabled='yes' when creating
a cluster.
CacheSubnetGroupName
The name of the Cache Subnet Group to be used for the cache cluster. Use this
parameter ONLY when you are creating a cache cluster within a VPC.
.. note::
If you're going to launch your cluster in an Amazon VPC, you need
to create a subnet group before you start creating a cluster.
CacheSecurityGroupNames
A list of Cache Security Group names to associate with this cache cluster. Use
this parameter ONLY when you are creating a cache cluster outside of a VPC.
SecurityGroupIds
One or more VPC security groups associated with the cache cluster. Use this
parameter ONLY when you are creating a cache cluster within a VPC.
Tags
A list of tags to be added to this resource. Note that due to shortcomings in the
AWS API for Elasticache, these can only be set during resource creation - later
modification is not (currently) supported.
SnapshotArns
A single-element string list containing an Amazon Resource Name (ARN) that
uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
file is used to populate the node group (shard). The Amazon S3 object name in
the ARN cannot contain any commas.
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
SnapshotName
The name of a Redis snapshot from which to restore data into the new node group
(shard). The snapshot status changes to restoring while the new node group (shard)
is being created.
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
PreferredMaintenanceWindow
Specifies the weekly time range during which maintenance on the cache cluster is
permitted. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
(24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat
Example: sun:23:00-mon:01:30
Port
The port number on which each of the cache nodes accepts connections.
Default: 6379
NotificationTopicArn
The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS)
topic to which notifications are sent.
.. note::
The Amazon SNS topic owner must be the same as the cache cluster
owner.
AutoMinorVersionUpgrade
This (boolean) parameter is currently disabled.
SnapshotRetentionLimit
The number of days for which ElastiCache retains automatic snapshots before
deleting them.
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
SnapshotWindow
The daily time range (in UTC) during which ElastiCache begins taking a daily
snapshot of your node group (shard). If you do not specify this parameter,
ElastiCache automatically chooses an appropriate time range.
Example: 05:00-09:00
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
AuthToken
The password used to access a password protected server.
Password constraints:
- Must be only printable ASCII characters.
- Must be at least 16 characters and no more than 128 characters in length.
- Cannot contain any of the following characters: '/', '"', or "@".
CacheNodeIdsToRemove
A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002,
etc.). This parameter is only valid when NumCacheNodes is less than the existing number of
cache nodes. The number of cache node IDs supplied in this parameter must match the
difference between the existing number of cache nodes in the cluster or pending cache nodes,
whichever is greater, and the value of NumCacheNodes in the request.
NewAvailabilityZones
The list of Availability Zones where the new Memcached cache nodes are created.
This parameter is only valid when NumCacheNodes in the request is greater than the sum of
the number of active cache nodes and the number of cache nodes pending creation (which may
be zero). The number of Availability Zones supplied in this list must match the cache nodes
being added in this request.
Note: This option is only supported on Memcached clusters.
NotificationTopicStatus
The status of the SNS notification topic. Notifications are sent only if the status is active.
Valid values: active | inactive
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c8bb0>, <ast.Constant object at 0x7da20c7cad10>, <ast.Constant object at 0x7da20c7c9f90>, <ast.Constant object at 0x7da20c7cb010>], [<ast.Name object at 0x7da20c7cbac0>, <ast.Constant object at 0x7da20c7c8070>, <ast.Constant object at 0x7da20c7ca410>, <ast.Dict object at 0x7da20c7ca800>]]
variable[args] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da20c7c8610>]]
variable[current] assign[=] call[call[name[__salt__]][constant[boto3_elasticache.describe_cache_clusters]], parameter[name[name]]]
if name[current] begin[:]
variable[check_update] assign[=] constant[True]
if name[check_update] begin[:]
variable[updated] assign[=] call[call[name[__salt__]][constant[boto3_elasticache.describe_cache_clusters]], parameter[name[name]]]
variable[need_update] assign[=] call[name[_diff_cache_cluster], parameter[call[call[name[updated]][constant[CacheClusters]]][constant[0]], name[args]]]
if name[need_update] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Cache cluster {0} would be modified.].format, parameter[name[name]]]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
variable[modified] assign[=] call[call[name[__salt__]][constant[boto3_elasticache.modify_cache_cluster]], parameter[name[name]]]
if name[modified] begin[:]
variable[new] assign[=] call[call[name[__salt__]][constant[boto3_elasticache.describe_cache_clusters]], parameter[name[name]]]
if call[name[ret]][constant[comment]] begin[:]
<ast.AugAssign object at 0x7da1b208c970>
call[call[name[ret]][constant[changes]]][constant[new]] assign[=] call[name[new]][constant[0]]
return[name[ret]]
|
keyword[def] identifier[cache_cluster_present] ( identifier[name] , identifier[wait] = literal[int] , identifier[security_groups] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] ,
identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ,** identifier[args] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}}
identifier[args] = identifier[dict] ([( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[args] . identifier[items] () keyword[if] keyword[not] identifier[k] . identifier[startswith] ( literal[string] )])
identifier[current] = identifier[__salt__] [ literal[string]
literal[string] ]( identifier[name] , identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] identifier[current] :
identifier[check_update] = keyword[True]
keyword[else] :
identifier[check_update] = keyword[False]
identifier[only_on_modify] =[
literal[string] ,
literal[string] ,
literal[string]
]
identifier[create_args] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[args] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[only_on_modify] :
identifier[check_update] = keyword[True]
keyword[else] :
identifier[create_args] [ identifier[k] ]= identifier[v]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[created] = identifier[__salt__] [ literal[string]
literal[string] ]( identifier[name] , identifier[wait] = identifier[wait] , identifier[security_groups] = identifier[security_groups] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] ,** identifier[create_args] )
keyword[if] identifier[created] :
identifier[new] = identifier[__salt__] [ literal[string]
literal[string] ]( identifier[name] , identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[new] [ literal[int] ]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[if] identifier[check_update] :
identifier[updated] = identifier[__salt__] [ literal[string]
literal[string] ]( identifier[name] , identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[need_update] = identifier[_diff_cache_cluster] ( identifier[updated] [ literal[string] ][ literal[int] ], identifier[args] )
keyword[if] identifier[need_update] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[modified] = identifier[__salt__] [ literal[string]
literal[string] ]( identifier[name] , identifier[wait] = identifier[wait] ,
identifier[security_groups] = identifier[security_groups] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] ,** identifier[need_update] )
keyword[if] identifier[modified] :
identifier[new] = identifier[__salt__] [ literal[string]
literal[string] ]( identifier[name] , identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]+= literal[string]
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[current]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[new] [ literal[int] ]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
|
def cache_cluster_present(name, wait=900, security_groups=None, region=None, key=None, keyid=None, profile=None, **args):
"""
Ensure a given cache cluster exists.
name
Name of the cache cluster (cache cluster id).
wait
        Integer describing how long, in seconds, to wait for confirmation from AWS that the
        resource is in the desired state. A value of zero returns success or failure
        immediately. Note that waiting for the cluster to become available is generally the
        better course, as failing to do so often leads to subsequent failures when managing
        dependent resources.
security_groups
One or more VPC security groups (names and/or IDs) associated with the cache cluster.
.. note::
This is additive with any sec groups provided via the
SecurityGroupIds parameter below. Use this parameter ONLY when you
are creating a cluster in a VPC.
CacheClusterId
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
- A name must contain from 1 to 20 alphanumeric characters or hyphens.
- The first character must be a letter.
- A name cannot end with a hyphen or contain two consecutive hyphens.
.. note::
In general this parameter is not needed, as 'name' is used if it's
not provided.
ReplicationGroupId
The ID of the replication group to which this cache cluster should belong. If this
parameter is specified, the cache cluster is added to the specified replication
group as a read replica; otherwise, the cache cluster is a standalone primary that
is not part of any replication group. If the specified replication group is
Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is
created in Availability Zones that provide the best spread of read replicas across
Availability Zones.
        .. note::
This parameter is ONLY valid if the Engine parameter is redis. Due
to current limitations on Redis (cluster mode disabled), this
parameter is not supported on Redis (cluster mode enabled)
replication groups.
AZMode
Specifies whether the nodes in this Memcached cluster are created in a single
Availability Zone or created across multiple Availability Zones in the cluster's
region. If the AZMode and PreferredAvailabilityZones are not specified,
ElastiCache assumes single-az mode.
.. note::
This parameter is ONLY supported for Memcached cache clusters.
PreferredAvailabilityZone
The EC2 Availability Zone in which the cache cluster is created. All nodes
belonging to this Memcached cache cluster are placed in the preferred Availability
Zone. If you want to create your nodes across multiple Availability Zones, use
PreferredAvailabilityZones.
Default: System chosen Availability Zone.
PreferredAvailabilityZones
A list of the Availability Zones in which cache nodes are created. The order of
the zones in the list is not important. The number of Availability Zones listed
must equal the value of NumCacheNodes. If you want all the nodes in the same
Availability Zone, use PreferredAvailabilityZone instead, or repeat the
Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
.. note::
This option is ONLY supported on Memcached.
If you are creating your cache cluster in an Amazon VPC
(recommended) you can only locate nodes in Availability Zones that
are associated with the subnets in the selected subnet group.
NumCacheNodes
The initial (integer) number of cache nodes that the cache cluster has.
.. note::
For clusters running Redis, this value must be 1.
For clusters running Memcached, this value must be between 1 and 20.
CacheNodeType
The compute and memory capacity of the nodes in the node group (shard).
Valid node types (and pricing for them) are exhaustively described at
https://aws.amazon.com/elasticache/pricing/
.. note::
All T2 instances must be created in a VPC
Redis backup/restore is not supported for Redis (cluster mode
disabled) T1 and T2 instances. Backup/restore is supported on Redis
(cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1
or T2 instances.
Engine
The name of the cache engine to be used for this cache cluster. Valid values for
this parameter are: memcached | redis
EngineVersion
The version number of the cache engine to be used for this cache cluster. To view
the supported cache engine versions, use the DescribeCacheEngineVersions operation.
.. note::
You can upgrade to a newer engine version but you cannot downgrade
to an earlier engine version. If you want to use an earlier engine
version, you must delete the existing cache cluster or replication
group and create it anew with the earlier engine version.
CacheParameterGroupName
The name of the parameter group to associate with this cache cluster. If this
argument is omitted, the default parameter group for the specified engine is used.
You cannot use any parameter group which has cluster-enabled='yes' when creating
a cluster.
CacheSubnetGroupName
The name of the Cache Subnet Group to be used for the cache cluster. Use this
parameter ONLY when you are creating a cache cluster within a VPC.
.. note::
If you're going to launch your cluster in an Amazon VPC, you need
to create a subnet group before you start creating a cluster.
CacheSecurityGroupNames
A list of Cache Security Group names to associate with this cache cluster. Use
this parameter ONLY when you are creating a cache cluster outside of a VPC.
SecurityGroupIds
One or more VPC security groups associated with the cache cluster. Use this
parameter ONLY when you are creating a cache cluster within a VPC.
Tags
A list of tags to be added to this resource. Note that due to shortcomings in the
AWS API for Elasticache, these can only be set during resource creation - later
modification is not (currently) supported.
SnapshotArns
A single-element string list containing an Amazon Resource Name (ARN) that
uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
file is used to populate the node group (shard). The Amazon S3 object name in
the ARN cannot contain any commas.
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
SnapshotName
The name of a Redis snapshot from which to restore data into the new node group
(shard). The snapshot status changes to restoring while the new node group (shard)
is being created.
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
PreferredMaintenanceWindow
Specifies the weekly time range during which maintenance on the cache cluster is
permitted. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
(24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat
Example: sun:23:00-mon:01:30
Port
The port number on which each of the cache nodes accepts connections.
Default: 6379
NotificationTopicArn
The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS)
topic to which notifications are sent.
.. note::
The Amazon SNS topic owner must be the same as the cache cluster
owner.
AutoMinorVersionUpgrade
This (boolean) parameter is currently disabled.
SnapshotRetentionLimit
The number of days for which ElastiCache retains automatic snapshots before
deleting them.
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
SnapshotWindow
The daily time range (in UTC) during which ElastiCache begins taking a daily
snapshot of your node group (shard). If you do not specify this parameter,
ElastiCache automatically chooses an appropriate time range.
Example: 05:00-09:00
.. note::
This parameter is ONLY valid if the Engine parameter is redis.
AuthToken
The password used to access a password protected server.
Password constraints:
- Must be only printable ASCII characters.
- Must be at least 16 characters and no more than 128 characters in length.
- Cannot contain any of the following characters: '/', '"', or "@".
CacheNodeIdsToRemove
A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002,
etc.). This parameter is only valid when NumCacheNodes is less than the existing number of
cache nodes. The number of cache node IDs supplied in this parameter must match the
difference between the existing number of cache nodes in the cluster or pending cache nodes,
whichever is greater, and the value of NumCacheNodes in the request.
NewAvailabilityZones
The list of Availability Zones where the new Memcached cache nodes are created.
This parameter is only valid when NumCacheNodes in the request is greater than the sum of
the number of active cache nodes and the number of cache nodes pending creation (which may
be zero). The number of Availability Zones supplied in this list must match the cache nodes
being added in this request.
        .. note::
            This option is only supported on Memcached clusters.
NotificationTopicStatus
The status of the SNS notification topic. Notifications are sent only if the status is active.
Valid values: active | inactive
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
"""
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
args = dict([(k, v) for (k, v) in args.items() if not k.startswith('_')])
current = __salt__['boto3_elasticache.describe_cache_clusters'](name, region=region, key=key, keyid=keyid, profile=profile)
if current:
check_update = True # depends on [control=['if'], data=[]]
else:
check_update = False
only_on_modify = ['CacheNodeIdsToRemove', 'NewAvailabilityZones', 'NotificationTopicStatus']
create_args = {}
for (k, v) in args.items():
if k in only_on_modify:
check_update = True # depends on [control=['if'], data=[]]
else:
create_args[k] = v # depends on [control=['for'], data=[]]
if __opts__['test']:
ret['comment'] = 'Cache cluster {0} would be created.'.format(name)
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
created = __salt__['boto3_elasticache.create_cache_cluster'](name, wait=wait, security_groups=security_groups, region=region, key=key, keyid=keyid, profile=profile, **create_args)
if created:
new = __salt__['boto3_elasticache.describe_cache_clusters'](name, region=region, key=key, keyid=keyid, profile=profile)
ret['comment'] = 'Cache cluster {0} was created.'.format(name)
ret['changes']['old'] = None
ret['changes']['new'] = new[0] # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} cache cluster.'.format(name)
if check_update:
# Refresh this in case we're updating from 'only_on_modify' above...
updated = __salt__['boto3_elasticache.describe_cache_clusters'](name, region=region, key=key, keyid=keyid, profile=profile)
need_update = _diff_cache_cluster(updated['CacheClusters'][0], args)
if need_update:
if __opts__['test']:
ret['comment'] = 'Cache cluster {0} would be modified.'.format(name)
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
modified = __salt__['boto3_elasticache.modify_cache_cluster'](name, wait=wait, security_groups=security_groups, region=region, key=key, keyid=keyid, profile=profile, **need_update)
if modified:
new = __salt__['boto3_elasticache.describe_cache_clusters'](name, region=region, key=key, keyid=keyid, profile=profile)
if ret['comment']: # 'create' just ran...
ret['comment'] += ' ... and then immediately modified.' # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Cache cluster {0} was modified.'.format(name)
ret['changes']['old'] = current
ret['changes']['new'] = new[0] # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to modify cache cluster {0}.'.format(name) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Cache cluster {0} is in the desired state.'.format(name) # depends on [control=['if'], data=[]]
return ret
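
The create/modify split above (only_on_modify vs. create_args) is easy to see in isolation. A minimal standalone sketch of that argument partitioning, with illustrative kwargs:

only_on_modify = ['CacheNodeIdsToRemove', 'NewAvailabilityZones', 'NotificationTopicStatus']

def split_args(**kwargs):
    # Partition kwargs into arguments valid at creation time and
    # arguments that only make sense on a modify call.
    create_args, modify_only = {}, {}
    for k, v in kwargs.items():
        (modify_only if k in only_on_modify else create_args)[k] = v
    return create_args, modify_only

create_args, modify_only = split_args(Engine='redis', NumCacheNodes=1,
                                      NotificationTopicStatus='active')
# create_args -> {'Engine': 'redis', 'NumCacheNodes': 1}
# modify_only -> {'NotificationTopicStatus': 'active'}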
|
def pad_release(release_to_pad, num_sections=4):
'''
Pad out package and kernel release versions so that
``LooseVersion`` comparisons will be correct.
Release versions with less than num_sections will
be padded in front of the last section with zeros.
For example ::
pad_release("390.el6", 4)
will return ``390.0.0.el6`` and ::
pad_release("390.11.el6", 4)
will return ``390.11.0.el6``.
If the number of sections of the release to be padded is
greater than num_sections, a ``ValueError`` will be raised.
'''
parts = release_to_pad.split('.')
if len(parts) > num_sections:
raise ValueError("Too many sections encountered ({found} > {num} in release string {rel}".format(
found=len(parts), num=num_sections, rel=release_to_pad
))
pad_count = num_sections - len(parts)
return ".".join(parts[:-1] + ['0'] * pad_count + parts[-1:])
|
def function[pad_release, parameter[release_to_pad, num_sections]]:
constant[
Pad out package and kernel release versions so that
``LooseVersion`` comparisons will be correct.
Release versions with less than num_sections will
be padded in front of the last section with zeros.
For example ::
pad_release("390.el6", 4)
will return ``390.0.0.el6`` and ::
pad_release("390.11.el6", 4)
will return ``390.11.0.el6``.
If the number of sections of the release to be padded is
greater than num_sections, a ``ValueError`` will be raised.
]
variable[parts] assign[=] call[name[release_to_pad].split, parameter[constant[.]]]
if compare[call[name[len], parameter[name[parts]]] greater[>] name[num_sections]] begin[:]
<ast.Raise object at 0x7da18f811720>
variable[pad_count] assign[=] binary_operation[name[num_sections] - call[name[len], parameter[name[parts]]]]
return[call[constant[.].join, parameter[binary_operation[binary_operation[call[name[parts]][<ast.Slice object at 0x7da1b184b550>] + binary_operation[list[[<ast.Constant object at 0x7da1b1849600>]] * name[pad_count]]] + call[name[parts]][<ast.Slice object at 0x7da1b184b1c0>]]]]]
|
keyword[def] identifier[pad_release] ( identifier[release_to_pad] , identifier[num_sections] = literal[int] ):
literal[string]
identifier[parts] = identifier[release_to_pad] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )> identifier[num_sections] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[found] = identifier[len] ( identifier[parts] ), identifier[num] = identifier[num_sections] , identifier[rel] = identifier[release_to_pad]
))
identifier[pad_count] = identifier[num_sections] - identifier[len] ( identifier[parts] )
keyword[return] literal[string] . identifier[join] ( identifier[parts] [:- literal[int] ]+[ literal[string] ]* identifier[pad_count] + identifier[parts] [- literal[int] :])
|
def pad_release(release_to_pad, num_sections=4):
"""
Pad out package and kernel release versions so that
``LooseVersion`` comparisons will be correct.
Release versions with less than num_sections will
be padded in front of the last section with zeros.
For example ::
pad_release("390.el6", 4)
will return ``390.0.0.el6`` and ::
pad_release("390.11.el6", 4)
will return ``390.11.0.el6``.
If the number of sections of the release to be padded is
greater than num_sections, a ``ValueError`` will be raised.
"""
parts = release_to_pad.split('.')
if len(parts) > num_sections:
        raise ValueError('Too many sections encountered ({found} > {num}) in release string {rel}'.format(found=len(parts), num=num_sections, rel=release_to_pad)) # depends on [control=['if'], data=['num_sections']]
pad_count = num_sections - len(parts)
return '.'.join(parts[:-1] + ['0'] * pad_count + parts[-1:])
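
A quick usage sketch, assuming the pad_release above is in scope. LooseVersion compares the padded forms correctly because every release now has the same number of sections:

from distutils.version import LooseVersion  # deprecated upstream, but matches the docstring

a = pad_release("390.el6")      # '390.0.0.el6'
b = pad_release("390.11.el6")   # '390.11.0.el6'
assert LooseVersion(a) < LooseVersion(b)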
|
def _reg_sighandlers(self):
""" Registers signal handlers to this class.
"""
# SIGCHLD, so we shutdown when any of the child processes exit
_handler = lambda signo, frame: self.shutdown()
signal.signal(signal.SIGCHLD, _handler)
signal.signal(signal.SIGTERM, _handler)
|
def function[_reg_sighandlers, parameter[self]]:
constant[ Registers signal handlers to this class.
]
variable[_handler] assign[=] <ast.Lambda object at 0x7da20e9b1780>
call[name[signal].signal, parameter[name[signal].SIGCHLD, name[_handler]]]
call[name[signal].signal, parameter[name[signal].SIGTERM, name[_handler]]]
|
keyword[def] identifier[_reg_sighandlers] ( identifier[self] ):
literal[string]
identifier[_handler] = keyword[lambda] identifier[signo] , identifier[frame] : identifier[self] . identifier[shutdown] ()
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGCHLD] , identifier[_handler] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGTERM] , identifier[_handler] )
|
def _reg_sighandlers(self):
""" Registers signal handlers to this class.
"""
# SIGCHLD, so we shutdown when any of the child processes exit
_handler = lambda signo, frame: self.shutdown()
signal.signal(signal.SIGCHLD, _handler)
signal.signal(signal.SIGTERM, _handler)
|
async def update(self, records: Iterable[DataRecord], values: SQLValuesToWrite, returning=False) -> Union[int, Iterable[DataRecord]]:
"""
:param records:
:param values:
:param returning:
    :return: the count of updated records if returning is False, otherwise the updated records
"""
raise NotImplementedError()
|
<ast.AsyncFunctionDef object at 0x7da1b0012e00>
|
keyword[async] keyword[def] identifier[update] ( identifier[self] , identifier[records] : identifier[Iterable] [ identifier[DataRecord] ], identifier[values] : identifier[SQLValuesToWrite] , identifier[returning] = keyword[False] )-> identifier[Union] [ identifier[int] , identifier[Iterable] [ identifier[DataRecord] ]]:
literal[string]
keyword[raise] identifier[NotImplementedError] ()
|
async def update(self, records: Iterable[DataRecord], values: SQLValuesToWrite, returning=False) -> Union[int, Iterable[DataRecord]]:
"""
:param records:
:param values:
:param returning:
    :return: the count of updated records if returning is False, otherwise the updated records
"""
raise NotImplementedError()
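
A hypothetical in-memory implementation honoring the documented contract; the record and value types below are plain dict stand-ins, not the real DataRecord/SQLValuesToWrite:

from typing import Dict, Iterable, List, Union

async def update_in_memory(records: Iterable[Dict], values: Dict,
                           returning: bool = False) -> Union[int, List[Dict]]:
    updated = []
    for rec in records:
        rec.update(values)      # apply the new values in place
        updated.append(rec)
    # Contract from the docstring: count when returning is False, else records.
    return updated if returning else len(updated)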
|
def minimize_spec(spec_dict, operations=None, resources=None):
"""
Trims down a source spec dict to only the operations or resources indicated.
:param spec_dict: The source spec dict to minimize.
:type spec_dict: dict
    :param operations: A list of operation IDs to retain.
:type operations: list of str
:param resources: A list of resource names to retain.
:type resources: list of str
:return: Minimized swagger spec dict
:rtype: dict
"""
operations = operations or []
resources = resources or []
# keep the ugly overhead for now but only add paths we need
minimized = {key: value for key, value in spec_dict.items() if key != 'paths'}
minimized['paths'] = {}
for path_name, path in spec_dict['paths'].items():
for method, data in path.items():
if data['operationId'] in operations or any(tag in resources for tag in data['tags']):
if path_name not in minimized['paths']:
minimized['paths'][path_name] = {}
minimized['paths'][path_name][method] = data
return minimized
|
def function[minimize_spec, parameter[spec_dict, operations, resources]]:
constant[
Trims down a source spec dict to only the operations or resources indicated.
:param spec_dict: The source spec dict to minimize.
:type spec_dict: dict
    :param operations: A list of operation IDs to retain.
:type operations: list of str
:param resources: A list of resource names to retain.
:type resources: list of str
:return: Minimized swagger spec dict
:rtype: dict
]
variable[operations] assign[=] <ast.BoolOp object at 0x7da1b0a1eaa0>
variable[resources] assign[=] <ast.BoolOp object at 0x7da1b0a1f010>
variable[minimized] assign[=] <ast.DictComp object at 0x7da1b0a1e950>
call[name[minimized]][constant[paths]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0a1d570>, <ast.Name object at 0x7da1b0a1e860>]]] in starred[call[call[name[spec_dict]][constant[paths]].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0a1cf40>, <ast.Name object at 0x7da1b0a1e2f0>]]] in starred[call[name[path].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0a1ed40> begin[:]
if compare[name[path_name] <ast.NotIn object at 0x7da2590d7190> call[name[minimized]][constant[paths]]] begin[:]
call[call[name[minimized]][constant[paths]]][name[path_name]] assign[=] dictionary[[], []]
call[call[call[name[minimized]][constant[paths]]][name[path_name]]][name[method]] assign[=] name[data]
return[name[minimized]]
|
keyword[def] identifier[minimize_spec] ( identifier[spec_dict] , identifier[operations] = keyword[None] , identifier[resources] = keyword[None] ):
literal[string]
identifier[operations] = identifier[operations] keyword[or] []
identifier[resources] = identifier[resources] keyword[or] []
identifier[minimized] ={ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[spec_dict] . identifier[items] () keyword[if] identifier[key] != literal[string] }
identifier[minimized] [ literal[string] ]={}
keyword[for] identifier[path_name] , identifier[path] keyword[in] identifier[spec_dict] [ literal[string] ]. identifier[items] ():
keyword[for] identifier[method] , identifier[data] keyword[in] identifier[path] . identifier[items] ():
keyword[if] identifier[data] [ literal[string] ] keyword[in] identifier[operations] keyword[or] identifier[any] ( identifier[tag] keyword[in] identifier[resources] keyword[for] identifier[tag] keyword[in] identifier[data] [ literal[string] ]):
keyword[if] identifier[path_name] keyword[not] keyword[in] identifier[minimized] [ literal[string] ]:
identifier[minimized] [ literal[string] ][ identifier[path_name] ]={}
identifier[minimized] [ literal[string] ][ identifier[path_name] ][ identifier[method] ]= identifier[data]
keyword[return] identifier[minimized]
|
def minimize_spec(spec_dict, operations=None, resources=None):
"""
Trims down a source spec dict to only the operations or resources indicated.
:param spec_dict: The source spec dict to minimize.
:type spec_dict: dict
    :param operations: A list of operation IDs to retain.
:type operations: list of str
:param resources: A list of resource names to retain.
:type resources: list of str
:return: Minimized swagger spec dict
:rtype: dict
"""
operations = operations or []
resources = resources or []
# keep the ugly overhead for now but only add paths we need
minimized = {key: value for (key, value) in spec_dict.items() if key != 'paths'}
minimized['paths'] = {}
for (path_name, path) in spec_dict['paths'].items():
for (method, data) in path.items():
if data['operationId'] in operations or any((tag in resources for tag in data['tags'])):
if path_name not in minimized['paths']:
minimized['paths'][path_name] = {} # depends on [control=['if'], data=['path_name']]
minimized['paths'][path_name][method] = data # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return minimized
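
A small worked example; the spec below is a toy, not a real Swagger file:

spec = {
    'swagger': '2.0',
    'paths': {
        '/pets': {
            'get': {'operationId': 'listPets', 'tags': ['pets']},
            'post': {'operationId': 'createPet', 'tags': ['pets']},
        },
        '/users': {
            'get': {'operationId': 'listUsers', 'tags': ['users']},
        },
    },
}
minimal = minimize_spec(spec, operations=['listUsers'])
# minimal['paths'] -> only GET /users survives
minimal = minimize_spec(spec, resources=['pets'])
# minimal['paths'] -> both /pets operations survive; /users is dropped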
|
def getVolInfo(*paths):
'''
Retrieve volume usage info for the given path.
'''
path = os.path.join(*paths)
path = os.path.expanduser(path)
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
return {
'free': free,
'used': total - free,
'total': total,
}
|
def function[getVolInfo, parameter[]]:
constant[
Retrieve volume usage info for the given path.
]
variable[path] assign[=] call[name[os].path.join, parameter[<ast.Starred object at 0x7da18eb54310>]]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
variable[st] assign[=] call[name[os].statvfs, parameter[name[path]]]
variable[free] assign[=] binary_operation[name[st].f_bavail * name[st].f_frsize]
variable[total] assign[=] binary_operation[name[st].f_blocks * name[st].f_frsize]
return[dictionary[[<ast.Constant object at 0x7da18eb57820>, <ast.Constant object at 0x7da18eb577c0>, <ast.Constant object at 0x7da18eb55a20>], [<ast.Name object at 0x7da18eb57b20>, <ast.BinOp object at 0x7da18eb56a70>, <ast.Name object at 0x7da18eb54730>]]]
|
keyword[def] identifier[getVolInfo] (* identifier[paths] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[join] (* identifier[paths] )
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
identifier[st] = identifier[os] . identifier[statvfs] ( identifier[path] )
identifier[free] = identifier[st] . identifier[f_bavail] * identifier[st] . identifier[f_frsize]
identifier[total] = identifier[st] . identifier[f_blocks] * identifier[st] . identifier[f_frsize]
keyword[return] {
literal[string] : identifier[free] ,
literal[string] : identifier[total] - identifier[free] ,
literal[string] : identifier[total] ,
}
|
def getVolInfo(*paths):
"""
Retrieve volume usage info for the given path.
"""
path = os.path.join(*paths)
path = os.path.expanduser(path)
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
return {'free': free, 'used': total - free, 'total': total}
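
Usage sketch (assumes os is imported alongside the function; statvfs is POSIX-only, so this will not run on Windows):

info = getVolInfo('~', '.cache')
pct_used = 100.0 * info['used'] / info['total']
print('%.1f%% used, %d bytes free' % (pct_used, info['free']))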
|
def unite_dict(a, b):
"""
>>> a = {'name': 'Sylvanas'}
>>> b = {'gender': 'Man'}
>>> unite_dict(a, b)
{'name': 'Sylvanas', 'gender': 'Man'}
"""
c = {}
c.update(a)
c.update(b)
return c
|
def function[unite_dict, parameter[a, b]]:
constant[
>>> a = {'name': 'Sylvanas'}
>>> b = {'gender': 'Man'}
>>> unite_dict(a, b)
{'name': 'Sylvanas', 'gender': 'Man'}
]
variable[c] assign[=] dictionary[[], []]
call[name[c].update, parameter[name[a]]]
call[name[c].update, parameter[name[b]]]
return[name[c]]
|
keyword[def] identifier[unite_dict] ( identifier[a] , identifier[b] ):
literal[string]
identifier[c] ={}
identifier[c] . identifier[update] ( identifier[a] )
identifier[c] . identifier[update] ( identifier[b] )
keyword[return] identifier[c]
|
def unite_dict(a, b):
"""
>>> a = {'name': 'Sylvanas'}
>>> b = {'gender': 'Man'}
>>> unite_dict(a, b)
{'name': 'Sylvanas', 'gender': 'Man'}
"""
c = {}
c.update(a)
c.update(b)
return c
|
def displacements(self):
"""Return displacements
Returns
-------
There are two types of displacement dataset. See the docstring
of set_displacement_dataset about types 1 and 2 for displacement
dataset format.
Type-1, List of list
        The internal list has 4 elements such as [32, 0.01, 0.0, 0.0].
The first element is the supercell atom index starting with 0.
The remaining three elements give the displacement in Cartesian
coordinates.
Type-2, array_like
Displacements of all atoms of all supercells in Cartesian
coordinates.
shape=(supercells, natom, 3)
dtype='double'
"""
disps = []
if 'first_atoms' in self._displacement_dataset:
for disp in self._displacement_dataset['first_atoms']:
x = disp['displacement']
disps.append([disp['number'], x[0], x[1], x[2]])
elif 'displacements' in self._displacement_dataset:
disps = self._displacement_dataset['displacements']
return disps
|
def function[displacements, parameter[self]]:
constant[Return displacements
Returns
-------
There are two types of displacement dataset. See the docstring
of set_displacement_dataset about types 1 and 2 for displacement
dataset format.
Type-1, List of list
        The internal list has 4 elements such as [32, 0.01, 0.0, 0.0].
The first element is the supercell atom index starting with 0.
The remaining three elements give the displacement in Cartesian
coordinates.
Type-2, array_like
Displacements of all atoms of all supercells in Cartesian
coordinates.
shape=(supercells, natom, 3)
dtype='double'
]
variable[disps] assign[=] list[[]]
if compare[constant[first_atoms] in name[self]._displacement_dataset] begin[:]
for taget[name[disp]] in starred[call[name[self]._displacement_dataset][constant[first_atoms]]] begin[:]
variable[x] assign[=] call[name[disp]][constant[displacement]]
call[name[disps].append, parameter[list[[<ast.Subscript object at 0x7da18fe925c0>, <ast.Subscript object at 0x7da18fe92320>, <ast.Subscript object at 0x7da18fe90910>, <ast.Subscript object at 0x7da18fe90460>]]]]
return[name[disps]]
|
keyword[def] identifier[displacements] ( identifier[self] ):
literal[string]
identifier[disps] =[]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_displacement_dataset] :
keyword[for] identifier[disp] keyword[in] identifier[self] . identifier[_displacement_dataset] [ literal[string] ]:
identifier[x] = identifier[disp] [ literal[string] ]
identifier[disps] . identifier[append] ([ identifier[disp] [ literal[string] ], identifier[x] [ literal[int] ], identifier[x] [ literal[int] ], identifier[x] [ literal[int] ]])
keyword[elif] literal[string] keyword[in] identifier[self] . identifier[_displacement_dataset] :
identifier[disps] = identifier[self] . identifier[_displacement_dataset] [ literal[string] ]
keyword[return] identifier[disps]
|
def displacements(self):
"""Return displacements
Returns
-------
There are two types of displacement dataset. See the docstring
of set_displacement_dataset about types 1 and 2 for displacement
dataset format.
Type-1, List of list
        The internal list has 4 elements such as [32, 0.01, 0.0, 0.0].
The first element is the supercell atom index starting with 0.
The remaining three elements give the displacement in Cartesian
coordinates.
Type-2, array_like
Displacements of all atoms of all supercells in Cartesian
coordinates.
shape=(supercells, natom, 3)
dtype='double'
"""
disps = []
if 'first_atoms' in self._displacement_dataset:
for disp in self._displacement_dataset['first_atoms']:
x = disp['displacement']
disps.append([disp['number'], x[0], x[1], x[2]]) # depends on [control=['for'], data=['disp']] # depends on [control=['if'], data=[]]
elif 'displacements' in self._displacement_dataset:
disps = self._displacement_dataset['displacements'] # depends on [control=['if'], data=[]]
return disps
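
A sketch of the two accepted dataset shapes, using a minimal stand-in object so the function above can be exercised as a method (the _Holder class is hypothetical, introduced only for illustration):

import numpy as np

class _Holder:
    displacements = displacements  # reuse the function above as a method
    def __init__(self, dataset):
        self._displacement_dataset = dataset

# Type-1: per-atom displacements.
type1 = {'first_atoms': [{'number': 32, 'displacement': [0.01, 0.0, 0.0]}]}
print(_Holder(type1).displacements())        # [[32, 0.01, 0.0, 0.0]]

# Type-2: a (supercells, natom, 3) array, returned unchanged.
type2 = {'displacements': np.zeros((2, 8, 3))}
print(_Holder(type2).displacements().shape)  # (2, 8, 3)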
|
def comma(value):
"""
FORMAT WITH THOUSANDS COMMA (,) SEPARATOR
"""
try:
if float(value) == _round(float(value), 0):
output = "{:,}".format(int(value))
else:
output = "{:,}".format(float(value))
except Exception:
output = text_type(value)
return output
|
def function[comma, parameter[value]]:
constant[
FORMAT WITH THOUSANDS COMMA (,) SEPARATOR
]
<ast.Try object at 0x7da1b1d20bb0>
return[name[output]]
|
keyword[def] identifier[comma] ( identifier[value] ):
literal[string]
keyword[try] :
keyword[if] identifier[float] ( identifier[value] )== identifier[_round] ( identifier[float] ( identifier[value] ), literal[int] ):
identifier[output] = literal[string] . identifier[format] ( identifier[int] ( identifier[value] ))
keyword[else] :
identifier[output] = literal[string] . identifier[format] ( identifier[float] ( identifier[value] ))
keyword[except] identifier[Exception] :
identifier[output] = identifier[text_type] ( identifier[value] )
keyword[return] identifier[output]
|
def comma(value):
"""
FORMAT WITH THOUSANDS COMMA (,) SEPARATOR
"""
try:
if float(value) == _round(float(value), 0):
output = '{:,}'.format(int(value)) # depends on [control=['if'], data=[]]
else:
output = '{:,}'.format(float(value)) # depends on [control=['try'], data=[]]
except Exception:
output = text_type(value) # depends on [control=['except'], data=[]]
return output
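
Behavior sketch, assuming the module-level helpers _round and text_type used above are available:

comma(1234567)       # '1,234,567'   (whole numbers drop the decimal part)
comma(1234567.89)    # '1,234,567.89'
comma('n/a')         # 'n/a'         (non-numeric input falls back to text_type)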
|
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
|
def function[create_content, parameter[self]]:
constant[
Render the template, apply options on it, and save it to the cache.
]
call[name[self].render_node, parameter[]]
if name[self].options.compress_spaces begin[:]
name[self].content assign[=] call[name[self].RE_SPACELESS.sub, parameter[constant[ ], name[self].content]]
if name[self].options.compress begin[:]
variable[to_cache] assign[=] call[name[self].encode_content, parameter[]]
variable[to_cache] assign[=] call[name[self].join_content_version, parameter[name[to_cache]]]
<ast.Try object at 0x7da18eb57b50>
|
keyword[def] identifier[create_content] ( identifier[self] ):
literal[string]
identifier[self] . identifier[render_node] ()
keyword[if] identifier[self] . identifier[options] . identifier[compress_spaces] :
identifier[self] . identifier[content] = identifier[self] . identifier[RE_SPACELESS] . identifier[sub] ( literal[string] , identifier[self] . identifier[content] )
keyword[if] identifier[self] . identifier[options] . identifier[compress] :
identifier[to_cache] = identifier[self] . identifier[encode_content] ()
keyword[else] :
identifier[to_cache] = identifier[self] . identifier[content]
identifier[to_cache] = identifier[self] . identifier[join_content_version] ( identifier[to_cache] )
keyword[try] :
identifier[self] . identifier[cache_set] ( identifier[to_cache] )
keyword[except] identifier[Exception] :
keyword[if] identifier[is_template_debug_activated] ():
keyword[raise]
identifier[logger] . identifier[exception] ( literal[string] )
|
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content) # depends on [control=['if'], data=[]]
if self.options.compress:
to_cache = self.encode_content() # depends on [control=['if'], data=[]]
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache) # depends on [control=['try'], data=[]]
except Exception:
if is_template_debug_activated():
raise # depends on [control=['if'], data=[]]
logger.exception('Error when saving the cached template fragment') # depends on [control=['except'], data=[]]
|
def PCA(Y, components):
    """
    run PCA, retrieving the first (components) principal components
    return [s0, w0] (for N <= D, returns [pc, std_var])
    s0: factors
    w0: weights
    """
    N, D = Y.shape
    # The original had an unconditional SVD block here that returned before the
    # N > D test below could ever run; it duplicated the N > D branch and has
    # been folded into it.
    if N > D:
        sv = linalg.svd(Y, full_matrices=0)
        [s0, w0] = [sv[0][:, 0:components], np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]
        v = s0.std(axis=0)
        s0 /= v
        w0 *= v
        return [s0, w0]
    else:
        K = np.cov(Y)
        sv = linalg.eigh(K)
        std_var = np.sqrt(sv[0])
        # np.newaxis is not callable; the original wrote np.newaxis(), which
        # raises TypeError. Possibly std_var[np.newaxis, :] was intended.
        pc = sv[1] * std_var[np.newaxis, 0]
        return [pc, std_var]
|
def function[PCA, parameter[Y, components]]:
constant[
    run PCA, retrieving the first (components) principal components
return [s0, eig, w0]
s0: factors
w0: weights
]
<ast.Tuple object at 0x7da1b09052a0> assign[=] name[Y].shape
variable[sv] assign[=] call[name[linalg].svd, parameter[name[Y]]]
<ast.List object at 0x7da1b09044f0> assign[=] list[[<ast.Subscript object at 0x7da1b09063e0>, <ast.Subscript object at 0x7da1b09142b0>]]
variable[v] assign[=] call[name[s0].std, parameter[]]
<ast.AugAssign object at 0x7da1b0ad85e0>
<ast.AugAssign object at 0x7da1b0ad86a0>
return[list[[<ast.Name object at 0x7da1b0ada2c0>, <ast.Name object at 0x7da1b0ad8610>]]]
if compare[name[N] greater[>] name[D]] begin[:]
variable[sv] assign[=] call[name[linalg].svd, parameter[name[Y]]]
<ast.List object at 0x7da1b0ad85b0> assign[=] list[[<ast.Subscript object at 0x7da1b0ad8700>, <ast.Subscript object at 0x7da1b0adb040>]]
variable[v] assign[=] call[name[s0].std, parameter[]]
<ast.AugAssign object at 0x7da1b0ada6b0>
<ast.AugAssign object at 0x7da1b0ad9f00>
return[list[[<ast.Name object at 0x7da1b0adb6d0>, <ast.Name object at 0x7da1b0ad8a30>]]]
|
keyword[def] identifier[PCA] ( identifier[Y] , identifier[components] ):
literal[string]
identifier[N] , identifier[D] = identifier[Y] . identifier[shape]
identifier[sv] = identifier[linalg] . identifier[svd] ( identifier[Y] , identifier[full_matrices] = literal[int] );
[ identifier[s0] , identifier[w0] ]=[ identifier[sv] [ literal[int] ][:, literal[int] : identifier[components] ], identifier[np] . identifier[dot] ( identifier[np] . identifier[diag] ( identifier[sv] [ literal[int] ]), identifier[sv] [ literal[int] ]). identifier[T] [:, literal[int] : identifier[components] ]]
identifier[v] = identifier[s0] . identifier[std] ( identifier[axis] = literal[int] )
identifier[s0] /= identifier[v] ;
identifier[w0] *= identifier[v] ;
keyword[return] [ identifier[s0] , identifier[w0] ]
keyword[if] identifier[N] > identifier[D] :
identifier[sv] = identifier[linalg] . identifier[svd] ( identifier[Y] , identifier[full_matrices] = literal[int] );
[ identifier[s0] , identifier[w0] ]=[ identifier[sv] [ literal[int] ][:, literal[int] : identifier[components] ], identifier[np] . identifier[dot] ( identifier[np] . identifier[diag] ( identifier[sv] [ literal[int] ]), identifier[sv] [ literal[int] ]). identifier[T] [:, literal[int] : identifier[components] ]]
identifier[v] = identifier[s0] . identifier[std] ( identifier[axis] = literal[int] )
identifier[s0] /= identifier[v] ;
identifier[w0] *= identifier[v] ;
keyword[return] [ identifier[s0] , identifier[w0] ]
keyword[else] :
identifier[K] = identifier[np] . identifier[cov] ( identifier[Y] )
identifier[sv] = identifier[linalg] . identifier[eigh] ( identifier[K] )
identifier[std_var] = identifier[np] . identifier[sqrt] ( identifier[sv] [ literal[int] ])
identifier[pc] = identifier[sv] [ literal[int] ]* identifier[std_var] [ identifier[np] . identifier[newaxis] (), literal[int] ]
keyword[return] [ identifier[pc] , identifier[std_var] ]
|
def PCA(Y, components):
    """
    run PCA, retrieving the first (components) principal components
    return [s0, w0] (for N <= D, returns [pc, std_var])
    s0: factors
    w0: weights
    """
    (N, D) = Y.shape
    # The duplicated unconditional SVD block that shadowed the branch below was removed.
    if N > D:
        sv = linalg.svd(Y, full_matrices=0)
        [s0, w0] = [sv[0][:, 0:components], np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]
        v = s0.std(axis=0)
        s0 /= v
        w0 *= v
        return [s0, w0] # depends on [control=['if'], data=[]]
    else:
        K = np.cov(Y)
        sv = linalg.eigh(K)
        std_var = np.sqrt(sv[0])
        pc = sv[1] * std_var[np.newaxis, 0]  # np.newaxis, not np.newaxis()
        return [pc, std_var]
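
Usage sketch for the N > D path, assuming numpy as np and scipy.linalg as linalg (the names used above):

import numpy as np
from scipy import linalg

Y = np.random.randn(100, 5)          # N=100 samples, D=5 features
s0, w0 = PCA(Y, components=2)        # factors (100, 2) and weights (5, 2)
Y_hat = s0.dot(w0.T)                 # best rank-2 reconstruction of Y
print(np.linalg.norm(Y - Y_hat))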
|
def dics_to_itunessd(header_dic, tracks_dics, playlists_dics_and_indexes):
"""
:param header_dic: dic of header_table
:param tracks_dics: list of all track_table's dics
:param playlists_dics_and_indexes: list of all playlists and all their track's indexes
:return: the whole iTunesSD bytes data
"""
############################################
# header
######
header_dic['length'] = get_table_size(header_table)
header_dic['number_of_tracks'] = len(tracks_dics)
header_dic['number_of_playlists'] = len(playlists_dics_and_indexes)
header_dic['number_of_tracks2'] = 0
header_part_size = get_table_size(header_table)
####################################################################################################################
# tracks
##########
# Chunk of header
tracks_header_dic = {
'length': get_table_size(tracks_header_table) + 4 * len(tracks_dics),
'number_of_tracks': len(tracks_dics)
}
tracks_header_chunk = dic_to_chunk(tracks_header_dic, tracks_header_table)
# Chunk of all tracks
[track_dic.update({'length': get_table_size(track_table)}) for track_dic in tracks_dics]
_tracks_chunks = [dic_to_chunk(dic, track_table) for dic in tracks_dics]
all_tracks_chunck = b''.join(_tracks_chunks)
# Chunk of offsets
_length_before_tracks_offsets = header_part_size + len(tracks_header_chunk)
tracks_offsets_chunck = get_offsets_chunk(_length_before_tracks_offsets, _tracks_chunks)
# Put chunks together
track_part_chunk = tracks_header_chunk + tracks_offsets_chunck + all_tracks_chunck
####################################################################################################################
# playlists
#############
# Chunk of header
_playlists_dics = [playlist_indexes[0] for playlist_indexes in playlists_dics_and_indexes]
_types = [playlist_dic['type'] for playlist_dic in _playlists_dics]
playlists_header_dic = {
'length': get_table_size(playlists_header_table) + 4 * len(playlists_dics_and_indexes),
'number_of_all_playlists': len(_types),
'flag1': 0xffffffff if _types.count(NORMAL) == 0 else 1,
'number_of_normal_playlists': _types.count(NORMAL),
'flag2': 0xffffffff if _types.count(AUDIOBOOK) == 0 else (_types.count(MASTER) + _types.count(NORMAL) +
_types.count(PODCAST)),
'number_of_audiobook_playlists': _types.count(AUDIOBOOK),
'flag3': 0xffffffff if _types.count(PODCAST) == 0 else _types.count(1) + _types.count(NORMAL),
'number_of_podcast_playlists': _types.count(PODCAST)
}
playlists_header_chunk = dic_to_chunk(playlists_header_dic, playlists_header_table)
# Chunk of all playlists
_playlists_chunks = []
for playlist_header_dic, indexes in playlists_dics_and_indexes:
dic = playlist_header_dic.copy()
dic['length'] = get_table_size(playlist_header_table) + 4 * len(indexes)
dic['number_of_all_track'] = len(indexes)
dic['number_of_normal_track'] = len(indexes) if dic['type'] in (1, 2) else 0
if dic['type'] == MASTER:
header_dic['number_of_tracks2'] = len(indexes)
_playlist_header_chunk = dic_to_chunk(dic, playlist_header_table)
_indexes_chunk = b''.join([i.to_bytes(4, 'little') for i in indexes])
playlist_chunk = _playlist_header_chunk + _indexes_chunk
_playlists_chunks.append(playlist_chunk)
all_playlists_chunk = b''.join(_playlists_chunks)
# Chunk of offsets
_length_before_playlists_offsets = header_part_size + len(track_part_chunk) + len(playlists_header_chunk)
playlists_offsets_chunk = get_offsets_chunk(_length_before_playlists_offsets, _playlists_chunks)
# Put chunks together
playlists_part_chunk = playlists_header_chunk + playlists_offsets_chunk + all_playlists_chunk
########################################################################
header_dic['tracks_header_offset'] = header_part_size
header_dic['playlists_header_offset'] = header_part_size + len(track_part_chunk)
header_part_chunk = dic_to_chunk(header_dic, header_table)
########################################################################
itunessd = header_part_chunk + track_part_chunk + playlists_part_chunk
return itunessd
|
def function[dics_to_itunessd, parameter[header_dic, tracks_dics, playlists_dics_and_indexes]]:
constant[
:param header_dic: dic of header_table
:param tracks_dics: list of all track_table's dics
:param playlists_dics_and_indexes: list of all playlists and all their track's indexes
:return: the whole iTunesSD bytes data
]
call[name[header_dic]][constant[length]] assign[=] call[name[get_table_size], parameter[name[header_table]]]
call[name[header_dic]][constant[number_of_tracks]] assign[=] call[name[len], parameter[name[tracks_dics]]]
call[name[header_dic]][constant[number_of_playlists]] assign[=] call[name[len], parameter[name[playlists_dics_and_indexes]]]
call[name[header_dic]][constant[number_of_tracks2]] assign[=] constant[0]
variable[header_part_size] assign[=] call[name[get_table_size], parameter[name[header_table]]]
variable[tracks_header_dic] assign[=] dictionary[[<ast.Constant object at 0x7da1b10477c0>, <ast.Constant object at 0x7da1b1047790>], [<ast.BinOp object at 0x7da1b1047760>, <ast.Call object at 0x7da1b10475b0>]]
variable[tracks_header_chunk] assign[=] call[name[dic_to_chunk], parameter[name[tracks_header_dic], name[tracks_header_table]]]
<ast.ListComp object at 0x7da1b10473d0>
variable[_tracks_chunks] assign[=] <ast.ListComp object at 0x7da1b10470d0>
variable[all_tracks_chunck] assign[=] call[constant[b''].join, parameter[name[_tracks_chunks]]]
variable[_length_before_tracks_offsets] assign[=] binary_operation[name[header_part_size] + call[name[len], parameter[name[tracks_header_chunk]]]]
variable[tracks_offsets_chunck] assign[=] call[name[get_offsets_chunk], parameter[name[_length_before_tracks_offsets], name[_tracks_chunks]]]
variable[track_part_chunk] assign[=] binary_operation[binary_operation[name[tracks_header_chunk] + name[tracks_offsets_chunck]] + name[all_tracks_chunck]]
variable[_playlists_dics] assign[=] <ast.ListComp object at 0x7da1b1045c00>
variable[_types] assign[=] <ast.ListComp object at 0x7da1b1045a20>
variable[playlists_header_dic] assign[=] dictionary[[<ast.Constant object at 0x7da1b1045810>, <ast.Constant object at 0x7da1b10457e0>, <ast.Constant object at 0x7da1b10457b0>, <ast.Constant object at 0x7da1b1045780>, <ast.Constant object at 0x7da1b1045750>, <ast.Constant object at 0x7da1b1045720>, <ast.Constant object at 0x7da1b10456f0>, <ast.Constant object at 0x7da1b10456c0>], [<ast.BinOp object at 0x7da1b1045690>, <ast.Call object at 0x7da1b10454e0>, <ast.IfExp object at 0x7da1b1045450>, <ast.Call object at 0x7da1b10452a0>, <ast.IfExp object at 0x7da1b10451e0>, <ast.Call object at 0x7da1b1044130>, <ast.IfExp object at 0x7da1b10441f0>, <ast.Call object at 0x7da1b1044520>]]
variable[playlists_header_chunk] assign[=] call[name[dic_to_chunk], parameter[name[playlists_header_dic], name[playlists_header_table]]]
variable[_playlists_chunks] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1044700>, <ast.Name object at 0x7da1b1044730>]]] in starred[name[playlists_dics_and_indexes]] begin[:]
variable[dic] assign[=] call[name[playlist_header_dic].copy, parameter[]]
call[name[dic]][constant[length]] assign[=] binary_operation[call[name[get_table_size], parameter[name[playlist_header_table]]] + binary_operation[constant[4] * call[name[len], parameter[name[indexes]]]]]
call[name[dic]][constant[number_of_all_track]] assign[=] call[name[len], parameter[name[indexes]]]
call[name[dic]][constant[number_of_normal_track]] assign[=] <ast.IfExp object at 0x7da1b1044d00>
if compare[call[name[dic]][constant[type]] equal[==] name[MASTER]] begin[:]
call[name[header_dic]][constant[number_of_tracks2]] assign[=] call[name[len], parameter[name[indexes]]]
variable[_playlist_header_chunk] assign[=] call[name[dic_to_chunk], parameter[name[dic], name[playlist_header_table]]]
variable[_indexes_chunk] assign[=] call[constant[b''].join, parameter[<ast.ListComp object at 0x7da1b10b1a20>]]
variable[playlist_chunk] assign[=] binary_operation[name[_playlist_header_chunk] + name[_indexes_chunk]]
call[name[_playlists_chunks].append, parameter[name[playlist_chunk]]]
variable[all_playlists_chunk] assign[=] call[constant[b''].join, parameter[name[_playlists_chunks]]]
variable[_length_before_playlists_offsets] assign[=] binary_operation[binary_operation[name[header_part_size] + call[name[len], parameter[name[track_part_chunk]]]] + call[name[len], parameter[name[playlists_header_chunk]]]]
variable[playlists_offsets_chunk] assign[=] call[name[get_offsets_chunk], parameter[name[_length_before_playlists_offsets], name[_playlists_chunks]]]
variable[playlists_part_chunk] assign[=] binary_operation[binary_operation[name[playlists_header_chunk] + name[playlists_offsets_chunk]] + name[all_playlists_chunk]]
call[name[header_dic]][constant[tracks_header_offset]] assign[=] name[header_part_size]
call[name[header_dic]][constant[playlists_header_offset]] assign[=] binary_operation[name[header_part_size] + call[name[len], parameter[name[track_part_chunk]]]]
variable[header_part_chunk] assign[=] call[name[dic_to_chunk], parameter[name[header_dic], name[header_table]]]
variable[itunessd] assign[=] binary_operation[binary_operation[name[header_part_chunk] + name[track_part_chunk]] + name[playlists_part_chunk]]
return[name[itunessd]]
|
keyword[def] identifier[dics_to_itunessd] ( identifier[header_dic] , identifier[tracks_dics] , identifier[playlists_dics_and_indexes] ):
literal[string]
identifier[header_dic] [ literal[string] ]= identifier[get_table_size] ( identifier[header_table] )
identifier[header_dic] [ literal[string] ]= identifier[len] ( identifier[tracks_dics] )
identifier[header_dic] [ literal[string] ]= identifier[len] ( identifier[playlists_dics_and_indexes] )
identifier[header_dic] [ literal[string] ]= literal[int]
identifier[header_part_size] = identifier[get_table_size] ( identifier[header_table] )
identifier[tracks_header_dic] ={
literal[string] : identifier[get_table_size] ( identifier[tracks_header_table] )+ literal[int] * identifier[len] ( identifier[tracks_dics] ),
literal[string] : identifier[len] ( identifier[tracks_dics] )
}
identifier[tracks_header_chunk] = identifier[dic_to_chunk] ( identifier[tracks_header_dic] , identifier[tracks_header_table] )
[ identifier[track_dic] . identifier[update] ({ literal[string] : identifier[get_table_size] ( identifier[track_table] )}) keyword[for] identifier[track_dic] keyword[in] identifier[tracks_dics] ]
identifier[_tracks_chunks] =[ identifier[dic_to_chunk] ( identifier[dic] , identifier[track_table] ) keyword[for] identifier[dic] keyword[in] identifier[tracks_dics] ]
identifier[all_tracks_chunck] = literal[string] . identifier[join] ( identifier[_tracks_chunks] )
identifier[_length_before_tracks_offsets] = identifier[header_part_size] + identifier[len] ( identifier[tracks_header_chunk] )
identifier[tracks_offsets_chunck] = identifier[get_offsets_chunk] ( identifier[_length_before_tracks_offsets] , identifier[_tracks_chunks] )
identifier[track_part_chunk] = identifier[tracks_header_chunk] + identifier[tracks_offsets_chunck] + identifier[all_tracks_chunck]
identifier[_playlists_dics] =[ identifier[playlist_indexes] [ literal[int] ] keyword[for] identifier[playlist_indexes] keyword[in] identifier[playlists_dics_and_indexes] ]
identifier[_types] =[ identifier[playlist_dic] [ literal[string] ] keyword[for] identifier[playlist_dic] keyword[in] identifier[_playlists_dics] ]
identifier[playlists_header_dic] ={
literal[string] : identifier[get_table_size] ( identifier[playlists_header_table] )+ literal[int] * identifier[len] ( identifier[playlists_dics_and_indexes] ),
literal[string] : identifier[len] ( identifier[_types] ),
literal[string] : literal[int] keyword[if] identifier[_types] . identifier[count] ( identifier[NORMAL] )== literal[int] keyword[else] literal[int] ,
literal[string] : identifier[_types] . identifier[count] ( identifier[NORMAL] ),
literal[string] : literal[int] keyword[if] identifier[_types] . identifier[count] ( identifier[AUDIOBOOK] )== literal[int] keyword[else] ( identifier[_types] . identifier[count] ( identifier[MASTER] )+ identifier[_types] . identifier[count] ( identifier[NORMAL] )+
identifier[_types] . identifier[count] ( identifier[PODCAST] )),
literal[string] : identifier[_types] . identifier[count] ( identifier[AUDIOBOOK] ),
literal[string] : literal[int] keyword[if] identifier[_types] . identifier[count] ( identifier[PODCAST] )== literal[int] keyword[else] identifier[_types] . identifier[count] ( literal[int] )+ identifier[_types] . identifier[count] ( identifier[NORMAL] ),
literal[string] : identifier[_types] . identifier[count] ( identifier[PODCAST] )
}
identifier[playlists_header_chunk] = identifier[dic_to_chunk] ( identifier[playlists_header_dic] , identifier[playlists_header_table] )
identifier[_playlists_chunks] =[]
keyword[for] identifier[playlist_header_dic] , identifier[indexes] keyword[in] identifier[playlists_dics_and_indexes] :
identifier[dic] = identifier[playlist_header_dic] . identifier[copy] ()
identifier[dic] [ literal[string] ]= identifier[get_table_size] ( identifier[playlist_header_table] )+ literal[int] * identifier[len] ( identifier[indexes] )
identifier[dic] [ literal[string] ]= identifier[len] ( identifier[indexes] )
identifier[dic] [ literal[string] ]= identifier[len] ( identifier[indexes] ) keyword[if] identifier[dic] [ literal[string] ] keyword[in] ( literal[int] , literal[int] ) keyword[else] literal[int]
keyword[if] identifier[dic] [ literal[string] ]== identifier[MASTER] :
identifier[header_dic] [ literal[string] ]= identifier[len] ( identifier[indexes] )
identifier[_playlist_header_chunk] = identifier[dic_to_chunk] ( identifier[dic] , identifier[playlist_header_table] )
identifier[_indexes_chunk] = literal[string] . identifier[join] ([ identifier[i] . identifier[to_bytes] ( literal[int] , literal[string] ) keyword[for] identifier[i] keyword[in] identifier[indexes] ])
identifier[playlist_chunk] = identifier[_playlist_header_chunk] + identifier[_indexes_chunk]
identifier[_playlists_chunks] . identifier[append] ( identifier[playlist_chunk] )
identifier[all_playlists_chunk] = literal[string] . identifier[join] ( identifier[_playlists_chunks] )
identifier[_length_before_playlists_offsets] = identifier[header_part_size] + identifier[len] ( identifier[track_part_chunk] )+ identifier[len] ( identifier[playlists_header_chunk] )
identifier[playlists_offsets_chunk] = identifier[get_offsets_chunk] ( identifier[_length_before_playlists_offsets] , identifier[_playlists_chunks] )
identifier[playlists_part_chunk] = identifier[playlists_header_chunk] + identifier[playlists_offsets_chunk] + identifier[all_playlists_chunk]
identifier[header_dic] [ literal[string] ]= identifier[header_part_size]
identifier[header_dic] [ literal[string] ]= identifier[header_part_size] + identifier[len] ( identifier[track_part_chunk] )
identifier[header_part_chunk] = identifier[dic_to_chunk] ( identifier[header_dic] , identifier[header_table] )
identifier[itunessd] = identifier[header_part_chunk] + identifier[track_part_chunk] + identifier[playlists_part_chunk]
keyword[return] identifier[itunessd]
|
def dics_to_itunessd(header_dic, tracks_dics, playlists_dics_and_indexes):
"""
:param header_dic: dic of header_table
:param tracks_dics: list of all track_table's dics
:param playlists_dics_and_indexes: list of all playlists and all their track's indexes
:return: the whole iTunesSD bytes data
"""
############################################
# header
######
header_dic['length'] = get_table_size(header_table)
header_dic['number_of_tracks'] = len(tracks_dics)
header_dic['number_of_playlists'] = len(playlists_dics_and_indexes)
header_dic['number_of_tracks2'] = 0
header_part_size = get_table_size(header_table)
####################################################################################################################
# tracks
##########
# Chunk of header
tracks_header_dic = {'length': get_table_size(tracks_header_table) + 4 * len(tracks_dics), 'number_of_tracks': len(tracks_dics)}
tracks_header_chunk = dic_to_chunk(tracks_header_dic, tracks_header_table)
# Chunk of all tracks
[track_dic.update({'length': get_table_size(track_table)}) for track_dic in tracks_dics]
_tracks_chunks = [dic_to_chunk(dic, track_table) for dic in tracks_dics]
all_tracks_chunck = b''.join(_tracks_chunks)
# Chunk of offsets
_length_before_tracks_offsets = header_part_size + len(tracks_header_chunk)
tracks_offsets_chunck = get_offsets_chunk(_length_before_tracks_offsets, _tracks_chunks)
# Put chunks together
track_part_chunk = tracks_header_chunk + tracks_offsets_chunck + all_tracks_chunck
####################################################################################################################
# playlists
#############
# Chunk of header
_playlists_dics = [playlist_indexes[0] for playlist_indexes in playlists_dics_and_indexes]
_types = [playlist_dic['type'] for playlist_dic in _playlists_dics]
playlists_header_dic = {'length': get_table_size(playlists_header_table) + 4 * len(playlists_dics_and_indexes), 'number_of_all_playlists': len(_types), 'flag1': 4294967295 if _types.count(NORMAL) == 0 else 1, 'number_of_normal_playlists': _types.count(NORMAL), 'flag2': 4294967295 if _types.count(AUDIOBOOK) == 0 else _types.count(MASTER) + _types.count(NORMAL) + _types.count(PODCAST), 'number_of_audiobook_playlists': _types.count(AUDIOBOOK), 'flag3': 4294967295 if _types.count(PODCAST) == 0 else _types.count(1) + _types.count(NORMAL), 'number_of_podcast_playlists': _types.count(PODCAST)}
playlists_header_chunk = dic_to_chunk(playlists_header_dic, playlists_header_table)
# Chunk of all playlists
_playlists_chunks = []
for (playlist_header_dic, indexes) in playlists_dics_and_indexes:
dic = playlist_header_dic.copy()
dic['length'] = get_table_size(playlist_header_table) + 4 * len(indexes)
dic['number_of_all_track'] = len(indexes)
dic['number_of_normal_track'] = len(indexes) if dic['type'] in (1, 2) else 0
if dic['type'] == MASTER:
header_dic['number_of_tracks2'] = len(indexes) # depends on [control=['if'], data=[]]
_playlist_header_chunk = dic_to_chunk(dic, playlist_header_table)
_indexes_chunk = b''.join([i.to_bytes(4, 'little') for i in indexes])
playlist_chunk = _playlist_header_chunk + _indexes_chunk
_playlists_chunks.append(playlist_chunk) # depends on [control=['for'], data=[]]
all_playlists_chunk = b''.join(_playlists_chunks)
# Chunk of offsets
_length_before_playlists_offsets = header_part_size + len(track_part_chunk) + len(playlists_header_chunk)
playlists_offsets_chunk = get_offsets_chunk(_length_before_playlists_offsets, _playlists_chunks)
# Put chunks together
playlists_part_chunk = playlists_header_chunk + playlists_offsets_chunk + all_playlists_chunk
########################################################################
header_dic['tracks_header_offset'] = header_part_size
header_dic['playlists_header_offset'] = header_part_size + len(track_part_chunk)
header_part_chunk = dic_to_chunk(header_dic, header_table)
########################################################################
itunessd = header_part_chunk + track_part_chunk + playlists_part_chunk
return itunessd
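# Hedged usage sketch: the dics must be prepared to match header_table,
# track_table and playlist_header_table; the variable names below are
# illustrative, and master_playlist_dic is assumed to carry 'type': MASTER
# among its fields. Each playlist is paired with the indexes of its tracks.
playlists = [(master_playlist_dic, [0, 1, 2])]
data = dics_to_itunessd(header_dic, tracks_dics, playlists)
with open('iTunesSD', 'wb') as f:
    f.write(data)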
|
def iterable(obj, strok=False):
"""
Checks if the input implements the iterator interface. An exception is made
for strings, which return False unless `strok` is True
Args:
obj (object): a scalar or iterable input
strok (bool): if True allow strings to be interpreted as iterable
Returns:
bool: True if the input is iterable
Example:
>>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]
>>> result = [iterable(obj) for obj in obj_list]
>>> assert result == [False, True, False, True, True, True]
>>> result = [iterable(obj, strok=True) for obj in obj_list]
>>> assert result == [False, True, True, True, True, True]
"""
try:
iter(obj)
except Exception:
return False
else:
return strok or not isinstance(obj, six.string_types)
|
def function[iterable, parameter[obj, strok]]:
constant[
Checks if the input implements the iterator interface. An exception is made
for strings, which return False unless `strok` is True
Args:
obj (object): a scalar or iterable input
strok (bool): if True allow strings to be interpreted as iterable
Returns:
bool: True if the input is iterable
Example:
>>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]
>>> result = [iterable(obj) for obj in obj_list]
>>> assert result == [False, True, False, True, True, True]
>>> result = [iterable(obj, strok=True) for obj in obj_list]
>>> assert result == [False, True, True, True, True, True]
]
<ast.Try object at 0x7da1b01daec0>
|
keyword[def] identifier[iterable] ( identifier[obj] , identifier[strok] = keyword[False] ):
literal[string]
keyword[try] :
identifier[iter] ( identifier[obj] )
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[strok] keyword[or] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[string_types] )
|
def iterable(obj, strok=False):
"""
Checks if the input implements the iterator interface. An exception is made
for strings, which return False unless `strok` is True
Args:
obj (object): a scalar or iterable input
strok (bool): if True allow strings to be interpreted as iterable
Returns:
bool: True if the input is iterable
Example:
>>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]
>>> result = [iterable(obj) for obj in obj_list]
>>> assert result == [False, True, False, True, True, True]
>>> result = [iterable(obj, strok=True) for obj in obj_list]
>>> assert result == [False, True, True, True, True, True]
"""
try:
iter(obj) # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]]
else:
return strok or not isinstance(obj, six.string_types)
|
def forward_committor(T, A, B):
r"""Forward committor between given sets.
The forward committor u(x) between sets A and B is the probability
for the chain starting in x to reach B before reaching A.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
Vector of forward committor probabilities
Notes
-----
The forward committor is a solution to the following
boundary-value problem
.. math::
        \sum_j L_{ij} u_{j} = 0 \quad \text{for } i \in X \setminus (A \cup B) \qquad (I)

        u_{i} = 0 \quad \text{for } i \in A \qquad (II)

        u_{i} = 1 \quad \text{for } i \in B \qquad (III)
with generator matrix L=(P-I).
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError("Sets A and B have to be disjoint")
L = T - np.eye(T.shape[0]) # Generator matrix
"""Assemble left hand-side W for linear system"""
"""Equation (I)"""
W = 1.0 * L
"""Equation (II)"""
W[list(A), :] = 0.0
W[list(A), list(A)] = 1.0
"""Equation (III)"""
W[list(B), :] = 0.0
W[list(B), list(B)] = 1.0
"""Assemble right hand side r for linear system"""
"""Equation (I+II)"""
r = np.zeros(T.shape[0])
"""Equation (III)"""
r[list(B)] = 1.0
u = solve(W, r)
return u
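# Minimal usage sketch, assuming numpy (as np) and a linear solver `solve`
# (e.g. scipy.linalg.solve) in scope, as the function above requires. For
# this symmetric 3-state chain the middle state reaches either end with
# equal probability:
T_demo = np.array([[0.8, 0.2, 0.0],
                   [0.1, 0.8, 0.1],
                   [0.0, 0.2, 0.8]])
forward_committor(T_demo, A=[0], B=[2])   # -> array([0. , 0.5, 1. ])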
|
def function[forward_committor, parameter[T, A, B]]:
constant[Forward committor between given sets.
The forward committor u(x) between sets A and B is the probability
for the chain starting in x to reach B before reaching A.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
Vector of forward committor probabilities
Notes
-----
The forward committor is a solution to the following
boundary-value problem
.. math::
        \sum_j L_{ij} u_{j} = 0 \quad \text{for } i \in X \setminus (A \cup B) \qquad (I)

        u_{i} = 0 \quad \text{for } i \in A \qquad (II)

        u_{i} = 1 \quad \text{for } i \in B \qquad (III)
with generator matrix L=(P-I).
]
variable[X] assign[=] call[name[set], parameter[call[name[range], parameter[call[name[T].shape][constant[0]]]]]]
variable[A] assign[=] call[name[set], parameter[name[A]]]
variable[B] assign[=] call[name[set], parameter[name[B]]]
variable[AB] assign[=] call[name[A].intersection, parameter[name[B]]]
variable[notAB] assign[=] call[call[name[X].difference, parameter[name[A]]].difference, parameter[name[B]]]
if compare[call[name[len], parameter[name[AB]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da1b2545810>
variable[L] assign[=] binary_operation[name[T] - call[name[np].eye, parameter[call[name[T].shape][constant[0]]]]]
constant[Assemble left hand-side W for linear system]
constant[Equation (I)]
variable[W] assign[=] binary_operation[constant[1.0] * name[L]]
constant[Equation (II)]
call[name[W]][tuple[[<ast.Call object at 0x7da1b2545c00>, <ast.Slice object at 0x7da1b2544c40>]]] assign[=] constant[0.0]
call[name[W]][tuple[[<ast.Call object at 0x7da1b2546f50>, <ast.Call object at 0x7da1b25451b0>]]] assign[=] constant[1.0]
constant[Equation (III)]
call[name[W]][tuple[[<ast.Call object at 0x7da1b2544eb0>, <ast.Slice object at 0x7da1b2607ee0>]]] assign[=] constant[0.0]
call[name[W]][tuple[[<ast.Call object at 0x7da1b26064a0>, <ast.Call object at 0x7da1b2606ad0>]]] assign[=] constant[1.0]
constant[Assemble right hand side r for linear system]
constant[Equation (I+II)]
variable[r] assign[=] call[name[np].zeros, parameter[call[name[T].shape][constant[0]]]]
constant[Equation (III)]
call[name[r]][call[name[list], parameter[name[B]]]] assign[=] constant[1.0]
variable[u] assign[=] call[name[solve], parameter[name[W], name[r]]]
return[name[u]]
|
keyword[def] identifier[forward_committor] ( identifier[T] , identifier[A] , identifier[B] ):
literal[string]
identifier[X] = identifier[set] ( identifier[range] ( identifier[T] . identifier[shape] [ literal[int] ]))
identifier[A] = identifier[set] ( identifier[A] )
identifier[B] = identifier[set] ( identifier[B] )
identifier[AB] = identifier[A] . identifier[intersection] ( identifier[B] )
identifier[notAB] = identifier[X] . identifier[difference] ( identifier[A] ). identifier[difference] ( identifier[B] )
keyword[if] identifier[len] ( identifier[AB] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[L] = identifier[T] - identifier[np] . identifier[eye] ( identifier[T] . identifier[shape] [ literal[int] ])
literal[string]
literal[string]
identifier[W] = literal[int] * identifier[L]
literal[string]
identifier[W] [ identifier[list] ( identifier[A] ),:]= literal[int]
identifier[W] [ identifier[list] ( identifier[A] ), identifier[list] ( identifier[A] )]= literal[int]
literal[string]
identifier[W] [ identifier[list] ( identifier[B] ),:]= literal[int]
identifier[W] [ identifier[list] ( identifier[B] ), identifier[list] ( identifier[B] )]= literal[int]
literal[string]
literal[string]
identifier[r] = identifier[np] . identifier[zeros] ( identifier[T] . identifier[shape] [ literal[int] ])
literal[string]
identifier[r] [ identifier[list] ( identifier[B] )]= literal[int]
identifier[u] = identifier[solve] ( identifier[W] , identifier[r] )
keyword[return] identifier[u]
|
def forward_committor(T, A, B):
"""Forward committor between given sets.
The forward committor u(x) between sets A and B is the probability
for the chain starting in x to reach B before reaching A.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
Vector of forward committor probabilities
Notes
-----
The forward committor is a solution to the following
boundary-value problem
.. math::
        \\sum_j L_{ij} u_{j} = 0 \\quad \\text{for } i \\in X \\setminus (A \\cup B) \\qquad (I)

        u_{i} = 0 \\quad \\text{for } i \\in A \\qquad (II)

        u_{i} = 1 \\quad \\text{for } i \\in B \\qquad (III)
with generator matrix L=(P-I).
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError('Sets A and B have to be disjoint') # depends on [control=['if'], data=[]]
L = T - np.eye(T.shape[0]) # Generator matrix
'Assemble left hand-side W for linear system'
'Equation (I)'
W = 1.0 * L
'Equation (II)'
W[list(A), :] = 0.0
W[list(A), list(A)] = 1.0
'Equation (III)'
W[list(B), :] = 0.0
W[list(B), list(B)] = 1.0
'Assemble right hand side r for linear system'
'Equation (I+II)'
r = np.zeros(T.shape[0])
'Equation (III)'
r[list(B)] = 1.0
u = solve(W, r)
return u
|
def get(cls, key, default=None, separator='.', global_=False):
""" Retrieve a key value from loaded configuration.
Order of search if global_=False:
    1/ environment variables
2/ local configuration
3/ global configuration
"""
    # first check environment variables
# if we're not in global scope
if not global_:
ret = os.environ.get(key.upper().replace('.', '_'))
if ret is not None:
return ret
    # then check local, then global configuration (only global when global_=True)
scopes = ['global'] if global_ else ['local', 'global']
for scope in scopes:
ret = cls._get(scope, key, default, separator)
if ret is not None and ret != default:
return ret
if ret is None or ret == default:
return default
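# Illustration of the lookup order; `Config` stands in for the class that
# defines get(), and 'smtp.host' is a made-up key:
import os
os.environ['SMTP_HOST'] = 'mail.example.org'
Config.get('smtp.host')                # env var wins -> 'mail.example.org'
Config.get('smtp.host', global_=True)  # env vars skipped, global scope only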
|
def function[get, parameter[cls, key, default, separator, global_]]:
constant[ Retrieve a key value from loaded configuration.
Order of search if global_=False:
    1/ environment variables
2/ local configuration
3/ global configuration
]
if <ast.UnaryOp object at 0x7da18ede69b0> begin[:]
variable[ret] assign[=] call[name[os].environ.get, parameter[call[call[name[key].upper, parameter[]].replace, parameter[constant[.], constant[_]]]]]
if compare[name[ret] is_not constant[None]] begin[:]
return[name[ret]]
variable[scopes] assign[=] <ast.IfExp object at 0x7da18ede7220>
for taget[name[scope]] in starred[name[scopes]] begin[:]
variable[ret] assign[=] call[name[cls]._get, parameter[name[scope], name[key], name[default], name[separator]]]
if <ast.BoolOp object at 0x7da18ede6200> begin[:]
return[name[ret]]
if <ast.BoolOp object at 0x7da18ede5630> begin[:]
return[name[default]]
|
keyword[def] identifier[get] ( identifier[cls] , identifier[key] , identifier[default] = keyword[None] , identifier[separator] = literal[string] , identifier[global_] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[global_] :
identifier[ret] = identifier[os] . identifier[environ] . identifier[get] ( identifier[key] . identifier[upper] (). identifier[replace] ( literal[string] , literal[string] ))
keyword[if] identifier[ret] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ret]
identifier[scopes] =[ literal[string] ] keyword[if] identifier[global_] keyword[else] [ literal[string] , literal[string] ]
keyword[for] identifier[scope] keyword[in] identifier[scopes] :
identifier[ret] = identifier[cls] . identifier[_get] ( identifier[scope] , identifier[key] , identifier[default] , identifier[separator] )
keyword[if] identifier[ret] keyword[is] keyword[not] keyword[None] keyword[and] identifier[ret] != identifier[default] :
keyword[return] identifier[ret]
keyword[if] identifier[ret] keyword[is] keyword[None] keyword[or] identifier[ret] == identifier[default] :
keyword[return] identifier[default]
|
def get(cls, key, default=None, separator='.', global_=False):
""" Retrieve a key value from loaded configuration.
Order of search if global_=False:
1/ environnment variables
2/ local configuration
3/ global configuration
"""
    # first check environment variables
# if we're not in global scope
if not global_:
ret = os.environ.get(key.upper().replace('.', '_'))
if ret is not None:
return ret # depends on [control=['if'], data=['ret']] # depends on [control=['if'], data=[]]
    # then check local, then global configuration (only global when global_=True)
scopes = ['global'] if global_ else ['local', 'global']
for scope in scopes:
ret = cls._get(scope, key, default, separator)
if ret is not None and ret != default:
return ret # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['scope']]
if ret is None or ret == default:
return default # depends on [control=['if'], data=[]]
|
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
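# Example (assumes a loaded pefile.PE-style instance `pe`; the offset and
# value are illustrative). The dword is written little-endian:
pe.set_dword_at_offset(0x128, 0x00000001)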
|
def function[set_dword_at_offset, parameter[self, offset, dword]]:
constant[Set the double word value at the given file offset.]
return[call[name[self].set_bytes_at_offset, parameter[name[offset], call[name[self].get_data_from_dword, parameter[name[dword]]]]]]
|
keyword[def] identifier[set_dword_at_offset] ( identifier[self] , identifier[offset] , identifier[dword] ):
literal[string]
keyword[return] identifier[self] . identifier[set_bytes_at_offset] ( identifier[offset] , identifier[self] . identifier[get_data_from_dword] ( identifier[dword] ))
|
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
|
def calculate_colorbar(self):
"""
Returns the positions and colors of all intervals inside the colorbar.
"""
self._base._process_values()
self._base._find_range()
X, Y = self._base._mesh()
C = self._base._values[:, np.newaxis]
return X, Y, C
|
def function[calculate_colorbar, parameter[self]]:
constant[
Returns the positions and colors of all intervals inside the colorbar.
]
call[name[self]._base._process_values, parameter[]]
call[name[self]._base._find_range, parameter[]]
<ast.Tuple object at 0x7da18f58f010> assign[=] call[name[self]._base._mesh, parameter[]]
variable[C] assign[=] call[name[self]._base._values][tuple[[<ast.Slice object at 0x7da18f58fe50>, <ast.Attribute object at 0x7da18f58e980>]]]
return[tuple[[<ast.Name object at 0x7da18f58d330>, <ast.Name object at 0x7da18f58d630>, <ast.Name object at 0x7da18f58c370>]]]
|
keyword[def] identifier[calculate_colorbar] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_base] . identifier[_process_values] ()
identifier[self] . identifier[_base] . identifier[_find_range] ()
identifier[X] , identifier[Y] = identifier[self] . identifier[_base] . identifier[_mesh] ()
identifier[C] = identifier[self] . identifier[_base] . identifier[_values] [:, identifier[np] . identifier[newaxis] ]
keyword[return] identifier[X] , identifier[Y] , identifier[C]
|
def calculate_colorbar(self):
"""
Returns the positions and colors of all intervals inside the colorbar.
"""
self._base._process_values()
self._base._find_range()
(X, Y) = self._base._mesh()
C = self._base._values[:, np.newaxis]
return (X, Y, C)
|
def hybrid_threaded_worker(selector, workers):
"""Runs a set of workers, each in a separate thread.
:param selector:
A function that takes a hints-tuple and returns a key
indexing a worker in the `workers` dictionary.
:param workers:
A dictionary of workers.
:returns:
A connection for the scheduler.
:rtype: Connection
The hybrid worker dispatches jobs to the different workers
based on the information contained in the hints. If no hints
were given, the job is run in the main thread.
Dispatching is done in the main thread. Retrieving results is
done in a separate thread for each worker. In this design it is
assumed that dispatching a job takes little time, while waiting for
one to return a result may take a long time.
"""
result_queue = Queue()
job_sink = {k: w.sink() for k, w in workers.items()}
@push
def dispatch_job():
default_sink = result_queue.sink()
while True:
msg = yield
if msg is EndOfQueue:
for k in workers.keys():
try:
job_sink[k].send(EndOfQueue)
except StopIteration:
pass
return
if msg is FlushQueue:
for k in workers.keys():
try:
job_sink[k].send(FlushQueue)
except StopIteration:
pass
return
worker = selector(msg.node)
if worker:
job_sink[worker].send(msg)
else:
default_sink.send(run_job(*msg))
for key, worker in workers.items():
t = threading.Thread(
target=patch,
args=(worker.source, result_queue.sink))
t.daemon = True
t.start()
return Connection(result_queue.source, dispatch_job)
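# Hedged wiring sketch: the worker constructors, the 'queue' hint key and
# the worker names below are assumptions, not part of the function above.
def selector(node):
    hints = getattr(node, 'hints', None)
    return hints.get('queue') if hints else None  # None -> run in main thread

workers = {'gpu': make_gpu_worker(), 'cpu': make_cpu_worker()}  # hypothetical
connection = hybrid_threaded_worker(selector, workers)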
|
def function[hybrid_threaded_worker, parameter[selector, workers]]:
constant[Runs a set of workers, each in a separate thread.
:param selector:
A function that takes a hints-tuple and returns a key
indexing a worker in the `workers` dictionary.
:param workers:
A dictionary of workers.
:returns:
A connection for the scheduler.
:rtype: Connection
The hybrid worker dispatches jobs to the different workers
based on the information contained in the hints. If no hints
were given, the job is run in the main thread.
Dispatching is done in the main thread. Retrieving results is
done in a separate thread for each worker. In this design it is
assumed that dispatching a job takes little time, while waiting for
one to return a result may take a long time.
]
variable[result_queue] assign[=] call[name[Queue], parameter[]]
variable[job_sink] assign[=] <ast.DictComp object at 0x7da204346d70>
def function[dispatch_job, parameter[]]:
variable[default_sink] assign[=] call[name[result_queue].sink, parameter[]]
while constant[True] begin[:]
variable[msg] assign[=] <ast.Yield object at 0x7da204344a30>
if compare[name[msg] is name[EndOfQueue]] begin[:]
for taget[name[k]] in starred[call[name[workers].keys, parameter[]]] begin[:]
<ast.Try object at 0x7da204345540>
return[None]
if compare[name[msg] is name[FlushQueue]] begin[:]
for taget[name[k]] in starred[call[name[workers].keys, parameter[]]] begin[:]
<ast.Try object at 0x7da204347280>
return[None]
variable[worker] assign[=] call[name[selector], parameter[name[msg].node]]
if name[worker] begin[:]
call[call[name[job_sink]][name[worker]].send, parameter[name[msg]]]
for taget[tuple[[<ast.Name object at 0x7da204345900>, <ast.Name object at 0x7da204347af0>]]] in starred[call[name[workers].items, parameter[]]] begin[:]
variable[t] assign[=] call[name[threading].Thread, parameter[]]
name[t].daemon assign[=] constant[True]
call[name[t].start, parameter[]]
return[call[name[Connection], parameter[name[result_queue].source, name[dispatch_job]]]]
|
keyword[def] identifier[hybrid_threaded_worker] ( identifier[selector] , identifier[workers] ):
literal[string]
identifier[result_queue] = identifier[Queue] ()
identifier[job_sink] ={ identifier[k] : identifier[w] . identifier[sink] () keyword[for] identifier[k] , identifier[w] keyword[in] identifier[workers] . identifier[items] ()}
@ identifier[push]
keyword[def] identifier[dispatch_job] ():
identifier[default_sink] = identifier[result_queue] . identifier[sink] ()
keyword[while] keyword[True] :
identifier[msg] = keyword[yield]
keyword[if] identifier[msg] keyword[is] identifier[EndOfQueue] :
keyword[for] identifier[k] keyword[in] identifier[workers] . identifier[keys] ():
keyword[try] :
identifier[job_sink] [ identifier[k] ]. identifier[send] ( identifier[EndOfQueue] )
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[return]
keyword[if] identifier[msg] keyword[is] identifier[FlushQueue] :
keyword[for] identifier[k] keyword[in] identifier[workers] . identifier[keys] ():
keyword[try] :
identifier[job_sink] [ identifier[k] ]. identifier[send] ( identifier[FlushQueue] )
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[return]
identifier[worker] = identifier[selector] ( identifier[msg] . identifier[node] )
keyword[if] identifier[worker] :
identifier[job_sink] [ identifier[worker] ]. identifier[send] ( identifier[msg] )
keyword[else] :
identifier[default_sink] . identifier[send] ( identifier[run_job] (* identifier[msg] ))
keyword[for] identifier[key] , identifier[worker] keyword[in] identifier[workers] . identifier[items] ():
identifier[t] = identifier[threading] . identifier[Thread] (
identifier[target] = identifier[patch] ,
identifier[args] =( identifier[worker] . identifier[source] , identifier[result_queue] . identifier[sink] ))
identifier[t] . identifier[daemon] = keyword[True]
identifier[t] . identifier[start] ()
keyword[return] identifier[Connection] ( identifier[result_queue] . identifier[source] , identifier[dispatch_job] )
|
def hybrid_threaded_worker(selector, workers):
"""Runs a set of workers, each in a separate thread.
:param selector:
A function that takes a hints-tuple and returns a key
indexing a worker in the `workers` dictionary.
:param workers:
A dictionary of workers.
:returns:
A connection for the scheduler.
:rtype: Connection
The hybrid worker dispatches jobs to the different workers
based on the information contained in the hints. If no hints
were given, the job is run in the main thread.
Dispatching is done in the main thread. Retrieving results is
done in a separate thread for each worker. In this design it is
assumed that dispatching a job takes little time, while waiting for
one to return a result may take a long time.
"""
result_queue = Queue()
job_sink = {k: w.sink() for (k, w) in workers.items()}
@push
def dispatch_job():
default_sink = result_queue.sink()
while True:
msg = (yield)
if msg is EndOfQueue:
for k in workers.keys():
try:
job_sink[k].send(EndOfQueue) # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['k']]
return # depends on [control=['if'], data=['EndOfQueue']]
if msg is FlushQueue:
for k in workers.keys():
try:
job_sink[k].send(FlushQueue) # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['k']]
return # depends on [control=['if'], data=['FlushQueue']]
worker = selector(msg.node)
if worker:
job_sink[worker].send(msg) # depends on [control=['if'], data=[]]
else:
default_sink.send(run_job(*msg)) # depends on [control=['while'], data=[]]
for (key, worker) in workers.items():
t = threading.Thread(target=patch, args=(worker.source, result_queue.sink))
t.daemon = True
t.start() # depends on [control=['for'], data=[]]
return Connection(result_queue.source, dispatch_job)
|
def express_route_cross_connections(self):
"""Instance depends on the API version:
* 2018-02-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionsOperations>`
* 2018-04-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionsOperations>`
"""
api_version = self._get_api_version('express_route_cross_connections')
if api_version == '2018-02-01':
from .v2018_02_01.operations import ExpressRouteCrossConnectionsOperations as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import ExpressRouteCrossConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
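# Typical access (in the real multi-API client this is exposed as a property,
# so no call parentheses; `client` is a NetworkManagementClient-style
# instance). The returned object carries the operations for whichever API
# version the client profile selects:
ops = client.express_route_cross_connections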
|
def function[express_route_cross_connections, parameter[self]]:
constant[Instance depends on the API version:
* 2018-02-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionsOperations>`
* 2018-04-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionsOperations>`
]
variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[express_route_cross_connections]]]
if compare[name[api_version] equal[==] constant[2018-02-01]] begin[:]
from relative_module[v2018_02_01.operations] import module[ExpressRouteCrossConnectionsOperations]
return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]]
|
keyword[def] identifier[express_route_cross_connections] ( identifier[self] ):
literal[string]
identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] )
keyword[if] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_02_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_04_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionsOperations] keyword[as] identifier[OperationClass]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] ))
keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )))
|
def express_route_cross_connections(self):
"""Instance depends on the API version:
* 2018-02-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionsOperations>`
* 2018-04-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionsOperations>`
"""
api_version = self._get_api_version('express_route_cross_connections')
if api_version == '2018-02-01':
from .v2018_02_01.operations import ExpressRouteCrossConnectionsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-04-01':
from .v2018_04_01.operations import ExpressRouteCrossConnectionsOperations as OperationClass # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('APIVersion {} is not available'.format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
|
def uri_to_iri(uri, charset='utf-8', errors='replace'):
r"""
    Converts a URI in a given charset to an IRI.
Examples for URI versus IRI:
>>> uri_to_iri(b'http://xn--n3h.net/')
u'http://\u2603.net/'
>>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'
Query strings are left unchanged:
>>> uri_to_iri('/?foo=24&x=%26%2f')
u'/?foo=24&x=%26%2f'
.. versionadded:: 0.6
:param uri: The URI to convert.
:param charset: The charset of the URI.
:param errors: The error handling on decode.
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, '/;?')
query = url_unquote(uri.query, charset, errors, ';/?:@&=+,$')
fragment = url_unquote(uri.fragment, charset, errors, ';/?:@&=+,$')
return url_unparse((uri.scheme, uri.decode_netloc(),
path, query, fragment))
|
def function[uri_to_iri, parameter[uri, charset, errors]]:
constant[
    Converts a URI in a given charset to an IRI.
Examples for URI versus IRI:
>>> uri_to_iri(b'http://xn--n3h.net/')
u'http://\u2603.net/'
>>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'
Query strings are left unchanged:
>>> uri_to_iri('/?foo=24&x=%26%2f')
u'/?foo=24&x=%26%2f'
.. versionadded:: 0.6
:param uri: The URI to convert.
:param charset: The charset of the URI.
:param errors: The error handling on decode.
]
if call[name[isinstance], parameter[name[uri], name[tuple]]] begin[:]
variable[uri] assign[=] call[name[url_unparse], parameter[name[uri]]]
variable[uri] assign[=] call[name[url_parse], parameter[call[name[to_unicode], parameter[name[uri], name[charset]]]]]
variable[path] assign[=] call[name[url_unquote], parameter[name[uri].path, name[charset], name[errors], constant[/;?]]]
variable[query] assign[=] call[name[url_unquote], parameter[name[uri].query, name[charset], name[errors], constant[;/?:@&=+,$]]]
variable[fragment] assign[=] call[name[url_unquote], parameter[name[uri].fragment, name[charset], name[errors], constant[;/?:@&=+,$]]]
return[call[name[url_unparse], parameter[tuple[[<ast.Attribute object at 0x7da2044c1a80>, <ast.Call object at 0x7da2044c25f0>, <ast.Name object at 0x7da2044c0b50>, <ast.Name object at 0x7da2044c21d0>, <ast.Name object at 0x7da2044c2050>]]]]]
|
keyword[def] identifier[uri_to_iri] ( identifier[uri] , identifier[charset] = literal[string] , identifier[errors] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[uri] , identifier[tuple] ):
identifier[uri] = identifier[url_unparse] ( identifier[uri] )
identifier[uri] = identifier[url_parse] ( identifier[to_unicode] ( identifier[uri] , identifier[charset] ))
identifier[path] = identifier[url_unquote] ( identifier[uri] . identifier[path] , identifier[charset] , identifier[errors] , literal[string] )
identifier[query] = identifier[url_unquote] ( identifier[uri] . identifier[query] , identifier[charset] , identifier[errors] , literal[string] )
identifier[fragment] = identifier[url_unquote] ( identifier[uri] . identifier[fragment] , identifier[charset] , identifier[errors] , literal[string] )
keyword[return] identifier[url_unparse] (( identifier[uri] . identifier[scheme] , identifier[uri] . identifier[decode_netloc] (),
identifier[path] , identifier[query] , identifier[fragment] ))
|
def uri_to_iri(uri, charset='utf-8', errors='replace'):
"""
    Converts a URI in a given charset to an IRI.
Examples for URI versus IRI:
>>> uri_to_iri(b'http://xn--n3h.net/')
u'http://\\u2603.net/'
>>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
u'http://\\xfcser:p\\xe4ssword@\\u2603.net/p\\xe5th'
Query strings are left unchanged:
>>> uri_to_iri('/?foo=24&x=%26%2f')
u'/?foo=24&x=%26%2f'
.. versionadded:: 0.6
:param uri: The URI to convert.
:param charset: The charset of the URI.
:param errors: The error handling on decode.
"""
if isinstance(uri, tuple):
uri = url_unparse(uri) # depends on [control=['if'], data=[]]
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, '/;?')
query = url_unquote(uri.query, charset, errors, ';/?:@&=+,$')
fragment = url_unquote(uri.fragment, charset, errors, ';/?:@&=+,$')
return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))
|
def find_prekeyed(self, value, key):
"""
Find a value in a node, using a key function. The value is already a
key.
"""
while self is not NULL:
direction = cmp(value, key(self.value))
if direction < 0:
self = self.left
elif direction > 0:
self = self.right
elif direction == 0:
return self.value
|
def function[find_prekeyed, parameter[self, value, key]]:
constant[
Find a value in a node, using a key function. The value is already a
key.
]
while compare[name[self] is_not name[NULL]] begin[:]
variable[direction] assign[=] call[name[cmp], parameter[name[value], call[name[key], parameter[name[self].value]]]]
if compare[name[direction] less[<] constant[0]] begin[:]
variable[self] assign[=] name[self].left
|
keyword[def] identifier[find_prekeyed] ( identifier[self] , identifier[value] , identifier[key] ):
literal[string]
keyword[while] identifier[self] keyword[is] keyword[not] identifier[NULL] :
identifier[direction] = identifier[cmp] ( identifier[value] , identifier[key] ( identifier[self] . identifier[value] ))
keyword[if] identifier[direction] < literal[int] :
identifier[self] = identifier[self] . identifier[left]
keyword[elif] identifier[direction] > literal[int] :
identifier[self] = identifier[self] . identifier[right]
keyword[elif] identifier[direction] == literal[int] :
keyword[return] identifier[self] . identifier[value]
|
def find_prekeyed(self, value, key):
"""
Find a value in a node, using a key function. The value is already a
key.
"""
while self is not NULL:
direction = cmp(value, key(self.value))
if direction < 0:
self = self.left # depends on [control=['if'], data=[]]
elif direction > 0:
self = self.right # depends on [control=['if'], data=[]]
elif direction == 0:
return self.value # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['self']]
|
def dump_json(data, indent=None):
"""
:param list | dict data:
:param Optional[int] indent:
:rtype: unicode
"""
return json.dumps(data,
indent=indent,
ensure_ascii=False,
sort_keys=True,
separators=(',', ': '))
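# Example:
dump_json({'b': 1, 'a': 2}, indent=2)
# -> '{\n  "a": 2,\n  "b": 1\n}'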
|
def function[dump_json, parameter[data, indent]]:
constant[
:param list | dict data:
:param Optional[int] indent:
:rtype: unicode
]
return[call[name[json].dumps, parameter[name[data]]]]
|
keyword[def] identifier[dump_json] ( identifier[data] , identifier[indent] = keyword[None] ):
literal[string]
keyword[return] identifier[json] . identifier[dumps] ( identifier[data] ,
identifier[indent] = identifier[indent] ,
identifier[ensure_ascii] = keyword[False] ,
identifier[sort_keys] = keyword[True] ,
identifier[separators] =( literal[string] , literal[string] ))
|
def dump_json(data, indent=None):
"""
:param list | dict data:
:param Optional[int] indent:
:rtype: unicode
"""
return json.dumps(data, indent=indent, ensure_ascii=False, sort_keys=True, separators=(',', ': '))
|
def _step_to_marker_frame(self, frame_no, dt=None):
'''Update the simulator to a specific frame of marker data.
This method returns a generator of body states for the skeleton! This
generator must be exhausted (e.g., by consuming this call in a for loop)
for the simulator to work properly.
This process involves the following steps:
- Move the markers to their new location:
- Detach from the skeleton
- Update marker locations
- Reattach to the skeleton
- Detect ODE collisions
- Yield the states of the bodies in the skeleton
- Advance the ODE world one step
Parameters
----------
frame_no : int
Step to this frame of marker data.
dt : float, optional
Step with this time duration. Defaults to ``self.dt``.
Returns
-------
states : sequence of state tuples
A generator of a sequence of one body state for the skeleton. This
generator must be exhausted for the simulation to work properly.
'''
# update the positions and velocities of the markers.
self.markers.detach()
self.markers.reposition(frame_no)
self.markers.attach(frame_no)
# detect collisions.
self.ode_space.collide(None, self.on_collision)
# record the state of each skeleton body.
states = self.skeleton.get_body_states()
self.skeleton.set_body_states(states)
# yield the current simulation state to our caller.
yield states
# update the ode world.
self.ode_world.step(dt or self.dt)
# clear out contact joints to prepare for the next frame.
self.ode_contactgroup.empty()
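# The returned generator must be drained for the world step to happen, e.g.
# (`world` is an instance of the enclosing class; consume() is a hypothetical
# user of the yielded body states -- any exhaustion of the generator works):
for states in world._step_to_marker_frame(frame_no):
    consume(states)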
|
def function[_step_to_marker_frame, parameter[self, frame_no, dt]]:
constant[Update the simulator to a specific frame of marker data.
This method returns a generator of body states for the skeleton! This
generator must be exhausted (e.g., by consuming this call in a for loop)
for the simulator to work properly.
This process involves the following steps:
- Move the markers to their new location:
- Detach from the skeleton
- Update marker locations
- Reattach to the skeleton
- Detect ODE collisions
- Yield the states of the bodies in the skeleton
- Advance the ODE world one step
Parameters
----------
frame_no : int
Step to this frame of marker data.
dt : float, optional
Step with this time duration. Defaults to ``self.dt``.
Returns
-------
states : sequence of state tuples
A generator of a sequence of one body state for the skeleton. This
generator must be exhausted for the simulation to work properly.
]
call[name[self].markers.detach, parameter[]]
call[name[self].markers.reposition, parameter[name[frame_no]]]
call[name[self].markers.attach, parameter[name[frame_no]]]
call[name[self].ode_space.collide, parameter[constant[None], name[self].on_collision]]
variable[states] assign[=] call[name[self].skeleton.get_body_states, parameter[]]
call[name[self].skeleton.set_body_states, parameter[name[states]]]
<ast.Yield object at 0x7da1afe0fd00>
call[name[self].ode_world.step, parameter[<ast.BoolOp object at 0x7da1afe0d090>]]
call[name[self].ode_contactgroup.empty, parameter[]]
|
keyword[def] identifier[_step_to_marker_frame] ( identifier[self] , identifier[frame_no] , identifier[dt] = keyword[None] ):
literal[string]
identifier[self] . identifier[markers] . identifier[detach] ()
identifier[self] . identifier[markers] . identifier[reposition] ( identifier[frame_no] )
identifier[self] . identifier[markers] . identifier[attach] ( identifier[frame_no] )
identifier[self] . identifier[ode_space] . identifier[collide] ( keyword[None] , identifier[self] . identifier[on_collision] )
identifier[states] = identifier[self] . identifier[skeleton] . identifier[get_body_states] ()
identifier[self] . identifier[skeleton] . identifier[set_body_states] ( identifier[states] )
keyword[yield] identifier[states]
identifier[self] . identifier[ode_world] . identifier[step] ( identifier[dt] keyword[or] identifier[self] . identifier[dt] )
identifier[self] . identifier[ode_contactgroup] . identifier[empty] ()
|
def _step_to_marker_frame(self, frame_no, dt=None):
"""Update the simulator to a specific frame of marker data.
This method returns a generator of body states for the skeleton! This
generator must be exhausted (e.g., by consuming this call in a for loop)
for the simulator to work properly.
This process involves the following steps:
- Move the markers to their new location:
- Detach from the skeleton
- Update marker locations
- Reattach to the skeleton
- Detect ODE collisions
- Yield the states of the bodies in the skeleton
- Advance the ODE world one step
Parameters
----------
frame_no : int
Step to this frame of marker data.
dt : float, optional
Step with this time duration. Defaults to ``self.dt``.
Returns
-------
states : sequence of state tuples
A generator of a sequence of one body state for the skeleton. This
generator must be exhausted for the simulation to work properly.
"""
# update the positions and velocities of the markers.
self.markers.detach()
self.markers.reposition(frame_no)
self.markers.attach(frame_no)
# detect collisions.
self.ode_space.collide(None, self.on_collision)
# record the state of each skeleton body.
states = self.skeleton.get_body_states()
self.skeleton.set_body_states(states)
# yield the current simulation state to our caller.
yield states
# update the ode world.
self.ode_world.step(dt or self.dt)
# clear out contact joints to prepare for the next frame.
self.ode_contactgroup.empty()
|
def require_ajax_logged_in(func):
"""Check if ajax API is logged in and login if not
"""
@functools.wraps(func)
def inner_func(self, *pargs, **kwargs):
if not self._ajax_api.logged_in:
logger.info('Logging into AJAX API for required meta method')
if not self.has_credentials:
raise ApiLoginFailure(
'Login is required but no credentials were provided')
self._ajax_api.User_Login(name=self._state['username'],
password=self._state['password'])
return func(self, *pargs, **kwargs)
return inner_func
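# Typical use on an API method; the class supplies the attributes the
# decorator expects (_ajax_api, has_credentials, _state). Item_Get is a
# hypothetical AJAX call:
class MetaApi(object):
    @require_ajax_logged_in
    def fetch_item(self, item_id):
        return self._ajax_api.Item_Get(id=item_id)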
|
def function[require_ajax_logged_in, parameter[func]]:
    constant[Check if the AJAX API is logged in and log in if not
]
def function[inner_func, parameter[self]]:
if <ast.UnaryOp object at 0x7da1b0fe5cf0> begin[:]
call[name[logger].info, parameter[constant[Logging into AJAX API for required meta method]]]
if <ast.UnaryOp object at 0x7da1b0fe5ea0> begin[:]
<ast.Raise object at 0x7da1b0fe5db0>
call[name[self]._ajax_api.User_Login, parameter[]]
return[call[name[func], parameter[name[self], <ast.Starred object at 0x7da1b0fe7550>]]]
return[name[inner_func]]
|
keyword[def] identifier[require_ajax_logged_in] ( identifier[func] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[inner_func] ( identifier[self] ,* identifier[pargs] ,** identifier[kwargs] ):
keyword[if] keyword[not] identifier[self] . identifier[_ajax_api] . identifier[logged_in] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[has_credentials] :
keyword[raise] identifier[ApiLoginFailure] (
literal[string] )
identifier[self] . identifier[_ajax_api] . identifier[User_Login] ( identifier[name] = identifier[self] . identifier[_state] [ literal[string] ],
identifier[password] = identifier[self] . identifier[_state] [ literal[string] ])
keyword[return] identifier[func] ( identifier[self] ,* identifier[pargs] ,** identifier[kwargs] )
keyword[return] identifier[inner_func]
|
def require_ajax_logged_in(func):
"""Check if ajax API is logged in and login if not
"""
@functools.wraps(func)
def inner_func(self, *pargs, **kwargs):
if not self._ajax_api.logged_in:
logger.info('Logging into AJAX API for required meta method')
if not self.has_credentials:
raise ApiLoginFailure('Login is required but no credentials were provided') # depends on [control=['if'], data=[]]
self._ajax_api.User_Login(name=self._state['username'], password=self._state['password']) # depends on [control=['if'], data=[]]
return func(self, *pargs, **kwargs)
return inner_func
|
def output(self, stream, value):
"""SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
        Expression: Output assignment expression that is valid in the context of this operator.
"""
if stream not in self.outputs:
raise ValueError("Stream is not an output of this operator.")
e = self.expression(value)
e._stream = stream
return e
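# Hedged sketch of the assignment pattern (the operator kind, schema and SPL
# expression are illustrative assumptions, not verified usage of a concrete
# API): the result of output() is assigned to an output attribute.
f = op.Map('spl.relational::Functor', s, schema='tuple<uint64 seq>')
f.seq = f.output(f.stream, 'IterationCount()')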
|
def function[output, parameter[self, stream, value]]:
constant[SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
        Expression: Output assignment expression that is valid in the context of this operator.
]
if compare[name[stream] <ast.NotIn object at 0x7da2590d7190> name[self].outputs] begin[:]
<ast.Raise object at 0x7da20c6ab8b0>
variable[e] assign[=] call[name[self].expression, parameter[name[value]]]
name[e]._stream assign[=] name[stream]
return[name[e]]
|
keyword[def] identifier[output] ( identifier[self] , identifier[stream] , identifier[value] ):
literal[string]
keyword[if] identifier[stream] keyword[not] keyword[in] identifier[self] . identifier[outputs] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[e] = identifier[self] . identifier[expression] ( identifier[value] )
identifier[e] . identifier[_stream] = identifier[stream]
keyword[return] identifier[e]
|
def output(self, stream, value):
"""SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
        Expression: Output assignment expression that is valid in the context of this operator.
"""
if stream not in self.outputs:
raise ValueError('Stream is not an output of this operator.') # depends on [control=['if'], data=[]]
e = self.expression(value)
e._stream = stream
return e
|
def get_permalink_ids_iter(self):
'''
    Method to get permalink ids from content. To be bound to the class as a
    final step.
'''
permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY']
permalink_ids = self.metadata.get(permalink_id_key, '')
for permalink_id in permalink_ids.split(','):
if permalink_id:
yield permalink_id.strip()
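# The split-and-strip behaviour, shown standalone; empty pieces from a
# trailing comma are skipped:
[p.strip() for p in 'a1, b2,'.split(',') if p]   # -> ['a1', 'b2']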
|
def function[get_permalink_ids_iter, parameter[self]]:
constant[
    Method to get permalink ids from content. To be bound to the class as a
    final step.
]
variable[permalink_id_key] assign[=] call[name[self].settings][constant[PERMALINK_ID_METADATA_KEY]]
variable[permalink_ids] assign[=] call[name[self].metadata.get, parameter[name[permalink_id_key], constant[]]]
for taget[name[permalink_id]] in starred[call[name[permalink_ids].split, parameter[constant[,]]]] begin[:]
if name[permalink_id] begin[:]
<ast.Yield object at 0x7da1b1d764a0>
|
keyword[def] identifier[get_permalink_ids_iter] ( identifier[self] ):
literal[string]
identifier[permalink_id_key] = identifier[self] . identifier[settings] [ literal[string] ]
identifier[permalink_ids] = identifier[self] . identifier[metadata] . identifier[get] ( identifier[permalink_id_key] , literal[string] )
keyword[for] identifier[permalink_id] keyword[in] identifier[permalink_ids] . identifier[split] ( literal[string] ):
keyword[if] identifier[permalink_id] :
keyword[yield] identifier[permalink_id] . identifier[strip] ()
|
def get_permalink_ids_iter(self):
"""
    Method to get permalink ids from content. To be bound to the class as a
    final step.
"""
permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY']
permalink_ids = self.metadata.get(permalink_id_key, '')
for permalink_id in permalink_ids.split(','):
if permalink_id:
yield permalink_id.strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['permalink_id']]
|
def hdrtxt(xmethod, dmethod, opt):
"""Return ``hdrtxt`` argument for ``.IterStatsConfig`` initialiser.
"""
txt = ['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr']
if xmethod == 'admm':
txt.extend(['r_X', 's_X', u('ρ_X')])
else:
if opt['CBPDN', 'BackTrack', 'Enabled']:
txt.extend(['F_X', 'Q_X', 'It_X', 'L_X'])
else:
txt.append('L_X')
if dmethod != 'fista':
txt.extend(['r_D', 's_D', u('ρ_D')])
else:
if opt['CCMOD', 'BackTrack', 'Enabled']:
txt.extend(['F_D', 'Q_D', 'It_D', 'L_D'])
else:
txt.append('L_D')
return txt
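# Example: ADMM x-step with a FISTA d-step and backtracking enabled; a plain
# dict with tuple keys stands in for the solver options object here:
opt_demo = {('CCMOD', 'BackTrack', 'Enabled'): True}
hdrtxt('admm', 'fista', opt_demo)
# -> ['Itn', 'Fnc', 'DFid', 'ℓ1', 'Cnstr', 'r_X', 's_X', 'ρ_X',
#     'F_D', 'Q_D', 'It_D', 'L_D']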
|
def function[hdrtxt, parameter[xmethod, dmethod, opt]]:
constant[Return ``hdrtxt`` argument for ``.IterStatsConfig`` initialiser.
]
variable[txt] assign[=] list[[<ast.Constant object at 0x7da1b069b070>, <ast.Constant object at 0x7da1b0698c40>, <ast.Constant object at 0x7da1b069a590>, <ast.Call object at 0x7da1b069a710>, <ast.Constant object at 0x7da1b069a6e0>]]
if compare[name[xmethod] equal[==] constant[admm]] begin[:]
call[name[txt].extend, parameter[list[[<ast.Constant object at 0x7da1b06d25c0>, <ast.Constant object at 0x7da1b06d2110>, <ast.Call object at 0x7da1b06d3580>]]]]
if compare[name[dmethod] not_equal[!=] constant[fista]] begin[:]
call[name[txt].extend, parameter[list[[<ast.Constant object at 0x7da1b06d1cf0>, <ast.Constant object at 0x7da1b06d0dc0>, <ast.Call object at 0x7da1b06d3be0>]]]]
return[name[txt]]
|
keyword[def] identifier[hdrtxt] ( identifier[xmethod] , identifier[dmethod] , identifier[opt] ):
literal[string]
identifier[txt] =[ literal[string] , literal[string] , literal[string] , identifier[u] ( literal[string] ), literal[string] ]
keyword[if] identifier[xmethod] == literal[string] :
identifier[txt] . identifier[extend] ([ literal[string] , literal[string] , identifier[u] ( literal[string] )])
keyword[else] :
keyword[if] identifier[opt] [ literal[string] , literal[string] , literal[string] ]:
identifier[txt] . identifier[extend] ([ literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[else] :
identifier[txt] . identifier[append] ( literal[string] )
keyword[if] identifier[dmethod] != literal[string] :
identifier[txt] . identifier[extend] ([ literal[string] , literal[string] , identifier[u] ( literal[string] )])
keyword[else] :
keyword[if] identifier[opt] [ literal[string] , literal[string] , literal[string] ]:
identifier[txt] . identifier[extend] ([ literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[else] :
identifier[txt] . identifier[append] ( literal[string] )
keyword[return] identifier[txt]
|
def hdrtxt(xmethod, dmethod, opt):
"""Return ``hdrtxt`` argument for ``.IterStatsConfig`` initialiser.
"""
txt = ['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr']
if xmethod == 'admm':
txt.extend(['r_X', 's_X', u('ρ_X')]) # depends on [control=['if'], data=[]]
elif opt['CBPDN', 'BackTrack', 'Enabled']:
txt.extend(['F_X', 'Q_X', 'It_X', 'L_X']) # depends on [control=['if'], data=[]]
else:
txt.append('L_X')
if dmethod != 'fista':
txt.extend(['r_D', 's_D', u('ρ_D')]) # depends on [control=['if'], data=[]]
elif opt['CCMOD', 'BackTrack', 'Enabled']:
txt.extend(['F_D', 'Q_D', 'It_D', 'L_D']) # depends on [control=['if'], data=[]]
else:
txt.append('L_D')
return txt
|
def cidr_notation(ip_address, netmask):
"""
Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html
"""
try:
inet_aton(ip_address)
except:
raise Exception("Invalid ip address '%s'" % ip_address)
try:
inet_aton(netmask)
except:
raise Exception("Invalid netmask '%s'" % netmask)
ip_address_split = ip_address.split('.')
netmask_split = netmask.split('.')
# calculate network start
net_start = [str(int(ip_address_split[x]) & int(netmask_split[x]))
for x in range(0,4)]
return '.'.join(net_start) + '/' + get_net_size(netmask_split)
|
def function[cidr_notation, parameter[ip_address, netmask]]:
constant[
Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html
]
<ast.Try object at 0x7da20c7c9210>
<ast.Try object at 0x7da20c7c89d0>
variable[ip_address_split] assign[=] call[name[ip_address].split, parameter[constant[.]]]
variable[netmask_split] assign[=] call[name[netmask].split, parameter[constant[.]]]
variable[net_start] assign[=] <ast.ListComp object at 0x7da20c7c91e0>
return[binary_operation[binary_operation[call[constant[.].join, parameter[name[net_start]]] + constant[/]] + call[name[get_net_size], parameter[name[netmask_split]]]]]
|
keyword[def] identifier[cidr_notation] ( identifier[ip_address] , identifier[netmask] ):
literal[string]
keyword[try] :
identifier[inet_aton] ( identifier[ip_address] )
keyword[except] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[ip_address] )
keyword[try] :
identifier[inet_aton] ( identifier[netmask] )
keyword[except] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[netmask] )
identifier[ip_address_split] = identifier[ip_address] . identifier[split] ( literal[string] )
identifier[netmask_split] = identifier[netmask] . identifier[split] ( literal[string] )
identifier[net_start] =[ identifier[str] ( identifier[int] ( identifier[ip_address_split] [ identifier[x] ])& identifier[int] ( identifier[netmask_split] [ identifier[x] ]))
keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )]
keyword[return] literal[string] . identifier[join] ( identifier[net_start] )+ literal[string] + identifier[get_net_size] ( identifier[netmask_split] )
|
def cidr_notation(ip_address, netmask):
"""
Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html
"""
try:
inet_aton(ip_address) # depends on [control=['try'], data=[]]
except:
raise Exception("Invalid ip address '%s'" % ip_address) # depends on [control=['except'], data=[]]
try:
inet_aton(netmask) # depends on [control=['try'], data=[]]
except:
raise Exception("Invalid netmask '%s'" % netmask) # depends on [control=['except'], data=[]]
ip_address_split = ip_address.split('.')
netmask_split = netmask.split('.')
# calculate network start
net_start = [str(int(ip_address_split[x]) & int(netmask_split[x])) for x in range(0, 4)]
return '.'.join(net_start) + '/' + get_net_size(netmask_split)
|
async def delete(self):
"""Deletes a scene from a shade"""
_val = await self.request.delete(
self._base_path,
params={
ATTR_SCENE_ID: self._raw_data.get(ATTR_SCENE_ID),
ATTR_SHADE_ID: self._raw_data.get(ATTR_SHADE_ID),
},
)
return _val
|
<ast.AsyncFunctionDef object at 0x7da1b2345de0>
|
keyword[async] keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
identifier[_val] = keyword[await] identifier[self] . identifier[request] . identifier[delete] (
identifier[self] . identifier[_base_path] ,
identifier[params] ={
identifier[ATTR_SCENE_ID] : identifier[self] . identifier[_raw_data] . identifier[get] ( identifier[ATTR_SCENE_ID] ),
identifier[ATTR_SHADE_ID] : identifier[self] . identifier[_raw_data] . identifier[get] ( identifier[ATTR_SHADE_ID] ),
},
)
keyword[return] identifier[_val]
|
async def delete(self):
"""Deletes a scene from a shade"""
_val = await self.request.delete(self._base_path, params={ATTR_SCENE_ID: self._raw_data.get(ATTR_SCENE_ID), ATTR_SHADE_ID: self._raw_data.get(ATTR_SHADE_ID)})
return _val
|
def _endProgramsNode(self, name, content):
"""Process the end of a node under xtvd/programs"""
if name == 'series':
self._series = content
elif name == 'title':
self._title = content
elif name == 'subtitle':
self._subtitle = content
elif name == 'description':
self._description = content
elif name == 'mpaaRating':
self._mpaaRating = content
elif name == 'starRating':
self._starRating = content
elif name == 'runTime':
self._runTime = self._parseDuration(content)
elif name == 'year':
self._year = content
elif name == 'showType':
self._showType = content
elif name == 'colorCode':
self._colorCode = content
elif name == 'originalAirDate':
self._originalAirDate = self._parseDate(content)
elif name == 'syndicatedEpisodeNumber':
self._syndicatedEpisodeNumber = content
elif name == 'advisory':
self._advisories.append(content)
elif name == 'program':
if not self._error:
self._importer.new_program(self._programId, self._series,
self._title, self._subtitle,
self._description, self._mpaaRating,
self._starRating, self._runTime,
self._year, self._showType,
self._colorCode,
self._originalAirDate,
self._syndicatedEpisodeNumber,
self._advisories)
|
def function[_endProgramsNode, parameter[self, name, content]]:
constant[Process the end of a node under xtvd/programs]
if compare[name[name] equal[==] constant[series]] begin[:]
name[self]._series assign[=] name[content]
|
keyword[def] identifier[_endProgramsNode] ( identifier[self] , identifier[name] , identifier[content] ):
literal[string]
keyword[if] identifier[name] == literal[string] :
identifier[self] . identifier[_series] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_title] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_subtitle] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_description] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_mpaaRating] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_starRating] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_runTime] = identifier[self] . identifier[_parseDuration] ( identifier[content] )
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_year] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_showType] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_colorCode] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_originalAirDate] = identifier[self] . identifier[_parseDate] ( identifier[content] )
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_syndicatedEpisodeNumber] = identifier[content]
keyword[elif] identifier[name] == literal[string] :
identifier[self] . identifier[_advisories] . identifier[append] ( identifier[content] )
keyword[elif] identifier[name] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[_error] :
identifier[self] . identifier[_importer] . identifier[new_program] ( identifier[self] . identifier[_programId] , identifier[self] . identifier[_series] ,
identifier[self] . identifier[_title] , identifier[self] . identifier[_subtitle] ,
identifier[self] . identifier[_description] , identifier[self] . identifier[_mpaaRating] ,
identifier[self] . identifier[_starRating] , identifier[self] . identifier[_runTime] ,
identifier[self] . identifier[_year] , identifier[self] . identifier[_showType] ,
identifier[self] . identifier[_colorCode] ,
identifier[self] . identifier[_originalAirDate] ,
identifier[self] . identifier[_syndicatedEpisodeNumber] ,
identifier[self] . identifier[_advisories] )
|
def _endProgramsNode(self, name, content):
"""Process the end of a node under xtvd/programs"""
if name == 'series':
self._series = content # depends on [control=['if'], data=[]]
elif name == 'title':
self._title = content # depends on [control=['if'], data=[]]
elif name == 'subtitle':
self._subtitle = content # depends on [control=['if'], data=[]]
elif name == 'description':
self._description = content # depends on [control=['if'], data=[]]
elif name == 'mpaaRating':
self._mpaaRating = content # depends on [control=['if'], data=[]]
elif name == 'starRating':
self._starRating = content # depends on [control=['if'], data=[]]
elif name == 'runTime':
self._runTime = self._parseDuration(content) # depends on [control=['if'], data=[]]
elif name == 'year':
self._year = content # depends on [control=['if'], data=[]]
elif name == 'showType':
self._showType = content # depends on [control=['if'], data=[]]
elif name == 'colorCode':
self._colorCode = content # depends on [control=['if'], data=[]]
elif name == 'originalAirDate':
self._originalAirDate = self._parseDate(content) # depends on [control=['if'], data=[]]
elif name == 'syndicatedEpisodeNumber':
self._syndicatedEpisodeNumber = content # depends on [control=['if'], data=[]]
elif name == 'advisory':
self._advisories.append(content) # depends on [control=['if'], data=[]]
elif name == 'program':
if not self._error:
self._importer.new_program(self._programId, self._series, self._title, self._subtitle, self._description, self._mpaaRating, self._starRating, self._runTime, self._year, self._showType, self._colorCode, self._originalAirDate, self._syndicatedEpisodeNumber, self._advisories) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
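The dispatcher above is driven by end-element events from an XML parse. A self-contained sketch of that calling pattern, assuming (name, content) pairs arrive from a SAX-style parser; the stand-in handler below is illustrative, not the real xtvd importer:
events = [('title', 'Example Show'), ('description', 'A pilot episode.')]

class FakeHandler:
    # Stand-in with the same dispatch shape as _endProgramsNode.
    def __init__(self):
        self._title = None
        self._description = None

    def _endProgramsNode(self, name, content):
        if name == 'title':
            self._title = content
        elif name == 'description':
            self._description = content

handler = FakeHandler()
for name, content in events:
    handler._endProgramsNode(name, content)
print(handler._title, '/', handler._description)  # Example Show / A pilot episode.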
def scatter(self, x, y, xerr=[], yerr=[], mark='o', markstyle=None):
"""Plot a series of points.
Plot a series of points (marks) that are not connected by a
line. Shortcut for plot with linestyle=None.
:param x: array containing x-values.
:param y: array containing y-values.
:param xerr: array containing errors on the x-values.
:param yerr: array containing errors on the y-values.
:param mark: the symbol used to mark the data points. May be
any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
:param markstyle: the style of the plot marks (e.g. 'mark
size=.75pt')
Example::
>>> plot = artist.Plot()
>>> x = np.random.normal(size=20)
>>> y = np.random.normal(size=20)
>>> plot.scatter(x, y, mark='*')
"""
self.plot(x, y, xerr=xerr, yerr=yerr, mark=mark, linestyle=None,
markstyle=markstyle)
|
def function[scatter, parameter[self, x, y, xerr, yerr, mark, markstyle]]:
constant[Plot a series of points.
Plot a series of points (marks) that are not connected by a
line. Shortcut for plot with linestyle=None.
:param x: array containing x-values.
:param y: array containing y-values.
:param xerr: array containing errors on the x-values.
:param yerr: array containing errors on the y-values.
:param mark: the symbol used to mark the data points. May be
any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
:param markstyle: the style of the plot marks (e.g. 'mark
size=.75pt')
Example::
>>> plot = artist.Plot()
>>> x = np.random.normal(size=20)
>>> y = np.random.normal(size=20)
>>> plot.scatter(x, y, mark='*')
]
call[name[self].plot, parameter[name[x], name[y]]]
|
keyword[def] identifier[scatter] ( identifier[self] , identifier[x] , identifier[y] , identifier[xerr] =[], identifier[yerr] =[], identifier[mark] = literal[string] , identifier[markstyle] = keyword[None] ):
literal[string]
identifier[self] . identifier[plot] ( identifier[x] , identifier[y] , identifier[xerr] = identifier[xerr] , identifier[yerr] = identifier[yerr] , identifier[mark] = identifier[mark] , identifier[linestyle] = keyword[None] ,
identifier[markstyle] = identifier[markstyle] )
|
def scatter(self, x, y, xerr=[], yerr=[], mark='o', markstyle=None):
"""Plot a series of points.
Plot a series of points (marks) that are not connected by a
line. Shortcut for plot with linestyle=None.
:param x: array containing x-values.
:param y: array containing y-values.
:param xerr: array containing errors on the x-values.
:param yerr: array containing errors on the y-values.
:param mark: the symbol used to mark the data points. May be
any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
:param markstyle: the style of the plot marks (e.g. 'mark
size=.75pt')
Example::
>>> plot = artist.Plot()
>>> x = np.random.normal(size=20)
>>> y = np.random.normal(size=20)
>>> plot.scatter(x, y, mark='*')
"""
self.plot(x, y, xerr=xerr, yerr=yerr, mark=mark, linestyle=None, markstyle=markstyle)
|
def get_json_object_lines(annotation: ResourceAnnotation,
properties: Dict[str, Any], field: str,
url_params: Dict, request: bool = False,
object_property: bool = False) -> List[str]:
"""Generate documentation for the given object annotation.
:param doctor.resource.ResourceAnnotation annotation:
Annotation object for the associated handler method.
:param str field: Sphinx field type to use (e.g. '<json').
:param list url_params: A list of url parameter strings.
:param bool request: Whether the schema is for the request or not.
:param bool object_property: If True it indicates this is a property of
an object that we are documenting. This is only set to True when
called recursively when encountering a property that is an object in
order to document the properties of it.
:returns: list of strings, one for each line.
"""
sig_params = annotation.logic._doctor_signature.parameters
required_lines = []
lines = []
default_field = field
for prop in sorted(properties.keys()):
annotated_type = properties[prop]
# If the property is a url parameter, override the field to use
# param so that it's not documented in the json body or query params.
field = default_field
if request and prop in url_params:
field = 'param'
types = get_json_types(annotated_type)
description = annotated_type.description
obj_ref = ''
if issubclass(annotated_type, Object):
obj_ref = get_object_reference(annotated_type)
elif (issubclass(annotated_type, Array) and
annotated_type.items is not None and
not isinstance(annotated_type.items, list) and
issubclass(annotated_type.items, Object)):
# This means the type is an array of objects, so we want to
# collect the object as a resource we can document later.
obj_ref = get_object_reference(annotated_type.items)
elif (issubclass(annotated_type, Array) and
isinstance(annotated_type.items, list)):
# This means the type is an array and items is a list of types. Iterate
# through each type to see if any are objects that we can document.
for item in annotated_type.items:
if issubclass(item, Object):
# Note: we are just adding them to the global variable
# ALL_RESOURCES when calling the function below and not
# using the return value as this special case is handled
# below in documenting items of an array.
get_object_reference(item)
# Document any enum.
enum = ''
if issubclass(annotated_type, Enum):
enum = ' Must be one of: `{}`'.format(annotated_type.enum)
if annotated_type.case_insensitive:
enum += ' (case-insensitive)'
enum += '.'
# Document type(s) for an array's items.
if (issubclass(annotated_type, Array) and
annotated_type.items is not None):
array_description = get_array_items_description(annotated_type)
# Prevents creating a duplicate object reference link in the docs.
if obj_ref in array_description:
obj_ref = ''
description += array_description
# Document any default value.
default = ''
if (request and prop in sig_params and
sig_params[prop].default != Signature.empty):
default = ' (Defaults to `{}`) '.format(sig_params[prop].default)
field_prop = prop
# If this is a request param and the property is required,
# add required text and append lines to required_lines. This
# will make the required properties appear in alphabetical order
# before the optional ones.
line_template = (
':{field} {types} {prop}: {description}{enum}{default}{obj_ref}')
if request and prop in annotation.params.required:
description = '**Required**. ' + description
required_lines.append(line_template.format(
field=field, types=','.join(types), prop=field_prop,
description=description, enum=enum, obj_ref=obj_ref,
default=default))
else:
lines.append(line_template.format(
field=field, types=','.join(types), prop=field_prop,
description=description, enum=enum, obj_ref=obj_ref,
default=default))
return required_lines + lines
|
def function[get_json_object_lines, parameter[annotation, properties, field, url_params, request, object_property]]:
constant[Generate documentation for the given object annotation.
:param doctor.resource.ResourceAnnotation annotation:
Annotation object for the associated handler method.
:param str field: Sphinx field type to use (e.g. '<json').
:param list url_params: A list of url parameter strings.
:param bool request: Whether the schema is for the request or not.
:param bool object_property: If True it indicates this is a property of
an object that we are documenting. This is only set to True when
called recursively when encountering a property that is an object in
order to document the properties of it.
:returns: list of strings, one for each line.
]
variable[sig_params] assign[=] name[annotation].logic._doctor_signature.parameters
variable[required_lines] assign[=] list[[]]
variable[lines] assign[=] list[[]]
variable[default_field] assign[=] name[field]
for taget[name[prop]] in starred[call[name[sorted], parameter[call[name[properties].keys, parameter[]]]]] begin[:]
variable[annotated_type] assign[=] call[name[properties]][name[prop]]
variable[field] assign[=] name[default_field]
if <ast.BoolOp object at 0x7da20c7cae60> begin[:]
variable[field] assign[=] constant[param]
variable[types] assign[=] call[name[get_json_types], parameter[name[annotated_type]]]
variable[description] assign[=] name[annotated_type].description
variable[obj_ref] assign[=] constant[]
if call[name[issubclass], parameter[name[annotated_type], name[Object]]] begin[:]
variable[obj_ref] assign[=] call[name[get_object_reference], parameter[name[annotated_type]]]
variable[enum] assign[=] constant[]
if call[name[issubclass], parameter[name[annotated_type], name[Enum]]] begin[:]
variable[enum] assign[=] call[constant[ Must be one of: `{}`].format, parameter[name[annotated_type].enum]]
if name[annotated_type].case_insensitive begin[:]
<ast.AugAssign object at 0x7da20c7c8190>
<ast.AugAssign object at 0x7da20c7cb1c0>
if <ast.BoolOp object at 0x7da20c7cb3a0> begin[:]
variable[array_description] assign[=] call[name[get_array_items_description], parameter[name[annotated_type]]]
if compare[name[obj_ref] in name[array_description]] begin[:]
variable[obj_ref] assign[=] constant[]
<ast.AugAssign object at 0x7da20c7cbb80>
variable[default] assign[=] constant[]
if <ast.BoolOp object at 0x7da20c7cb4f0> begin[:]
variable[default] assign[=] call[constant[ (Defaults to `{}`) ].format, parameter[call[name[sig_params]][name[prop]].default]]
variable[field_prop] assign[=] name[prop]
variable[line_template] assign[=] constant[:{field} {types} {prop}: {description}{enum}{default}{obj_ref}]
if <ast.BoolOp object at 0x7da20c7c92d0> begin[:]
variable[description] assign[=] binary_operation[constant[**Required**. ] + name[description]]
call[name[required_lines].append, parameter[call[name[line_template].format, parameter[]]]]
return[binary_operation[name[required_lines] + name[lines]]]
|
keyword[def] identifier[get_json_object_lines] ( identifier[annotation] : identifier[ResourceAnnotation] ,
identifier[properties] : identifier[Dict] [ identifier[str] , identifier[Any] ], identifier[field] : identifier[str] ,
identifier[url_params] : identifier[Dict] , identifier[request] : identifier[bool] = keyword[False] ,
identifier[object_property] : identifier[bool] = keyword[False] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[sig_params] = identifier[annotation] . identifier[logic] . identifier[_doctor_signature] . identifier[parameters]
identifier[required_lines] =[]
identifier[lines] =[]
identifier[default_field] = identifier[field]
keyword[for] identifier[prop] keyword[in] identifier[sorted] ( identifier[properties] . identifier[keys] ()):
identifier[annotated_type] = identifier[properties] [ identifier[prop] ]
identifier[field] = identifier[default_field]
keyword[if] identifier[request] keyword[and] identifier[prop] keyword[in] identifier[url_params] :
identifier[field] = literal[string]
identifier[types] = identifier[get_json_types] ( identifier[annotated_type] )
identifier[description] = identifier[annotated_type] . identifier[description]
identifier[obj_ref] = literal[string]
keyword[if] identifier[issubclass] ( identifier[annotated_type] , identifier[Object] ):
identifier[obj_ref] = identifier[get_object_reference] ( identifier[annotated_type] )
keyword[elif] ( identifier[issubclass] ( identifier[annotated_type] , identifier[Array] ) keyword[and]
identifier[annotated_type] . identifier[items] keyword[is] keyword[not] keyword[None] keyword[and]
keyword[not] identifier[isinstance] ( identifier[annotated_type] . identifier[items] , identifier[list] ) keyword[and]
identifier[issubclass] ( identifier[annotated_type] . identifier[items] , identifier[Object] )):
identifier[obj_ref] = identifier[get_object_reference] ( identifier[annotated_type] . identifier[items] )
keyword[elif] ( identifier[issubclass] ( identifier[annotated_type] , identifier[Array] ) keyword[and]
identifier[isinstance] ( identifier[annotated_type] . identifier[items] , identifier[list] )):
keyword[for] identifier[item] keyword[in] identifier[annotated_type] . identifier[items] :
keyword[if] identifier[issubclass] ( identifier[item] , identifier[Object] ):
identifier[get_object_reference] ( identifier[item] )
identifier[enum] = literal[string]
keyword[if] identifier[issubclass] ( identifier[annotated_type] , identifier[Enum] ):
identifier[enum] = literal[string] . identifier[format] ( identifier[annotated_type] . identifier[enum] )
keyword[if] identifier[annotated_type] . identifier[case_insensitive] :
identifier[enum] += literal[string]
identifier[enum] += literal[string]
keyword[if] ( identifier[issubclass] ( identifier[annotated_type] , identifier[Array] ) keyword[and]
identifier[annotated_type] . identifier[items] keyword[is] keyword[not] keyword[None] ):
identifier[array_description] = identifier[get_array_items_description] ( identifier[annotated_type] )
keyword[if] identifier[obj_ref] keyword[in] identifier[array_description] :
identifier[obj_ref] = literal[string]
identifier[description] += identifier[array_description]
identifier[default] = literal[string]
keyword[if] ( identifier[request] keyword[and] identifier[prop] keyword[in] identifier[sig_params] keyword[and]
identifier[sig_params] [ identifier[prop] ]. identifier[default] != identifier[Signature] . identifier[empty] ):
identifier[default] = literal[string] . identifier[format] ( identifier[sig_params] [ identifier[prop] ]. identifier[default] )
identifier[field_prop] = identifier[prop]
identifier[line_template] =(
literal[string] )
keyword[if] identifier[request] keyword[and] identifier[prop] keyword[in] identifier[annotation] . identifier[params] . identifier[required] :
identifier[description] = literal[string] + identifier[description]
identifier[required_lines] . identifier[append] ( identifier[line_template] . identifier[format] (
identifier[field] = identifier[field] , identifier[types] = literal[string] . identifier[join] ( identifier[types] ), identifier[prop] = identifier[field_prop] ,
identifier[description] = identifier[description] , identifier[enum] = identifier[enum] , identifier[obj_ref] = identifier[obj_ref] ,
identifier[default] = identifier[default] ))
keyword[else] :
identifier[lines] . identifier[append] ( identifier[line_template] . identifier[format] (
identifier[field] = identifier[field] , identifier[types] = literal[string] . identifier[join] ( identifier[types] ), identifier[prop] = identifier[field_prop] ,
identifier[description] = identifier[description] , identifier[enum] = identifier[enum] , identifier[obj_ref] = identifier[obj_ref] ,
identifier[default] = identifier[default] ))
keyword[return] identifier[required_lines] + identifier[lines]
|
def get_json_object_lines(annotation: ResourceAnnotation, properties: Dict[str, Any], field: str, url_params: Dict, request: bool=False, object_property: bool=False) -> List[str]:
"""Generate documentation for the given object annotation.
:param doctor.resource.ResourceAnnotation annotation:
Annotation object for the associated handler method.
:param str field: Sphinx field type to use (e.g. '<json').
:param list url_params: A list of url parameter strings.
:param bool request: Whether the schema is for the request or not.
:param bool object_property: If True it indicates this is a property of
an object that we are documenting. This is only set to True when
called recursively when encountering a property that is an object in
order to document the properties of it.
:returns: list of strings, one for each line.
"""
sig_params = annotation.logic._doctor_signature.parameters
required_lines = []
lines = []
default_field = field
for prop in sorted(properties.keys()):
annotated_type = properties[prop]
# If the property is a url parameter, override the field to use
# param so that it's not documented in the json body or query params.
field = default_field
if request and prop in url_params:
field = 'param' # depends on [control=['if'], data=[]]
types = get_json_types(annotated_type)
description = annotated_type.description
obj_ref = ''
if issubclass(annotated_type, Object):
obj_ref = get_object_reference(annotated_type) # depends on [control=['if'], data=[]]
elif issubclass(annotated_type, Array) and annotated_type.items is not None and (not isinstance(annotated_type.items, list)) and issubclass(annotated_type.items, Object):
# This means the type is an array of objects, so we want to
# collect the object as a resource we can document later.
obj_ref = get_object_reference(annotated_type.items) # depends on [control=['if'], data=[]]
elif issubclass(annotated_type, Array) and isinstance(annotated_type.items, list):
# This means the type is an array and items is a list of types. Iterate
# through each type to see if any are objects that we can document.
for item in annotated_type.items:
if issubclass(item, Object):
# Note: we are just adding them to the global variable
# ALL_RESOURCES when calling the function below and not
# using the return value as this special case is handled
# below in documenting items of an array.
get_object_reference(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
# Document any enum.
enum = ''
if issubclass(annotated_type, Enum):
enum = ' Must be one of: `{}`'.format(annotated_type.enum)
if annotated_type.case_insensitive:
enum += ' (case-insensitive)' # depends on [control=['if'], data=[]]
enum += '.' # depends on [control=['if'], data=[]]
# Document type(s) for an array's items.
if issubclass(annotated_type, Array) and annotated_type.items is not None:
array_description = get_array_items_description(annotated_type)
# Prevents creating a duplicate object reference link in the docs.
if obj_ref in array_description:
obj_ref = '' # depends on [control=['if'], data=['obj_ref']]
description += array_description # depends on [control=['if'], data=[]]
# Document any default value.
default = ''
if request and prop in sig_params and (sig_params[prop].default != Signature.empty):
default = ' (Defaults to `{}`) '.format(sig_params[prop].default) # depends on [control=['if'], data=[]]
field_prop = prop
# If this is a request param and the property is required,
# add required text and append lines to required_lines. This
# will make the required properties appear in alphabetical order
# before the optional ones.
line_template = ':{field} {types} {prop}: {description}{enum}{default}{obj_ref}'
if request and prop in annotation.params.required:
description = '**Required**. ' + description
required_lines.append(line_template.format(field=field, types=','.join(types), prop=field_prop, description=description, enum=enum, obj_ref=obj_ref, default=default)) # depends on [control=['if'], data=[]]
else:
lines.append(line_template.format(field=field, types=','.join(types), prop=field_prop, description=description, enum=enum, obj_ref=obj_ref, default=default)) # depends on [control=['for'], data=['prop']]
return required_lines + lines
|
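To make the Sphinx field output concrete, here is a standalone rendering of the line_template above; the property name and description are made-up values:
line_template = ':{field} {types} {prop}: {description}{enum}{default}{obj_ref}'
print(line_template.format(
    field='<json', types='str', prop='name',
    description='**Required**. The display name.',
    enum=" Must be one of: `['a', 'b']`.",
    default='', obj_ref=''))
# :<json str name: **Required**. The display name. Must be one of: `['a', 'b']`.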
def free(self, local_path):
'''
Stop synchronization of local_path
'''
# Process local ~~~
# 1. Syncthing config
config = self.get_config()
# Check whether folders are still connected to this device
folder = st_util.find_folder_with_path(local_path, config)
self.delete_folder(local_path, config)
pruned = st_util.prune_devices(folder, config)
# Done processing st config, commit :)
self.set_config(config)
if pruned:
self.restart()
# 2. App config
dir_config = self.adapter.get_dir_config(local_path)
if not dir_config:
raise custom_errors.FileNotInConfig(local_path)
kodrive_config = self.adapter.get_config()
for key in kodrive_config['directories']:
d = kodrive_config['directories'][key]
if d['local_path'].rstrip('/') == local_path.rstrip('/'):
del kodrive_config['directories'][key]
break
# Done processing app config, commit :)
self.adapter.set_config(kodrive_config)
# If the folder was shared, try to remove data from remote
if dir_config['is_shared'] and dir_config['server']:
# Process remote ~~~
r_api_key = dir_config['api_key']
r_device_id = dir_config['device_id']
if dir_config['host']:
host = dir_config['host']
else:
host = self.devid_to_ip(r_device_id, False)
try:
# Create remote proxy to interact with remote
remote = SyncthingProxy(
r_device_id, host, r_api_key,
port=dir_config['port'] if 'port' in dir_config else None
)
except Exception as e:
return True
r_config = remote.get_config()
r_folder = st_util.find_folder_with_path(
dir_config['remote_path'], r_config
)
# Delete device id from folder
r_config = remote.get_config()
self_devid = self.get_device_id()
del_device = remote.delete_device_from_folder(
dir_config['remote_path'], self_devid, r_config
)
# Check whether any other folder still depends on this device
pruned = st_util.prune_devices(r_folder, r_config)
remote.set_config(r_config)
if pruned:
remote.restart()
return True
|
def function[free, parameter[self, local_path]]:
constant[
Stop synchronization of local_path
]
variable[config] assign[=] call[name[self].get_config, parameter[]]
variable[folder] assign[=] call[name[st_util].find_folder_with_path, parameter[name[local_path], name[config]]]
call[name[self].delete_folder, parameter[name[local_path], name[config]]]
variable[pruned] assign[=] call[name[st_util].prune_devices, parameter[name[folder], name[config]]]
call[name[self].set_config, parameter[name[config]]]
if name[pruned] begin[:]
call[name[self].restart, parameter[]]
variable[dir_config] assign[=] call[name[self].adapter.get_dir_config, parameter[name[local_path]]]
if <ast.UnaryOp object at 0x7da18f09e8f0> begin[:]
<ast.Raise object at 0x7da18f09f730>
variable[kodrive_config] assign[=] call[name[self].adapter.get_config, parameter[]]
for taget[name[key]] in starred[call[name[kodrive_config]][constant[directories]]] begin[:]
variable[d] assign[=] call[call[name[kodrive_config]][constant[directories]]][name[key]]
if compare[call[call[name[d]][constant[local_path]].rstrip, parameter[constant[/]]] equal[==] call[name[local_path].rstrip, parameter[constant[/]]]] begin[:]
<ast.Delete object at 0x7da18f09d450>
break
call[name[self].adapter.set_config, parameter[name[kodrive_config]]]
if <ast.BoolOp object at 0x7da18f09f340> begin[:]
variable[r_api_key] assign[=] call[name[dir_config]][constant[api_key]]
variable[r_device_id] assign[=] call[name[dir_config]][constant[device_id]]
if call[name[dir_config]][constant[host]] begin[:]
variable[host] assign[=] call[name[dir_config]][constant[host]]
<ast.Try object at 0x7da18f09c910>
variable[r_config] assign[=] call[name[remote].get_config, parameter[]]
variable[r_folder] assign[=] call[name[st_util].find_folder_with_path, parameter[call[name[dir_config]][constant[remote_path]], name[r_config]]]
variable[r_config] assign[=] call[name[remote].get_config, parameter[]]
variable[self_devid] assign[=] call[name[self].get_device_id, parameter[]]
variable[del_device] assign[=] call[name[remote].delete_device_from_folder, parameter[call[name[dir_config]][constant[remote_path]], name[self_devid], name[r_config]]]
variable[pruned] assign[=] call[name[st_util].prune_devices, parameter[name[r_folder], name[r_config]]]
call[name[remote].set_config, parameter[name[r_config]]]
if name[pruned] begin[:]
call[name[remote].restart, parameter[]]
return[constant[True]]
|
keyword[def] identifier[free] ( identifier[self] , identifier[local_path] ):
literal[string]
identifier[config] = identifier[self] . identifier[get_config] ()
identifier[folder] = identifier[st_util] . identifier[find_folder_with_path] ( identifier[local_path] , identifier[config] )
identifier[self] . identifier[delete_folder] ( identifier[local_path] , identifier[config] )
identifier[pruned] = identifier[st_util] . identifier[prune_devices] ( identifier[folder] , identifier[config] )
identifier[self] . identifier[set_config] ( identifier[config] )
keyword[if] identifier[pruned] :
identifier[self] . identifier[restart] ()
identifier[dir_config] = identifier[self] . identifier[adapter] . identifier[get_dir_config] ( identifier[local_path] )
keyword[if] keyword[not] identifier[dir_config] :
keyword[raise] identifier[custom_errors] . identifier[FileNotInConfig] ( identifier[local_path] )
identifier[kodrive_config] = identifier[self] . identifier[adapter] . identifier[get_config] ()
keyword[for] identifier[key] keyword[in] identifier[kodrive_config] [ literal[string] ]:
identifier[d] = identifier[kodrive_config] [ literal[string] ][ identifier[key] ]
keyword[if] identifier[d] [ literal[string] ]. identifier[rstrip] ( literal[string] )== identifier[local_path] . identifier[rstrip] ( literal[string] ):
keyword[del] identifier[kodrive_config] [ literal[string] ][ identifier[key] ]
keyword[break]
identifier[self] . identifier[adapter] . identifier[set_config] ( identifier[kodrive_config] )
keyword[if] identifier[dir_config] [ literal[string] ] keyword[and] identifier[dir_config] [ literal[string] ]:
identifier[r_api_key] = identifier[dir_config] [ literal[string] ]
identifier[r_device_id] = identifier[dir_config] [ literal[string] ]
keyword[if] identifier[dir_config] [ literal[string] ]:
identifier[host] = identifier[dir_config] [ literal[string] ]
keyword[else] :
identifier[host] = identifier[self] . identifier[devid_to_ip] ( identifier[r_device_id] , keyword[False] )
keyword[try] :
identifier[remote] = identifier[SyncthingProxy] (
identifier[r_device_id] , identifier[host] , identifier[r_api_key] ,
identifier[port] = identifier[dir_config] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[dir_config] keyword[else] keyword[None]
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] keyword[True]
identifier[r_config] = identifier[remote] . identifier[get_config] ()
identifier[r_folder] = identifier[st_util] . identifier[find_folder_with_path] (
identifier[dir_config] [ literal[string] ], identifier[r_config]
)
identifier[r_config] = identifier[remote] . identifier[get_config] ()
identifier[self_devid] = identifier[self] . identifier[get_device_id] ()
identifier[del_device] = identifier[remote] . identifier[delete_device_from_folder] (
identifier[dir_config] [ literal[string] ], identifier[self_devid] , identifier[r_config]
)
identifier[pruned] = identifier[st_util] . identifier[prune_devices] ( identifier[r_folder] , identifier[r_config] )
identifier[remote] . identifier[set_config] ( identifier[r_config] )
keyword[if] identifier[pruned] :
identifier[remote] . identifier[restart] ()
keyword[return] keyword[True]
|
def free(self, local_path):
"""
Stop synchronization of local_path
"""
# Process local ~~~
# 1. Syncthing config
config = self.get_config() # Check whether folders are still connected to this device
folder = st_util.find_folder_with_path(local_path, config)
self.delete_folder(local_path, config)
pruned = st_util.prune_devices(folder, config)
# Done processing st config, commit :)
self.set_config(config)
if pruned:
self.restart() # depends on [control=['if'], data=[]]
# 2. App config
dir_config = self.adapter.get_dir_config(local_path)
if not dir_config:
raise custom_errors.FileNotInConfig(local_path) # depends on [control=['if'], data=[]]
kodrive_config = self.adapter.get_config()
for key in kodrive_config['directories']:
d = kodrive_config['directories'][key]
if d['local_path'].rstrip('/') == local_path.rstrip('/'):
del kodrive_config['directories'][key]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# Done processing app config, commit :)
self.adapter.set_config(kodrive_config) # If the folder was shared, try to remove data from remote
if dir_config['is_shared'] and dir_config['server']:
# Process remote ~~~
r_api_key = dir_config['api_key']
r_device_id = dir_config['device_id']
if dir_config['host']:
host = dir_config['host'] # depends on [control=['if'], data=[]]
else:
host = self.devid_to_ip(r_device_id, False)
try:
# Create remote proxy to interact with remote
remote = SyncthingProxy(r_device_id, host, r_api_key, port=dir_config['port'] if 'port' in dir_config else None) # depends on [control=['try'], data=[]]
except Exception as e:
return True # depends on [control=['except'], data=[]]
r_config = remote.get_config()
r_folder = st_util.find_folder_with_path(dir_config['remote_path'], r_config)
# Delete device id from folder
r_config = remote.get_config()
self_devid = self.get_device_id()
del_device = remote.delete_device_from_folder(dir_config['remote_path'], self_devid, r_config)
# Check whether any other folder still depends on this device
pruned = st_util.prune_devices(r_folder, r_config)
remote.set_config(r_config)
if pruned:
remote.restart() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True
|
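The local half of free() is a fetch-mutate-commit cycle against the Syncthing config. A condensed sketch of just that cycle, where proxy and st_util stand in for the objects above (assumptions, not the module's public API):
def detach_folder(proxy, st_util, local_path):
    # Fetch the current config, mutate it in place, then commit it back.
    config = proxy.get_config()
    folder = st_util.find_folder_with_path(local_path, config)
    proxy.delete_folder(local_path, config)
    pruned = st_util.prune_devices(folder, config)
    proxy.set_config(config)
    if pruned:
        # Device entries changed, so the daemon must restart to apply them.
        proxy.restart()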
def vrf_add(self, route_dist, import_rts, export_rts, site_of_origins=None,
route_family=RF_VPN_V4, multi_exit_disc=None):
""" This method adds a new vrf used for VPN.
``route_dist`` specifies a route distinguisher value.
``import_rts`` specifies a list of route targets to be imported.
``export_rts`` specifies a list of route targets to be exported.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
- RF_L2_EVPN = 'evpn'
- RF_VPNV4_FLOWSPEC = 'ipv4fs'
- RF_VPNV6_FLOWSPEC = 'ipv6fs'
- RF_L2VPN_FLOWSPEC = 'l2vpnfs'
``multi_exit_disc`` specifies multi exit discriminator (MED) value.
It must be an integer.
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family)
vrf = {
vrfs.ROUTE_DISTINGUISHER: route_dist,
vrfs.IMPORT_RTS: import_rts,
vrfs.EXPORT_RTS: export_rts,
vrfs.SITE_OF_ORIGINS: site_of_origins,
vrfs.VRF_RF: route_family,
vrfs.MULTI_EXIT_DISC: multi_exit_disc,
}
call('vrf.create', **vrf)
|
def function[vrf_add, parameter[self, route_dist, import_rts, export_rts, site_of_origins, route_family, multi_exit_disc]]:
constant[ This method adds a new vrf used for VPN.
``route_dist`` specifies a route distinguisher value.
``import_rts`` specifies a list of route targets to be imported.
``export_rts`` specifies a list of route targets to be exported.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
- RF_L2_EVPN = 'evpn'
- RF_VPNV4_FLOWSPEC = 'ipv4fs'
- RF_VPNV6_FLOWSPEC = 'ipv6fs'
- RF_L2VPN_FLOWSPEC = 'l2vpnfs'
``multi_exit_disc`` specifies multi exit discriminator (MED) value.
It must be an integer.
]
if compare[name[route_family] <ast.NotIn object at 0x7da2590d7190> name[SUPPORTED_VRF_RF]] begin[:]
<ast.Raise object at 0x7da1b1b3f2e0>
variable[vrf] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1b3dc30>, <ast.Attribute object at 0x7da1b1b3f430>, <ast.Attribute object at 0x7da1b1b3c370>, <ast.Attribute object at 0x7da1b1b3ef50>, <ast.Attribute object at 0x7da1b1b3d270>, <ast.Attribute object at 0x7da1b1b3f0d0>], [<ast.Name object at 0x7da1b1b3e1d0>, <ast.Name object at 0x7da1b1b3d120>, <ast.Name object at 0x7da1b1b3e800>, <ast.Name object at 0x7da1b1b3d030>, <ast.Name object at 0x7da1b1b3ed70>, <ast.Name object at 0x7da1b1b3d150>]]
call[name[call], parameter[constant[vrf.create]]]
|
keyword[def] identifier[vrf_add] ( identifier[self] , identifier[route_dist] , identifier[import_rts] , identifier[export_rts] , identifier[site_of_origins] = keyword[None] ,
identifier[route_family] = identifier[RF_VPN_V4] , identifier[multi_exit_disc] = keyword[None] ):
literal[string]
keyword[if] identifier[route_family] keyword[not] keyword[in] identifier[SUPPORTED_VRF_RF] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[route_family] )
identifier[vrf] ={
identifier[vrfs] . identifier[ROUTE_DISTINGUISHER] : identifier[route_dist] ,
identifier[vrfs] . identifier[IMPORT_RTS] : identifier[import_rts] ,
identifier[vrfs] . identifier[EXPORT_RTS] : identifier[export_rts] ,
identifier[vrfs] . identifier[SITE_OF_ORIGINS] : identifier[site_of_origins] ,
identifier[vrfs] . identifier[VRF_RF] : identifier[route_family] ,
identifier[vrfs] . identifier[MULTI_EXIT_DISC] : identifier[multi_exit_disc] ,
}
identifier[call] ( literal[string] ,** identifier[vrf] )
|
def vrf_add(self, route_dist, import_rts, export_rts, site_of_origins=None, route_family=RF_VPN_V4, multi_exit_disc=None):
""" This method adds a new vrf used for VPN.
``route_dist`` specifies a route distinguisher value.
``import_rts`` specifies a list of route targets to be imported.
``export_rts`` specifies a list of route targets to be exported.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
- RF_L2_EVPN = 'evpn'
- RF_VPNV4_FLOWSPEC = 'ipv4fs'
- RF_VPNV6_FLOWSPEC = 'ipv6fs'
- RF_L2VPN_FLOWSPEC = 'l2vpnfs'
``multi_exit_disc`` specifies multi exit discriminator (MED) value.
It must be an integer.
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family) # depends on [control=['if'], data=['route_family']]
vrf = {vrfs.ROUTE_DISTINGUISHER: route_dist, vrfs.IMPORT_RTS: import_rts, vrfs.EXPORT_RTS: export_rts, vrfs.SITE_OF_ORIGINS: site_of_origins, vrfs.VRF_RF: route_family, vrfs.MULTI_EXIT_DISC: multi_exit_disc}
call('vrf.create', **vrf)
|
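A hedged usage sketch for vrf_add; the route targets use the conventional ASN:nn notation, and the speaker object is assumed to expose the method above:
def provision_customer_vrf(speaker):
    # 'speaker' is a hypothetical object exposing vrf_add as defined above.
    speaker.vrf_add(
        route_dist='65000:100',
        import_rts=['65000:100'],
        export_rts=['65000:100'],
        site_of_origins=['65000:200'],
        route_family='ipv4',      # RF_VPN_V4, the default
        multi_exit_disc=100,
    )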
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
|
def function[run, parameter[self]]:
constant[Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
]
<ast.Try object at 0x7da1b002b670>
|
keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[__target] :
identifier[self] . identifier[__target] (* identifier[self] . identifier[__args] ,** identifier[self] . identifier[__kwargs] )
keyword[finally] :
keyword[del] identifier[self] . identifier[__target] , identifier[self] . identifier[__args] , identifier[self] . identifier[__kwargs]
|
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
|
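As the docstring notes, a subclass may override run() instead of passing a target; a minimal standard-library example:
import threading

class SumWorker(threading.Thread):
    def __init__(self, items):
        super().__init__()
        self.items = items
        self.total = 0

    def run(self):
        # Overriding run() replaces the default target/args dispatch above.
        self.total = sum(self.items)

w = SumWorker([1, 2, 3])
w.start()
w.join()
print(w.total)  # -> 6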
def get_index_and_columns_order(cls, columns_in_file_expected, columns_dict, file_path):
"""
:param columns_in_file_expected:
:param columns_dict:
:param file_path:
:rtype: tuple[list,list]
"""
use_columns_with_index = []
column_names_in_db = []
column_names_from_file = cls.get_column_names_from_file(file_path)
if not set(columns_in_file_expected).issubset(column_names_from_file):
log.exception(
'%s columns are not a subset of columns %s in file %s',
columns_in_file_expected,
column_names_from_file,
file_path
)
else:
for index, column in enumerate(column_names_from_file):
if column in columns_dict:
use_columns_with_index.append(index)
column_names_in_db.append(columns_dict[column])
return use_columns_with_index, column_names_in_db
|
def function[get_index_and_columns_order, parameter[cls, columns_in_file_expected, columns_dict, file_path]]:
constant[
:param columns_in_file_expected:
:param columns_dict:
:param file_path:
:rtype: tuple[list,list]
]
variable[use_columns_with_index] assign[=] list[[]]
variable[column_names_in_db] assign[=] list[[]]
variable[column_names_from_file] assign[=] call[name[cls].get_column_names_from_file, parameter[name[file_path]]]
if <ast.UnaryOp object at 0x7da204565ba0> begin[:]
call[name[log].exception, parameter[constant[%s columns are not a subset of columns %s in file %s], name[columns_in_file_expected], name[column_names_from_file], name[file_path]]]
return[tuple[[<ast.Name object at 0x7da1b0b7d060>, <ast.Name object at 0x7da1b0b7f640>]]]
|
keyword[def] identifier[get_index_and_columns_order] ( identifier[cls] , identifier[columns_in_file_expected] , identifier[columns_dict] , identifier[file_path] ):
literal[string]
identifier[use_columns_with_index] =[]
identifier[column_names_in_db] =[]
identifier[column_names_from_file] = identifier[cls] . identifier[get_column_names_from_file] ( identifier[file_path] )
keyword[if] keyword[not] identifier[set] ( identifier[columns_in_file_expected] ). identifier[issubset] ( identifier[column_names_from_file] ):
identifier[log] . identifier[exception] (
literal[string] ,
identifier[columns_in_file_expected] ,
identifier[column_names_from_file] ,
identifier[file_path]
)
keyword[else] :
keyword[for] identifier[index] , identifier[column] keyword[in] identifier[enumerate] ( identifier[column_names_from_file] ):
keyword[if] identifier[column] keyword[in] identifier[columns_dict] :
identifier[use_columns_with_index] . identifier[append] ( identifier[index] )
identifier[column_names_in_db] . identifier[append] ( identifier[columns_dict] [ identifier[column] ])
keyword[return] identifier[use_columns_with_index] , identifier[column_names_in_db]
|
def get_index_and_columns_order(cls, columns_in_file_expected, columns_dict, file_path):
"""
:param columns_in_file_expected:
:param columns_dict:
:param file_path:
:rtype: tuple[list,list]
"""
use_columns_with_index = []
column_names_in_db = []
column_names_from_file = cls.get_column_names_from_file(file_path)
if not set(columns_in_file_expected).issubset(column_names_from_file):
log.exception('%s columns are not a subset of columns %s in file %s', columns_in_file_expected, column_names_from_file, file_path) # depends on [control=['if'], data=[]]
else:
for (index, column) in enumerate(column_names_from_file):
if column in columns_dict:
use_columns_with_index.append(index)
column_names_in_db.append(columns_dict[column]) # depends on [control=['if'], data=['column', 'columns_dict']] # depends on [control=['for'], data=[]]
return (use_columns_with_index, column_names_in_db)
|
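A self-contained rerun of the index/column mapping loop above, with made-up file columns:
column_names_from_file = ['id', 'name', 'unused', 'created']
columns_dict = {'id': 'pk', 'created': 'created_at'}
use_columns_with_index, column_names_in_db = [], []
for index, column in enumerate(column_names_from_file):
    if column in columns_dict:
        use_columns_with_index.append(index)
        column_names_in_db.append(columns_dict[column])
print(use_columns_with_index)  # [0, 3]
print(column_names_in_db)      # ['pk', 'created_at']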
def debug(self, text):
"""
Posts a debug message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call the 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.DEBUG)))
|
def function[debug, parameter[self, text]]:
constant[
Posts a debug message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call the 'flush' method.
:param text: The text to log into file and console.
]
call[name[self].queue.put, parameter[call[name[dill].dumps, parameter[call[name[LogMessageCommand], parameter[]]]]]]
|
keyword[def] identifier[debug] ( identifier[self] , identifier[text] ):
literal[string]
identifier[self] . identifier[queue] . identifier[put] ( identifier[dill] . identifier[dumps] ( identifier[LogMessageCommand] ( identifier[text] = identifier[text] , identifier[level] = identifier[logging] . identifier[DEBUG] )))
|
def debug(self, text):
"""
Posts a debug message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call the 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.DEBUG)))
|
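The method serializes a command object with dill before enqueueing it. The round trip below sketches that step with stand-ins: the dataclass replaces the real LogMessageCommand and queue.Queue replaces the real queue, both assumptions:
import logging
import queue
from dataclasses import dataclass
import dill  # assumption: dill is installed, as the method above requires

@dataclass
class LogMessageCommand:  # stand-in for the real command class
    text: str
    level: int

q = queue.Queue()
q.put(dill.dumps(LogMessageCommand(text='probe', level=logging.DEBUG)))
cmd = dill.loads(q.get())
print(cmd.text, cmd.level)  # probe 10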
def thread_pool(self, thread_pool_patterns=None, params=None):
"""
Get information about thread pools.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html>`_
:arg thread_pool_patterns: A comma-separated list of regular-expressions
to filter the thread pools in the output
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg size: The multiplier in which to display values, valid choices are:
'', 'k', 'm', 'g', 't', 'p'
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'thread_pool', thread_pool_patterns), params=params)
|
def function[thread_pool, parameter[self, thread_pool_patterns, params]]:
constant[
Get information about thread pools.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html>`_
:arg thread_pool_patterns: A comma-separated list of regular-expressions
to filter the thread pools in the output
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg size: The multiplier in which to display values, valid choices are:
'', 'k', 'm', 'g', 't', 'p'
:arg v: Verbose mode. Display column headers, default False
]
return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[constant[_cat], constant[thread_pool], name[thread_pool_patterns]]]]]]
|
keyword[def] identifier[thread_pool] ( identifier[self] , identifier[thread_pool_patterns] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] ( literal[string] , identifier[_make_path] ( literal[string] ,
literal[string] , identifier[thread_pool_patterns] ), identifier[params] = identifier[params] )
|
def thread_pool(self, thread_pool_patterns=None, params=None):
"""
Get information about thread pools.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html>`_
:arg thread_pool_patterns: A comma-separated list of regular-expressions
to filter the thread pools in the output
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg size: The multiplier in which to display values, valid choices are:
'', 'k', 'm', 'g', 't', 'p'
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat', 'thread_pool', thread_pool_patterns), params=params)
|
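This is the cat thread_pool endpoint of an Elasticsearch client; a hedged call sketch using the signature exactly as shown above (the host URL is a placeholder, and elasticsearch-py availability is an assumption):
from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])  # placeholder host
# 'v' adds column headers, 'h' picks columns, per the docstring above.
print(es.cat.thread_pool(thread_pool_patterns='write',
                         params={'v': 'true', 'h': 'name,active,queue'}))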
def check_for_errors(self):
"""Check connection and channel for errors.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
try:
self._connection.check_for_errors()
except AMQPConnectionError:
self.set_state(self.CLOSED)
raise
if self.exceptions:
exception = self.exceptions[0]
if self.is_open:
self.exceptions.pop(0)
raise exception
if self.is_closed:
raise AMQPChannelError('channel was closed')
|
def function[check_for_errors, parameter[self]]:
constant[Check connection and channel for errors.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
]
<ast.Try object at 0x7da18bc71420>
if name[self].exceptions begin[:]
variable[exception] assign[=] call[name[self].exceptions][constant[0]]
if name[self].is_open begin[:]
call[name[self].exceptions.pop, parameter[constant[0]]]
<ast.Raise object at 0x7da2054a5570>
if name[self].is_closed begin[:]
<ast.Raise object at 0x7da2047e84c0>
|
keyword[def] identifier[check_for_errors] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_connection] . identifier[check_for_errors] ()
keyword[except] identifier[AMQPConnectionError] :
identifier[self] . identifier[set_state] ( identifier[self] . identifier[CLOSED] )
keyword[raise]
keyword[if] identifier[self] . identifier[exceptions] :
identifier[exception] = identifier[self] . identifier[exceptions] [ literal[int] ]
keyword[if] identifier[self] . identifier[is_open] :
identifier[self] . identifier[exceptions] . identifier[pop] ( literal[int] )
keyword[raise] identifier[exception]
keyword[if] identifier[self] . identifier[is_closed] :
keyword[raise] identifier[AMQPChannelError] ( literal[string] )
|
def check_for_errors(self):
"""Check connection and channel for errors.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
try:
self._connection.check_for_errors() # depends on [control=['try'], data=[]]
except AMQPConnectionError:
self.set_state(self.CLOSED)
raise # depends on [control=['except'], data=[]]
if self.exceptions:
exception = self.exceptions[0]
if self.is_open:
self.exceptions.pop(0) # depends on [control=['if'], data=[]]
raise exception # depends on [control=['if'], data=[]]
if self.is_closed:
raise AMQPChannelError('channel was closed') # depends on [control=['if'], data=[]]
|
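A small guard sketch showing how a caller might surface buffered errors before using the channel; the wrapper itself is hypothetical, while check_for_errors and its exceptions come from the method above:
def ensure_usable(channel):
    # check_for_errors() raises buffered AMQPChannelError / AMQPConnectionError;
    # return True only if the channel survived the check.
    try:
        channel.check_for_errors()
    except Exception as exc:
        print('channel unusable:', exc)
        return False
    return True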
def create(vm_):
'''
Create a single VM from a data dict.
'''
try:
if vm_['profile'] and config.is_profile_configured(
__opts__,
__active_provider_name__ or 'azurearm',
vm_['profile'],
vm_=vm_
) is False:
return False
except AttributeError:
pass
if vm_.get('bootstrap_interface') is None:
vm_['bootstrap_interface'] = 'public'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'creating', vm_, ['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'azurearm', vm_['driver']
)
if not vm_.get('location'):
vm_['location'] = get_location(kwargs=vm_)
log.info('Creating Cloud VM %s in %s', vm_['name'], vm_['location'])
vm_request = request_instance(vm_=vm_)
if not vm_request or 'error' in vm_request:
err_message = 'Error creating VM {0}! ({1})'.format(vm_['name'], six.text_type(vm_request))
log.error(err_message)
raise SaltCloudSystemExit(err_message)
def _query_node_data(name, bootstrap_interface):
'''
Query node data.
'''
data = show_instance(name, call='action')
if not data:
return False
ip_address = None
if bootstrap_interface == 'public':
ip_address = data['public_ips'][0]
if bootstrap_interface == 'private':
ip_address = data['private_ips'][0]
if ip_address is None:
return False
return ip_address
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(vm_['name'], vm_['bootstrap_interface'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure,
SaltCloudSystemExit
) as exc:
try:
log.warning(exc)
finally:
raise SaltCloudSystemExit(six.text_type(exc))
vm_['ssh_host'] = data
if not vm_.get('ssh_username'):
vm_['ssh_username'] = config.get_cloud_config_value(
'ssh_username', vm_, __opts__
)
vm_['password'] = config.get_cloud_config_value(
'ssh_password', vm_, __opts__
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'],
pprint.pformat(data)
)
ret.update(data)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'created',
vm_, ['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
|
def function[create, parameter[vm_]]:
constant[
Create a single VM from a data dict.
]
<ast.Try object at 0x7da1b2346b30>
if compare[call[name[vm_].get, parameter[constant[bootstrap_interface]]] is constant[None]] begin[:]
call[name[vm_]][constant[bootstrap_interface]] assign[=] constant[public]
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[starting create], call[constant[salt/cloud/{0}/creating].format, parameter[call[name[vm_]][constant[name]]]]]]
call[call[name[__utils__]][constant[cloud.cachedir_index_add]], parameter[call[name[vm_]][constant[name]], call[name[vm_]][constant[profile]], constant[azurearm], call[name[vm_]][constant[driver]]]]
if <ast.UnaryOp object at 0x7da1b21350f0> begin[:]
call[name[vm_]][constant[location]] assign[=] call[name[get_location], parameter[]]
call[name[log].info, parameter[constant[Creating Cloud VM %s in %s], call[name[vm_]][constant[name]], call[name[vm_]][constant[location]]]]
variable[vm_request] assign[=] call[name[request_instance], parameter[]]
if <ast.BoolOp object at 0x7da1b2136980> begin[:]
variable[err_message] assign[=] call[constant[Error creating VM {0}! ({1})].format, parameter[call[name[vm_]][constant[name]], call[name[six].text_type, parameter[name[vm_request]]]]]
call[name[log].error, parameter[name[err_message]]]
<ast.Raise object at 0x7da1b2135a50>
def function[_query_node_data, parameter[name, bootstrap_interface]]:
constant[
Query node data.
]
variable[data] assign[=] call[name[show_instance], parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b2136200> begin[:]
return[constant[False]]
variable[ip_address] assign[=] constant[None]
if compare[name[bootstrap_interface] equal[==] constant[public]] begin[:]
variable[ip_address] assign[=] call[call[name[data]][constant[public_ips]]][constant[0]]
if compare[name[bootstrap_interface] equal[==] constant[private]] begin[:]
variable[ip_address] assign[=] call[call[name[data]][constant[private_ips]]][constant[0]]
if compare[name[ip_address] is constant[None]] begin[:]
return[constant[False]]
return[name[ip_address]]
<ast.Try object at 0x7da1b2134dc0>
call[name[vm_]][constant[ssh_host]] assign[=] name[data]
if <ast.UnaryOp object at 0x7da1b2134a00> begin[:]
call[name[vm_]][constant[ssh_username]] assign[=] call[name[config].get_cloud_config_value, parameter[constant[ssh_username], name[vm_], name[__opts__]]]
call[name[vm_]][constant[password]] assign[=] call[name[config].get_cloud_config_value, parameter[constant[ssh_password], name[vm_], name[__opts__]]]
variable[ret] assign[=] call[call[name[__utils__]][constant[cloud.bootstrap]], parameter[name[vm_], name[__opts__]]]
variable[data] assign[=] call[name[show_instance], parameter[call[name[vm_]][constant[name]]]]
call[name[log].info, parameter[constant[Created Cloud VM '%s'], call[name[vm_]][constant[name]]]]
call[name[log].debug, parameter[constant['%s' VM creation details:
%s], call[name[vm_]][constant[name]], call[name[pprint].pformat, parameter[name[data]]]]]
call[name[ret].update, parameter[name[data]]]
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[created instance], call[constant[salt/cloud/{0}/created].format, parameter[call[name[vm_]][constant[name]]]]]]
return[name[ret]]
|
keyword[def] identifier[create] ( identifier[vm_] ):
literal[string]
keyword[try] :
keyword[if] identifier[vm_] [ literal[string] ] keyword[and] identifier[config] . identifier[is_profile_configured] (
identifier[__opts__] ,
identifier[__active_provider_name__] keyword[or] literal[string] ,
identifier[vm_] [ literal[string] ],
identifier[vm_] = identifier[vm_]
) keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] identifier[vm_] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[vm_] [ literal[string] ]= literal[string]
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ]),
identifier[args] = identifier[__utils__] [ literal[string] ](
literal[string] , identifier[vm_] ,[ literal[string] , literal[string] , literal[string] , literal[string] ]
),
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
identifier[__utils__] [ literal[string] ](
identifier[vm_] [ literal[string] ], identifier[vm_] [ literal[string] ], literal[string] , identifier[vm_] [ literal[string] ]
)
keyword[if] keyword[not] identifier[vm_] . identifier[get] ( literal[string] ):
identifier[vm_] [ literal[string] ]= identifier[get_location] ( identifier[kwargs] = identifier[vm_] )
identifier[log] . identifier[info] ( literal[string] , identifier[vm_] [ literal[string] ], identifier[vm_] [ literal[string] ])
identifier[vm_request] = identifier[request_instance] ( identifier[vm_] = identifier[vm_] )
keyword[if] keyword[not] identifier[vm_request] keyword[or] literal[string] keyword[in] identifier[vm_request] :
identifier[err_message] = literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ], identifier[six] . identifier[text_type] ( identifier[vm_request] ))
identifier[log] . identifier[error] ( identifier[err_message] )
keyword[raise] identifier[SaltCloudSystemExit] ( identifier[err_message] )
keyword[def] identifier[_query_node_data] ( identifier[name] , identifier[bootstrap_interface] ):
literal[string]
identifier[data] = identifier[show_instance] ( identifier[name] , identifier[call] = literal[string] )
keyword[if] keyword[not] identifier[data] :
keyword[return] keyword[False]
identifier[ip_address] = keyword[None]
keyword[if] identifier[bootstrap_interface] == literal[string] :
identifier[ip_address] = identifier[data] [ literal[string] ][ literal[int] ]
keyword[if] identifier[bootstrap_interface] == literal[string] :
identifier[ip_address] = identifier[data] [ literal[string] ][ literal[int] ]
keyword[if] identifier[ip_address] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[return] identifier[ip_address]
keyword[try] :
identifier[data] = identifier[salt] . identifier[utils] . identifier[cloud] . identifier[wait_for_ip] (
identifier[_query_node_data] ,
identifier[update_args] =( identifier[vm_] [ literal[string] ], identifier[vm_] [ literal[string] ],),
identifier[timeout] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[default] = literal[int] * literal[int] ),
identifier[interval] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[default] = literal[int] ),
identifier[interval_multiplier] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[default] = literal[int] ),
)
keyword[except] (
identifier[SaltCloudExecutionTimeout] ,
identifier[SaltCloudExecutionFailure] ,
identifier[SaltCloudSystemExit]
) keyword[as] identifier[exc] :
keyword[try] :
identifier[log] . identifier[warning] ( identifier[exc] )
keyword[finally] :
keyword[raise] identifier[SaltCloudSystemExit] ( identifier[six] . identifier[text_type] ( identifier[exc] ))
identifier[vm_] [ literal[string] ]= identifier[data]
keyword[if] keyword[not] identifier[vm_] . identifier[get] ( literal[string] ):
identifier[vm_] [ literal[string] ]= identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__]
)
identifier[vm_] [ literal[string] ]= identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__]
)
identifier[ret] = identifier[__utils__] [ literal[string] ]( identifier[vm_] , identifier[__opts__] )
identifier[data] = identifier[show_instance] ( identifier[vm_] [ literal[string] ], identifier[call] = literal[string] )
identifier[log] . identifier[info] ( literal[string] , identifier[vm_] [ literal[string] ])
identifier[log] . identifier[debug] (
literal[string] ,
identifier[vm_] [ literal[string] ],
identifier[pprint] . identifier[pformat] ( identifier[data] )
)
identifier[ret] . identifier[update] ( identifier[data] )
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ]),
identifier[args] = identifier[__utils__] [ literal[string] ](
literal[string] ,
identifier[vm_] ,[ literal[string] , literal[string] , literal[string] , literal[string] ]
),
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
keyword[return] identifier[ret]
|
def create(vm_):
"""
Create a single VM from a data dict.
"""
try:
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'azurearm', vm_['profile'], vm_=vm_) is False:
return False # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
if vm_.get('bootstrap_interface') is None:
vm_['bootstrap_interface'] = 'public' # depends on [control=['if'], data=[]]
__utils__['cloud.fire_event']('event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
__utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'azurearm', vm_['driver'])
if not vm_.get('location'):
vm_['location'] = get_location(kwargs=vm_) # depends on [control=['if'], data=[]]
log.info('Creating Cloud VM %s in %s', vm_['name'], vm_['location'])
vm_request = request_instance(vm_=vm_)
if not vm_request or 'error' in vm_request:
err_message = 'Error creating VM {0}! ({1})'.format(vm_['name'], six.text_type(vm_request))
log.error(err_message)
raise SaltCloudSystemExit(err_message) # depends on [control=['if'], data=[]]
def _query_node_data(name, bootstrap_interface):
"""
Query node data.
"""
data = show_instance(name, call='action')
if not data:
return False # depends on [control=['if'], data=[]]
ip_address = None
if bootstrap_interface == 'public':
ip_address = data['public_ips'][0] # depends on [control=['if'], data=[]]
if bootstrap_interface == 'private':
ip_address = data['private_ips'][0] # depends on [control=['if'], data=[]]
if ip_address is None:
return False # depends on [control=['if'], data=[]]
return ip_address
try:
data = salt.utils.cloud.wait_for_ip(_query_node_data, update_args=(vm_['name'], vm_['bootstrap_interface']), timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value('wait_for_ip_interval', vm_, __opts__, default=10), interval_multiplier=config.get_cloud_config_value('wait_for_ip_interval_multiplier', vm_, __opts__, default=1)) # depends on [control=['try'], data=[]]
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure, SaltCloudSystemExit) as exc:
try:
log.warning(exc) # depends on [control=['try'], data=[]]
finally:
raise SaltCloudSystemExit(six.text_type(exc)) # depends on [control=['except'], data=['exc']]
vm_['ssh_host'] = data
if not vm_.get('ssh_username'):
vm_['ssh_username'] = config.get_cloud_config_value('ssh_username', vm_, __opts__) # depends on [control=['if'], data=[]]
vm_['password'] = config.get_cloud_config_value('ssh_password', vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
data = show_instance(vm_['name'], call='action')
log.info("Created Cloud VM '%s'", vm_['name'])
log.debug("'%s' VM creation details:\n%s", vm_['name'], pprint.pformat(data))
ret.update(data)
__utils__['cloud.fire_event']('event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
return ret
|
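For orientation, a minimal sketch (not from the source) of the profile dict that create() consults; the keys mirror the lookups in the code above, while the values and the commented-out call are illustrative assumptions, since a real invocation also needs salt-cloud's module globals (__opts__, __utils__, __active_provider_name__) wired up.

# Hypothetical profile data; only the key names are taken from the code above.
vm_ = {
    'name': 'web01',                  # used for events and show_instance() lookups
    'profile': 'azure-ubuntu',        # validated via config.is_profile_configured()
    'driver': 'azurearm',
    'location': 'westeurope',         # otherwise filled in by get_location(kwargs=vm_)
    'bootstrap_interface': 'public',  # defaulted to 'public' when unset
}
# create(vm_)  # inside a configured salt-cloud run; fires 'starting create' first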
def _extract_nonce(cls, http_result):
"""
    Given an HTTP response from the session endpoint, extract the nonce, so we can "sign" requests with it.
    We don't really sign the requests in the traditional sense of a nonce; we just include it in the auth requests.
:param http_result: HTTP response from the bbc session endpoint.
:type http_result: requests.Response
:return: nonce to "sign" url requests with
:rtype: string
"""
# Extract the redirect URL from the last call
last_redirect_url = urlparse(http_result.history[-1].request.url)
last_redirect_query = dict(parse_qsl(last_redirect_url.query))
# Extract the nonce from the query string in the redirect URL
final_url = urlparse(last_redirect_query['goto'])
goto_url = dict(parse_qsl(final_url.query))
goto_url_query = parse_json(goto_url['state'])
# Return the nonce we can use for future queries
return goto_url_query['nonce']
|
def function[_extract_nonce, parameter[cls, http_result]]:
constant[
Given an HTTP response from the session endpoint, extract the nonce, so we can "sign" requests with it.
We don't really sign the requests in the traditional sense of a nonce; we just include it in the auth requests.
:param http_result: HTTP response from the bbc session endpoint.
:type http_result: requests.Response
:return: nonce to "sign" url requests with
:rtype: string
]
variable[last_redirect_url] assign[=] call[name[urlparse], parameter[call[name[http_result].history][<ast.UnaryOp object at 0x7da20c794b80>].request.url]]
variable[last_redirect_query] assign[=] call[name[dict], parameter[call[name[parse_qsl], parameter[name[last_redirect_url].query]]]]
variable[final_url] assign[=] call[name[urlparse], parameter[call[name[last_redirect_query]][constant[goto]]]]
variable[goto_url] assign[=] call[name[dict], parameter[call[name[parse_qsl], parameter[name[final_url].query]]]]
variable[goto_url_query] assign[=] call[name[parse_json], parameter[call[name[goto_url]][constant[state]]]]
return[call[name[goto_url_query]][constant[nonce]]]
|
keyword[def] identifier[_extract_nonce] ( identifier[cls] , identifier[http_result] ):
literal[string]
identifier[last_redirect_url] = identifier[urlparse] ( identifier[http_result] . identifier[history] [- literal[int] ]. identifier[request] . identifier[url] )
identifier[last_redirect_query] = identifier[dict] ( identifier[parse_qsl] ( identifier[last_redirect_url] . identifier[query] ))
identifier[final_url] = identifier[urlparse] ( identifier[last_redirect_query] [ literal[string] ])
identifier[goto_url] = identifier[dict] ( identifier[parse_qsl] ( identifier[final_url] . identifier[query] ))
identifier[goto_url_query] = identifier[parse_json] ( identifier[goto_url] [ literal[string] ])
keyword[return] identifier[goto_url_query] [ literal[string] ]
|
def _extract_nonce(cls, http_result):
"""
    Given an HTTP response from the session endpoint, extract the nonce, so we can "sign" requests with it.
    We don't really sign the requests in the traditional sense of a nonce; we just include it in the auth requests.
:param http_result: HTTP response from the bbc session endpoint.
:type http_result: requests.Response
:return: nonce to "sign" url requests with
:rtype: string
"""
# Extract the redirect URL from the last call
last_redirect_url = urlparse(http_result.history[-1].request.url)
last_redirect_query = dict(parse_qsl(last_redirect_url.query))
# Extract the nonce from the query string in the redirect URL
final_url = urlparse(last_redirect_query['goto'])
goto_url = dict(parse_qsl(final_url.query))
goto_url_query = parse_json(goto_url['state'])
# Return the nonce we can use for future queries
return goto_url_query['nonce']
|
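The parsing steps in _extract_nonce can be exercised standalone. The sketch below replays them against a made-up redirect URL (in the real method that URL comes from http_result.history[-1].request.url), with json.loads standing in for the module's parse_json helper.

import json
from urllib.parse import urlparse, parse_qsl, quote

# Build a fake redirect URL: ?goto=<callback-url-whose-state-holds-the-nonce>
state = json.dumps({'nonce': 'abc123'})
goto = 'https://session.bbc.example/callback?state=' + quote(state)
redirect_url = 'https://idp.example/authorize?goto=' + quote(goto, safe='')

last_redirect_url = urlparse(redirect_url)
last_redirect_query = dict(parse_qsl(last_redirect_url.query))
final_url = urlparse(last_redirect_query['goto'])
goto_url = dict(parse_qsl(final_url.query))
print(json.loads(goto_url['state'])['nonce'])  # -> abc123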
def to_server_timezone(self, timezones, for_year):
"""Returns the Microsoft timezone ID corresponding to this timezone. There may not be a match at all, and there
may be multiple matches. If so, we return a random timezone ID.
:param timezones: A list of server timezones, as returned by
list(account.protocol.get_timezones(return_full_timezone_data=True))
:param for_year:
:return: A Microsoft timezone ID, as a string
"""
candidates = set()
for tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups in timezones:
candidate = self.from_server_timezone(tz_periods, tz_transitions, tz_transitions_groups, for_year)
if candidate == self:
log.debug('Found exact candidate: %s (%s)', tz_id, tz_name)
# We prefer this timezone over anything else. Return immediately.
return tz_id
# Reduce list based on base bias and standard / daylight bias values
if candidate.bias != self.bias:
continue
if candidate.standard_time is None:
if self.standard_time is not None:
continue
else:
if self.standard_time is None:
continue
if candidate.standard_time.bias != self.standard_time.bias:
continue
if candidate.daylight_time is None:
if self.daylight_time is not None:
continue
else:
if self.daylight_time is None:
continue
if candidate.daylight_time.bias != self.daylight_time.bias:
continue
log.debug('Found candidate with matching biases: %s (%s)', tz_id, tz_name)
candidates.add(tz_id)
if not candidates:
raise ValueError('No server timezones match this timezone definition')
if len(candidates) == 1:
log.info('Could not find an exact timezone match for %s. Selecting the best candidate', self)
else:
log.warning('Could not find an exact timezone match for %s. Selecting a random candidate', self)
return candidates.pop()
|
def function[to_server_timezone, parameter[self, timezones, for_year]]:
constant[Returns the Microsoft timezone ID corresponding to this timezone. There may not be a match at all, and there
may be multiple matches. If so, we return a random timezone ID.
:param timezones: A list of server timezones, as returned by
list(account.protocol.get_timezones(return_full_timezone_data=True))
:param for_year:
:return: A Microsoft timezone ID, as a string
]
variable[candidates] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b26adde0>, <ast.Name object at 0x7da1b26ae380>, <ast.Name object at 0x7da1b26ad120>, <ast.Name object at 0x7da1b26afe80>, <ast.Name object at 0x7da1b26ad8a0>]]] in starred[name[timezones]] begin[:]
variable[candidate] assign[=] call[name[self].from_server_timezone, parameter[name[tz_periods], name[tz_transitions], name[tz_transitions_groups], name[for_year]]]
if compare[name[candidate] equal[==] name[self]] begin[:]
call[name[log].debug, parameter[constant[Found exact candidate: %s (%s)], name[tz_id], name[tz_name]]]
return[name[tz_id]]
if compare[name[candidate].bias not_equal[!=] name[self].bias] begin[:]
continue
if compare[name[candidate].standard_time is constant[None]] begin[:]
if compare[name[self].standard_time is_not constant[None]] begin[:]
continue
if compare[name[candidate].daylight_time is constant[None]] begin[:]
if compare[name[self].daylight_time is_not constant[None]] begin[:]
continue
call[name[log].debug, parameter[constant[Found candidate with matching biases: %s (%s)], name[tz_id], name[tz_name]]]
call[name[candidates].add, parameter[name[tz_id]]]
if <ast.UnaryOp object at 0x7da1b26ad510> begin[:]
<ast.Raise object at 0x7da1b26ae0e0>
if compare[call[name[len], parameter[name[candidates]]] equal[==] constant[1]] begin[:]
call[name[log].info, parameter[constant[Could not find an exact timezone match for %s. Selecting the best candidate], name[self]]]
return[call[name[candidates].pop, parameter[]]]
|
keyword[def] identifier[to_server_timezone] ( identifier[self] , identifier[timezones] , identifier[for_year] ):
literal[string]
identifier[candidates] = identifier[set] ()
keyword[for] identifier[tz_id] , identifier[tz_name] , identifier[tz_periods] , identifier[tz_transitions] , identifier[tz_transitions_groups] keyword[in] identifier[timezones] :
identifier[candidate] = identifier[self] . identifier[from_server_timezone] ( identifier[tz_periods] , identifier[tz_transitions] , identifier[tz_transitions_groups] , identifier[for_year] )
keyword[if] identifier[candidate] == identifier[self] :
identifier[log] . identifier[debug] ( literal[string] , identifier[tz_id] , identifier[tz_name] )
keyword[return] identifier[tz_id]
keyword[if] identifier[candidate] . identifier[bias] != identifier[self] . identifier[bias] :
keyword[continue]
keyword[if] identifier[candidate] . identifier[standard_time] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[standard_time] keyword[is] keyword[not] keyword[None] :
keyword[continue]
keyword[else] :
keyword[if] identifier[self] . identifier[standard_time] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[candidate] . identifier[standard_time] . identifier[bias] != identifier[self] . identifier[standard_time] . identifier[bias] :
keyword[continue]
keyword[if] identifier[candidate] . identifier[daylight_time] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[daylight_time] keyword[is] keyword[not] keyword[None] :
keyword[continue]
keyword[else] :
keyword[if] identifier[self] . identifier[daylight_time] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[candidate] . identifier[daylight_time] . identifier[bias] != identifier[self] . identifier[daylight_time] . identifier[bias] :
keyword[continue]
identifier[log] . identifier[debug] ( literal[string] , identifier[tz_id] , identifier[tz_name] )
identifier[candidates] . identifier[add] ( identifier[tz_id] )
keyword[if] keyword[not] identifier[candidates] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[candidates] )== literal[int] :
identifier[log] . identifier[info] ( literal[string] , identifier[self] )
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] , identifier[self] )
keyword[return] identifier[candidates] . identifier[pop] ()
|
def to_server_timezone(self, timezones, for_year):
"""Returns the Microsoft timezone ID corresponding to this timezone. There may not be a match at all, and there
may be multiple matches. If so, we return a random timezone ID.
:param timezones: A list of server timezones, as returned by
list(account.protocol.get_timezones(return_full_timezone_data=True))
:param for_year:
:return: A Microsoft timezone ID, as a string
"""
candidates = set()
for (tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups) in timezones:
candidate = self.from_server_timezone(tz_periods, tz_transitions, tz_transitions_groups, for_year)
if candidate == self:
log.debug('Found exact candidate: %s (%s)', tz_id, tz_name)
# We prefer this timezone over anything else. Return immediately.
return tz_id # depends on [control=['if'], data=[]]
# Reduce list based on base bias and standard / daylight bias values
if candidate.bias != self.bias:
continue # depends on [control=['if'], data=[]]
if candidate.standard_time is None:
if self.standard_time is not None:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if self.standard_time is None:
continue # depends on [control=['if'], data=[]]
if candidate.standard_time.bias != self.standard_time.bias:
continue # depends on [control=['if'], data=[]]
if candidate.daylight_time is None:
if self.daylight_time is not None:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if self.daylight_time is None:
continue # depends on [control=['if'], data=[]]
if candidate.daylight_time.bias != self.daylight_time.bias:
continue # depends on [control=['if'], data=[]]
log.debug('Found candidate with matching biases: %s (%s)', tz_id, tz_name)
candidates.add(tz_id) # depends on [control=['for'], data=[]]
if not candidates:
raise ValueError('No server timezones match this timezone definition') # depends on [control=['if'], data=[]]
if len(candidates) == 1:
log.info('Could not find an exact timezone match for %s. Selecting the best candidate', self) # depends on [control=['if'], data=[]]
else:
log.warning('Could not find an exact timezone match for %s. Selecting a random candidate', self)
return candidates.pop()
|
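The pruning logic above boils down to: same base bias, and standard/daylight transitions that are either both absent or carry equal biases. A self-contained restatement of that test, using SimpleNamespace stand-ins rather than the real timezone objects:

from types import SimpleNamespace as NS

this_tz = NS(bias=-60, standard_time=NS(bias=0), daylight_time=NS(bias=-60))
candidate = NS(bias=-60, standard_time=NS(bias=0), daylight_time=NS(bias=-60))

matches = (
    candidate.bias == this_tz.bias
    and (candidate.standard_time is None) == (this_tz.standard_time is None)
    and (candidate.standard_time is None
         or candidate.standard_time.bias == this_tz.standard_time.bias)
    and (candidate.daylight_time is None) == (this_tz.daylight_time is None)
    and (candidate.daylight_time is None
         or candidate.daylight_time.bias == this_tz.daylight_time.bias)
)
print(matches)  # -> True: this candidate's tz_id would be added to the set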
def _startXTVDNode(self, name, attrs):
"""Process the start of the top-level xtvd node"""
schemaVersion = attrs.get('schemaVersion')
validFrom = self._parseDateTime(attrs.get('from'))
validTo = self._parseDateTime(attrs.get('to'))
self._progress.printMsg('Parsing version %s data from %s to %s' %
(schemaVersion,
validFrom.strftime('%Y/%m/%d'),
validTo.strftime('%Y/%m/%d')))
|
def function[_startXTVDNode, parameter[self, name, attrs]]:
constant[Process the start of the top-level xtvd node]
variable[schemaVersion] assign[=] call[name[attrs].get, parameter[constant[schemaVersion]]]
variable[validFrom] assign[=] call[name[self]._parseDateTime, parameter[call[name[attrs].get, parameter[constant[from]]]]]
variable[validTo] assign[=] call[name[self]._parseDateTime, parameter[call[name[attrs].get, parameter[constant[to]]]]]
call[name[self]._progress.printMsg, parameter[binary_operation[constant[Parsing version %s data from %s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c5660>, <ast.Call object at 0x7da20c6c50f0>, <ast.Call object at 0x7da20c6c59f0>]]]]]
|
keyword[def] identifier[_startXTVDNode] ( identifier[self] , identifier[name] , identifier[attrs] ):
literal[string]
identifier[schemaVersion] = identifier[attrs] . identifier[get] ( literal[string] )
identifier[validFrom] = identifier[self] . identifier[_parseDateTime] ( identifier[attrs] . identifier[get] ( literal[string] ))
identifier[validTo] = identifier[self] . identifier[_parseDateTime] ( identifier[attrs] . identifier[get] ( literal[string] ))
identifier[self] . identifier[_progress] . identifier[printMsg] ( literal[string] %
( identifier[schemaVersion] ,
identifier[validFrom] . identifier[strftime] ( literal[string] ),
identifier[validTo] . identifier[strftime] ( literal[string] )))
|
def _startXTVDNode(self, name, attrs):
"""Process the start of the top-level xtvd node"""
schemaVersion = attrs.get('schemaVersion')
validFrom = self._parseDateTime(attrs.get('from'))
validTo = self._parseDateTime(attrs.get('to'))
self._progress.printMsg('Parsing version %s data from %s to %s' % (schemaVersion, validFrom.strftime('%Y/%m/%d'), validTo.strftime('%Y/%m/%d')))
|
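For illustration, a plausible stand-in for the _parseDateTime helper (defined elsewhere in the class); the ISO-like timestamp format is an assumption based on typical xtvd data:

from datetime import datetime

def parse_date_time(value):  # hypothetical equivalent of self._parseDateTime
    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')

attrs = {'schemaVersion': '1.3',
         'from': '2008-07-14T00:00:00Z',
         'to': '2008-07-28T00:00:00Z'}
print(parse_date_time(attrs['from']).strftime('%Y/%m/%d'))  # -> 2008/07/14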
def _get_friends_count(session, user_id):
"""
https://vk.com/dev/friends.get
"""
response = session.fetch('friends.get', user_id=user_id, count=1)
return response["count"]
|
def function[_get_friends_count, parameter[session, user_id]]:
constant[
https://vk.com/dev/friends.get
]
variable[response] assign[=] call[name[session].fetch, parameter[constant[friends.get]]]
return[call[name[response]][constant[count]]]
|
keyword[def] identifier[_get_friends_count] ( identifier[session] , identifier[user_id] ):
literal[string]
identifier[response] = identifier[session] . identifier[fetch] ( literal[string] , identifier[user_id] = identifier[user_id] , identifier[count] = literal[int] )
keyword[return] identifier[response] [ literal[string] ]
|
def _get_friends_count(session, user_id):
"""
https://vk.com/dev/friends.get
"""
response = session.fetch('friends.get', user_id=user_id, count=1)
return response['count']
|
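A stub session is enough to exercise _get_friends_count offline; the response shape ({'count': ..., 'items': [...]}) follows the friends.get payload, and the numbers are invented:

class FakeSession:  # invented stub; fetch() mirrors the VK JSON payload shape
    def fetch(self, method, **params):
        assert method == 'friends.get'
        return {'count': 42, 'items': []}

print(_get_friends_count(FakeSession(), user_id=1))  # -> 42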
def get_task(self, name, include_helpers=True):
"""
Get task identified by name or raise TaskNotFound if there
is no such task
:param name: name of helper/task to get
:param include_helpers: if True, also look for helpers
:return: task or helper identified by name
"""
if not include_helpers and name in self._helper_names:
raise TaskNotFound(name)
try:
return getattr(self._tasks, name)
except AttributeError:
raise TaskNotFound(name)
|
def function[get_task, parameter[self, name, include_helpers]]:
constant[
Get task identified by name or raise TaskNotFound if there
is no such task
:param name: name of helper/task to get
:param include_helpers: if True, also look for helpers
:return: task or helper identified by name
]
if <ast.BoolOp object at 0x7da20c76db70> begin[:]
<ast.Raise object at 0x7da20c76c250>
<ast.Try object at 0x7da20c76faf0>
|
keyword[def] identifier[get_task] ( identifier[self] , identifier[name] , identifier[include_helpers] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[include_helpers] keyword[and] identifier[name] keyword[in] identifier[self] . identifier[_helper_names] :
keyword[raise] identifier[TaskNotFound] ( identifier[name] )
keyword[try] :
keyword[return] identifier[getattr] ( identifier[self] . identifier[_tasks] , identifier[name] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[TaskNotFound] ( identifier[name] )
|
def get_task(self, name, include_helpers=True):
"""
Get task identified by name or raise TaskNotFound if there
is no such task
:param name: name of helper/task to get
:param include_helpers: if True, also look for helpers
:return: task or helper identified by name
"""
if not include_helpers and name in self._helper_names:
raise TaskNotFound(name) # depends on [control=['if'], data=[]]
try:
return getattr(self._tasks, name) # depends on [control=['try'], data=[]]
except AttributeError:
raise TaskNotFound(name) # depends on [control=['except'], data=[]]
|
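A quick sketch of get_task's lookup behaviour with throwaway stand-ins; it assumes get_task and a TaskNotFound class share a module, as they do in the original:

from types import SimpleNamespace

class TaskNotFound(Exception):  # stand-in for the module's exception
    pass

holder = SimpleNamespace(
    _helper_names={'build'},
    _tasks=SimpleNamespace(build=lambda: 'built', deploy=lambda: 'deployed'),
)
print(get_task(holder, 'deploy')())  # -> deployed
print(get_task(holder, 'build')())   # -> built (helpers included by default)
# get_task(holder, 'build', include_helpers=False) would raise TaskNotFound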
def _get_gos_upper(self, ntpltgo1, max_upper, go2parentids):
"""Plot a GO DAG for the upper portion of a single Group of user GOs."""
# Get GO IDs which are in the hdrgo path
goids_possible = ntpltgo1.gosubdag.go2obj.keys()
# Get upper GO IDs which have the most descendants
return self._get_gosrcs_upper(goids_possible, max_upper, go2parentids)
|
def function[_get_gos_upper, parameter[self, ntpltgo1, max_upper, go2parentids]]:
constant[Plot a GO DAG for the upper portion of a single Group of user GOs.]
variable[goids_possible] assign[=] call[name[ntpltgo1].gosubdag.go2obj.keys, parameter[]]
return[call[name[self]._get_gosrcs_upper, parameter[name[goids_possible], name[max_upper], name[go2parentids]]]]
|
keyword[def] identifier[_get_gos_upper] ( identifier[self] , identifier[ntpltgo1] , identifier[max_upper] , identifier[go2parentids] ):
literal[string]
identifier[goids_possible] = identifier[ntpltgo1] . identifier[gosubdag] . identifier[go2obj] . identifier[keys] ()
keyword[return] identifier[self] . identifier[_get_gosrcs_upper] ( identifier[goids_possible] , identifier[max_upper] , identifier[go2parentids] )
|
def _get_gos_upper(self, ntpltgo1, max_upper, go2parentids):
"""Plot a GO DAG for the upper portion of a single Group of user GOs."""
# Get GO IDs which are in the hdrgo path
goids_possible = ntpltgo1.gosubdag.go2obj.keys()
# Get upper GO IDs which have the most descendants
return self._get_gosrcs_upper(goids_possible, max_upper, go2parentids)
|
def build_parameters(self, stack, provider_stack=None):
"""Builds the CloudFormation Parameters for our stack.
Args:
stack (:class:`stacker.stack.Stack`): A stacker stack
provider_stack (dict): An optional Stacker provider object
Returns:
        list: The parameters for the given stack, as a list of dicts
"""
resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
required_parameters = list(stack.required_parameter_definitions)
all_parameters = list(stack.all_parameter_definitions)
parameters = _handle_missing_parameters(resolved, all_parameters,
required_parameters,
provider_stack)
param_list = []
for key, value in parameters:
param_dict = {"ParameterKey": key}
if value is UsePreviousParameterValue:
param_dict["UsePreviousValue"] = True
else:
param_dict["ParameterValue"] = str(value)
param_list.append(param_dict)
return param_list
|
def function[build_parameters, parameter[self, stack, provider_stack]]:
constant[Builds the CloudFormation Parameters for our stack.
Args:
stack (:class:`stacker.stack.Stack`): A stacker stack
provider_stack (dict): An optional Stacker provider object
Returns:
list: The parameters for the given stack, as a list of dicts
]
variable[resolved] assign[=] call[name[_resolve_parameters], parameter[name[stack].parameter_values, name[stack].blueprint]]
variable[required_parameters] assign[=] call[name[list], parameter[name[stack].required_parameter_definitions]]
variable[all_parameters] assign[=] call[name[list], parameter[name[stack].all_parameter_definitions]]
variable[parameters] assign[=] call[name[_handle_missing_parameters], parameter[name[resolved], name[all_parameters], name[required_parameters], name[provider_stack]]]
variable[param_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c9907c0>, <ast.Name object at 0x7da20c9924d0>]]] in starred[name[parameters]] begin[:]
variable[param_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b18b6890>], [<ast.Name object at 0x7da1b18b6650>]]
if compare[name[value] is name[UsePreviousParameterValue]] begin[:]
call[name[param_dict]][constant[UsePreviousValue]] assign[=] constant[True]
call[name[param_list].append, parameter[name[param_dict]]]
return[name[param_list]]
|
keyword[def] identifier[build_parameters] ( identifier[self] , identifier[stack] , identifier[provider_stack] = keyword[None] ):
literal[string]
identifier[resolved] = identifier[_resolve_parameters] ( identifier[stack] . identifier[parameter_values] , identifier[stack] . identifier[blueprint] )
identifier[required_parameters] = identifier[list] ( identifier[stack] . identifier[required_parameter_definitions] )
identifier[all_parameters] = identifier[list] ( identifier[stack] . identifier[all_parameter_definitions] )
identifier[parameters] = identifier[_handle_missing_parameters] ( identifier[resolved] , identifier[all_parameters] ,
identifier[required_parameters] ,
identifier[provider_stack] )
identifier[param_list] =[]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[parameters] :
identifier[param_dict] ={ literal[string] : identifier[key] }
keyword[if] identifier[value] keyword[is] identifier[UsePreviousParameterValue] :
identifier[param_dict] [ literal[string] ]= keyword[True]
keyword[else] :
identifier[param_dict] [ literal[string] ]= identifier[str] ( identifier[value] )
identifier[param_list] . identifier[append] ( identifier[param_dict] )
keyword[return] identifier[param_list]
|
def build_parameters(self, stack, provider_stack=None):
"""Builds the CloudFormation Parameters for our stack.
Args:
stack (:class:`stacker.stack.Stack`): A stacker stack
provider_stack (dict): An optional Stacker provider object
Returns:
        list: The parameters for the given stack, as a list of dicts
"""
resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
required_parameters = list(stack.required_parameter_definitions)
all_parameters = list(stack.all_parameter_definitions)
parameters = _handle_missing_parameters(resolved, all_parameters, required_parameters, provider_stack)
param_list = []
for (key, value) in parameters:
param_dict = {'ParameterKey': key}
if value is UsePreviousParameterValue:
param_dict['UsePreviousValue'] = True # depends on [control=['if'], data=[]]
else:
param_dict['ParameterValue'] = str(value)
param_list.append(param_dict) # depends on [control=['for'], data=[]]
return param_list
|
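The list build_parameters returns has the boto3-style CloudFormation shape. The fragment below replays just the final loop with invented inputs and a stand-in for the UsePreviousParameterValue sentinel:

class UsePreviousParameterValue:  # stand-in for the sentinel used above
    pass

parameters = [('BucketName', 'my-bucket'), ('Ttl', UsePreviousParameterValue)]
param_list = []
for key, value in parameters:
    param_dict = {'ParameterKey': key}
    if value is UsePreviousParameterValue:
        param_dict['UsePreviousValue'] = True
    else:
        param_dict['ParameterValue'] = str(value)
    param_list.append(param_dict)
print(param_list)
# [{'ParameterKey': 'BucketName', 'ParameterValue': 'my-bucket'},
#  {'ParameterKey': 'Ttl', 'UsePreviousValue': True}]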
def check_sample_files(fam_filename, raw_dirname):
"""Checks the raw sample files.
:param fam_filename: the name of the FAM file.
:param raw_dirname: the name of the directory containing the raw file.
:type fam_filename: str
:type raw_dirname: str
:returns: the set of all the sample files that are compatible with the FAM
file.
:rtype: set
"""
    # Reading the sample identification numbers from the FAM file
fam_samples = None
with open(fam_filename, "r") as i_file:
fam_samples = {line.split()[1] for line in i_file.read().splitlines()}
# Checking the files in the raw directory
sample_files = set()
all_samples = set()
for filename in glob.glob(os.path.join(raw_dirname, "*")):
sample = os.path.splitext(os.path.basename(filename))[0]
all_samples.add(sample)
if sample not in fam_samples:
logger.warning("{}: sample not in FAM file".format(sample))
else:
sample_files.add(filename)
for sample in fam_samples - all_samples:
logger.warning("{}: sample not in raw directory".format(sample))
if len(sample_files) == 0:
raise ProgramError("no sample left for analysis")
return sample_files
|
def function[check_sample_files, parameter[fam_filename, raw_dirname]]:
constant[Checks the raw sample files.
:param fam_filename: the name of the FAM file.
:param raw_dirname: the name of the directory containing the raw file.
:type fam_filename: str
:type raw_dirname: str
:returns: the set of all the sample files that are compatible with the FAM
file.
:rtype: set
]
variable[fam_samples] assign[=] constant[None]
with call[name[open], parameter[name[fam_filename], constant[r]]] begin[:]
variable[fam_samples] assign[=] <ast.SetComp object at 0x7da1b0a4e620>
variable[sample_files] assign[=] call[name[set], parameter[]]
variable[all_samples] assign[=] call[name[set], parameter[]]
for taget[name[filename]] in starred[call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[raw_dirname], constant[*]]]]]] begin[:]
variable[sample] assign[=] call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[filename]]]]]][constant[0]]
call[name[all_samples].add, parameter[name[sample]]]
if compare[name[sample] <ast.NotIn object at 0x7da2590d7190> name[fam_samples]] begin[:]
call[name[logger].warning, parameter[call[constant[{}: sample not in FAM file].format, parameter[name[sample]]]]]
for taget[name[sample]] in starred[binary_operation[name[fam_samples] - name[all_samples]]] begin[:]
call[name[logger].warning, parameter[call[constant[{}: sample not in raw directory].format, parameter[name[sample]]]]]
if compare[call[name[len], parameter[name[sample_files]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0a4f220>
return[name[sample_files]]
|
keyword[def] identifier[check_sample_files] ( identifier[fam_filename] , identifier[raw_dirname] ):
literal[string]
identifier[fam_samples] = keyword[None]
keyword[with] identifier[open] ( identifier[fam_filename] , literal[string] ) keyword[as] identifier[i_file] :
identifier[fam_samples] ={ identifier[line] . identifier[split] ()[ literal[int] ] keyword[for] identifier[line] keyword[in] identifier[i_file] . identifier[read] (). identifier[splitlines] ()}
identifier[sample_files] = identifier[set] ()
identifier[all_samples] = identifier[set] ()
keyword[for] identifier[filename] keyword[in] identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[raw_dirname] , literal[string] )):
identifier[sample] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ))[ literal[int] ]
identifier[all_samples] . identifier[add] ( identifier[sample] )
keyword[if] identifier[sample] keyword[not] keyword[in] identifier[fam_samples] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[sample] ))
keyword[else] :
identifier[sample_files] . identifier[add] ( identifier[filename] )
keyword[for] identifier[sample] keyword[in] identifier[fam_samples] - identifier[all_samples] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[sample] ))
keyword[if] identifier[len] ( identifier[sample_files] )== literal[int] :
keyword[raise] identifier[ProgramError] ( literal[string] )
keyword[return] identifier[sample_files]
|
def check_sample_files(fam_filename, raw_dirname):
"""Checks the raw sample files.
:param fam_filename: the name of the FAM file.
:param raw_dirname: the name of the directory containing the raw file.
:type fam_filename: str
:type raw_dirname: str
:returns: the set of all the sample files that are compatible with the FAM
file.
:rtype: set
"""
    # Reading the sample identification numbers from the FAM file
fam_samples = None
with open(fam_filename, 'r') as i_file:
fam_samples = {line.split()[1] for line in i_file.read().splitlines()} # depends on [control=['with'], data=['i_file']]
# Checking the files in the raw directory
sample_files = set()
all_samples = set()
for filename in glob.glob(os.path.join(raw_dirname, '*')):
sample = os.path.splitext(os.path.basename(filename))[0]
all_samples.add(sample)
if sample not in fam_samples:
logger.warning('{}: sample not in FAM file'.format(sample)) # depends on [control=['if'], data=['sample']]
else:
sample_files.add(filename) # depends on [control=['for'], data=['filename']]
for sample in fam_samples - all_samples:
logger.warning('{}: sample not in raw directory'.format(sample)) # depends on [control=['for'], data=['sample']]
if len(sample_files) == 0:
raise ProgramError('no sample left for analysis') # depends on [control=['if'], data=[]]
return sample_files
|
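An end-to-end sketch on throwaway files (sample names invented; assumes the module-level glob/os/logger/ProgramError names are in scope as in the original):

import os
import tempfile

tmp = tempfile.mkdtemp()
fam = os.path.join(tmp, 'study.fam')
with open(fam, 'w') as f:
    f.write('fam1 sampleA 0 0 1 -9\nfam1 sampleB 0 0 2 -9\n')
raw_dir = os.path.join(tmp, 'raw')
os.mkdir(raw_dir)
open(os.path.join(raw_dir, 'sampleA.txt'), 'w').close()  # present in the FAM file
open(os.path.join(raw_dir, 'sampleC.txt'), 'w').close()  # warned about, then skipped
print(check_sample_files(fam, raw_dir))  # -> {'<tmp>/raw/sampleA.txt'}
# A 'sampleB: sample not in raw directory' warning is also logged.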
def save(self):
"""Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, ie. does upserts on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
validated against it after the ``pre_save`` hook but before the save."""
if hasattr(self, 'pre_save'):
self.pre_save()
database, collection = self._collection_key.split('.')
self.validate()
_id = current()[database][collection].save(dict(self))
if _id: self._id = _id
if hasattr(self, 'post_save'):
self.post_save()
|
def function[save, parameter[self]]:
constant[Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, i.e. it does upserts based on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
validated against it after the ``pre_save`` hook but before the save.]
if call[name[hasattr], parameter[name[self], constant[pre_save]]] begin[:]
call[name[self].pre_save, parameter[]]
<ast.Tuple object at 0x7da18f00ee30> assign[=] call[name[self]._collection_key.split, parameter[constant[.]]]
call[name[self].validate, parameter[]]
variable[_id] assign[=] call[call[call[call[name[current], parameter[]]][name[database]]][name[collection]].save, parameter[call[name[dict], parameter[name[self]]]]]
if name[_id] begin[:]
name[self]._id assign[=] name[_id]
if call[name[hasattr], parameter[name[self], constant[post_save]]] begin[:]
call[name[self].post_save, parameter[]]
|
keyword[def] identifier[save] ( identifier[self] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[pre_save] ()
identifier[database] , identifier[collection] = identifier[self] . identifier[_collection_key] . identifier[split] ( literal[string] )
identifier[self] . identifier[validate] ()
identifier[_id] = identifier[current] ()[ identifier[database] ][ identifier[collection] ]. identifier[save] ( identifier[dict] ( identifier[self] ))
keyword[if] identifier[_id] : identifier[self] . identifier[_id] = identifier[_id]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[post_save] ()
|
def save(self):
"""Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, ie. does upserts on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
validated against it after the ``pre_save`` hook but before the save."""
if hasattr(self, 'pre_save'):
self.pre_save() # depends on [control=['if'], data=[]]
(database, collection) = self._collection_key.split('.')
self.validate()
_id = current()[database][collection].save(dict(self))
if _id:
self._id = _id # depends on [control=['if'], data=[]]
if hasattr(self, 'post_save'):
self.post_save() # depends on [control=['if'], data=[]]
|
def set_aux(self, aux):
"""
Sets the TCP auxiliary port.
:param aux: console auxiliary port (integer)
"""
self.aux = aux
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
|
def function[set_aux, parameter[self, aux]]:
constant[
Sets the TCP auxiliary port.
:param aux: console auxiliary port (integer)
]
name[self].aux assign[=] name[aux]
<ast.YieldFrom object at 0x7da18f8104f0>
|
keyword[def] identifier[set_aux] ( identifier[self] , identifier[aux] ):
literal[string]
identifier[self] . identifier[aux] = identifier[aux]
keyword[yield] keyword[from] identifier[self] . identifier[_hypervisor] . identifier[send] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[aux] = identifier[aux] ))
|
def set_aux(self, aux):
"""
Sets the TCP auxiliary port.
:param aux: console auxiliary port (integer)
"""
self.aux = aux
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
|
def update_date(self):
"""269 Date normalization."""
dates_269 = record_get_field_instances(self.record, '269')
for idx, field in enumerate(dates_269):
new_subs = []
old_subs = field[0]
for code, value in old_subs:
if code == "c":
new_subs.append((
"c",
convert_date_from_iso_to_human(value)
))
else:
new_subs.append((code, value))
dates_269[idx] = field_swap_subfields(field, new_subs)
|
def function[update_date, parameter[self]]:
constant[269 Date normalization.]
variable[dates_269] assign[=] call[name[record_get_field_instances], parameter[name[self].record, constant[269]]]
for taget[tuple[[<ast.Name object at 0x7da20c6c5810>, <ast.Name object at 0x7da20c6c6680>]]] in starred[call[name[enumerate], parameter[name[dates_269]]]] begin[:]
variable[new_subs] assign[=] list[[]]
variable[old_subs] assign[=] call[name[field]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da20c6c7220>, <ast.Name object at 0x7da20c6c7e50>]]] in starred[name[old_subs]] begin[:]
if compare[name[code] equal[==] constant[c]] begin[:]
call[name[new_subs].append, parameter[tuple[[<ast.Constant object at 0x7da20c6c7f70>, <ast.Call object at 0x7da20c6c7c10>]]]]
call[name[dates_269]][name[idx]] assign[=] call[name[field_swap_subfields], parameter[name[field], name[new_subs]]]
|
keyword[def] identifier[update_date] ( identifier[self] ):
literal[string]
identifier[dates_269] = identifier[record_get_field_instances] ( identifier[self] . identifier[record] , literal[string] )
keyword[for] identifier[idx] , identifier[field] keyword[in] identifier[enumerate] ( identifier[dates_269] ):
identifier[new_subs] =[]
identifier[old_subs] = identifier[field] [ literal[int] ]
keyword[for] identifier[code] , identifier[value] keyword[in] identifier[old_subs] :
keyword[if] identifier[code] == literal[string] :
identifier[new_subs] . identifier[append] ((
literal[string] ,
identifier[convert_date_from_iso_to_human] ( identifier[value] )
))
keyword[else] :
identifier[new_subs] . identifier[append] (( identifier[code] , identifier[value] ))
identifier[dates_269] [ identifier[idx] ]= identifier[field_swap_subfields] ( identifier[field] , identifier[new_subs] )
|
def update_date(self):
"""269 Date normalization."""
dates_269 = record_get_field_instances(self.record, '269')
for (idx, field) in enumerate(dates_269):
new_subs = []
old_subs = field[0]
for (code, value) in old_subs:
if code == 'c':
new_subs.append(('c', convert_date_from_iso_to_human(value))) # depends on [control=['if'], data=[]]
else:
new_subs.append((code, value)) # depends on [control=['for'], data=[]]
dates_269[idx] = field_swap_subfields(field, new_subs) # depends on [control=['for'], data=[]]
|
def handle(self, cycle_delay=0.1):
"""
Spend approximately ``cycle_delay`` seconds to process requests to the server.
    :param cycle_delay: seconds to spend per processing cycle
"""
asyncore.loop(cycle_delay, count=1)
self._server.process(int(cycle_delay * 1000))
|
def function[handle, parameter[self, cycle_delay]]:
constant[
Spend approximately ``cycle_delay`` seconds to process requests to the server.
:param cycle_delay: seconds to spend per processing cycle
]
call[name[asyncore].loop, parameter[name[cycle_delay]]]
call[name[self]._server.process, parameter[call[name[int], parameter[binary_operation[name[cycle_delay] * constant[1000]]]]]]
|
keyword[def] identifier[handle] ( identifier[self] , identifier[cycle_delay] = literal[int] ):
literal[string]
identifier[asyncore] . identifier[loop] ( identifier[cycle_delay] , identifier[count] = literal[int] )
identifier[self] . identifier[_server] . identifier[process] ( identifier[int] ( identifier[cycle_delay] * literal[int] ))
|
def handle(self, cycle_delay=0.1):
"""
Spend approximately ``cycle_delay`` seconds to process requests to the server.
    :param cycle_delay: seconds to spend per processing cycle
"""
asyncore.loop(cycle_delay, count=1)
self._server.process(int(cycle_delay * 1000))
|
def databases(self):
"""
list of databases available from eutils (per einfo query)
"""
try:
return self._databases
except AttributeError:
self._databases = self.einfo().databases
return self._databases
|
def function[databases, parameter[self]]:
constant[
list of databases available from eutils (per einfo query)
]
<ast.Try object at 0x7da18f58eec0>
|
keyword[def] identifier[databases] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_databases]
keyword[except] identifier[AttributeError] :
identifier[self] . identifier[_databases] = identifier[self] . identifier[einfo] (). identifier[databases]
keyword[return] identifier[self] . identifier[_databases]
|
def databases(self):
"""
list of databases available from eutils (per einfo query)
"""
try:
return self._databases # depends on [control=['try'], data=[]]
except AttributeError:
self._databases = self.einfo().databases
return self._databases # depends on [control=['except'], data=[]]
|
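The try/except AttributeError pattern above is a generic compute-once cache. The same idiom in isolation, with an invented expensive call:

class Client:
    def _expensive_lookup(self):  # invented stand-in for self.einfo().databases
        print('fetching...')
        return ['pubmed', 'protein', 'nuccore']

    @property
    def databases(self):
        try:
            return self._databases
        except AttributeError:
            self._databases = self._expensive_lookup()
            return self._databases

c = Client()
c.databases  # first access prints 'fetching...' and caches the list
c.databases  # served from the cached attribute, no second fetch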
def init_parser(cls, parser):
"""Initialize argument parser"""
subparsers = parser.add_subparsers(title='Search domain')
# Compound subcommand
parser_compound = subparsers.add_parser(
'compound', help='Search in compounds')
parser_compound.set_defaults(which='compound')
parser_compound.add_argument(
'--id', '-i', dest='id', metavar='id',
action=FilePrefixAppendAction, type=text_type, default=[],
help='Compound ID')
parser_compound.add_argument(
'--name', '-n', dest='name', metavar='name',
action=FilePrefixAppendAction, type=text_type, default=[],
help='Name of compound')
# Reaction subcommand
parser_reaction = subparsers.add_parser(
'reaction', help='Search in reactions')
parser_reaction.set_defaults(which='reaction')
parser_reaction.add_argument(
'--id', '-i', dest='id', metavar='id',
action=FilePrefixAppendAction, type=str, default=[],
help='Reaction ID')
parser_reaction.add_argument(
'--compound', '-c', dest='compound', metavar='compound',
action=FilePrefixAppendAction, type=str, default=[],
help='Comma-separated list of compound IDs')
|
def function[init_parser, parameter[cls, parser]]:
constant[Initialize argument parser]
variable[subparsers] assign[=] call[name[parser].add_subparsers, parameter[]]
variable[parser_compound] assign[=] call[name[subparsers].add_parser, parameter[constant[compound]]]
call[name[parser_compound].set_defaults, parameter[]]
call[name[parser_compound].add_argument, parameter[constant[--id], constant[-i]]]
call[name[parser_compound].add_argument, parameter[constant[--name], constant[-n]]]
variable[parser_reaction] assign[=] call[name[subparsers].add_parser, parameter[constant[reaction]]]
call[name[parser_reaction].set_defaults, parameter[]]
call[name[parser_reaction].add_argument, parameter[constant[--id], constant[-i]]]
call[name[parser_reaction].add_argument, parameter[constant[--compound], constant[-c]]]
|
keyword[def] identifier[init_parser] ( identifier[cls] , identifier[parser] ):
literal[string]
identifier[subparsers] = identifier[parser] . identifier[add_subparsers] ( identifier[title] = literal[string] )
identifier[parser_compound] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[help] = literal[string] )
identifier[parser_compound] . identifier[set_defaults] ( identifier[which] = literal[string] )
identifier[parser_compound] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[action] = identifier[FilePrefixAppendAction] , identifier[type] = identifier[text_type] , identifier[default] =[],
identifier[help] = literal[string] )
identifier[parser_compound] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[action] = identifier[FilePrefixAppendAction] , identifier[type] = identifier[text_type] , identifier[default] =[],
identifier[help] = literal[string] )
identifier[parser_reaction] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[help] = literal[string] )
identifier[parser_reaction] . identifier[set_defaults] ( identifier[which] = literal[string] )
identifier[parser_reaction] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[action] = identifier[FilePrefixAppendAction] , identifier[type] = identifier[str] , identifier[default] =[],
identifier[help] = literal[string] )
identifier[parser_reaction] . identifier[add_argument] (
literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[action] = identifier[FilePrefixAppendAction] , identifier[type] = identifier[str] , identifier[default] =[],
identifier[help] = literal[string] )
|
def init_parser(cls, parser):
"""Initialize argument parser"""
subparsers = parser.add_subparsers(title='Search domain')
# Compound subcommand
parser_compound = subparsers.add_parser('compound', help='Search in compounds')
parser_compound.set_defaults(which='compound')
parser_compound.add_argument('--id', '-i', dest='id', metavar='id', action=FilePrefixAppendAction, type=text_type, default=[], help='Compound ID')
parser_compound.add_argument('--name', '-n', dest='name', metavar='name', action=FilePrefixAppendAction, type=text_type, default=[], help='Name of compound')
# Reaction subcommand
parser_reaction = subparsers.add_parser('reaction', help='Search in reactions')
parser_reaction.set_defaults(which='reaction')
parser_reaction.add_argument('--id', '-i', dest='id', metavar='id', action=FilePrefixAppendAction, type=str, default=[], help='Reaction ID')
parser_reaction.add_argument('--compound', '-c', dest='compound', metavar='compound', action=FilePrefixAppendAction, type=str, default=[], help='Comma-separated list of compound IDs')
|
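Driving init_parser with a throwaway ArgumentParser; this assumes FilePrefixAppendAction behaves like argparse's built-in append action and that text_type is in scope, and it passes cls=None since the class context is unused:

import argparse

parser = argparse.ArgumentParser(prog='search')
init_parser(None, parser)  # cls is unused, so None suffices for the sketch
args = parser.parse_args(['compound', '--name', 'atp', '--name', 'adp'])
print(args.which, args.name)  # -> compound ['atp', 'adp']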
def GetIPAddresses(self):
"""Return a list of IP addresses."""
results = []
for address in self.addresses:
human_readable_address = address.human_readable_address
if human_readable_address is not None:
results.append(human_readable_address)
return results
|
def function[GetIPAddresses, parameter[self]]:
constant[Return a list of IP addresses.]
variable[results] assign[=] list[[]]
for taget[name[address]] in starred[name[self].addresses] begin[:]
variable[human_readable_address] assign[=] name[address].human_readable_address
if compare[name[human_readable_address] is_not constant[None]] begin[:]
call[name[results].append, parameter[name[human_readable_address]]]
return[name[results]]
|
keyword[def] identifier[GetIPAddresses] ( identifier[self] ):
literal[string]
identifier[results] =[]
keyword[for] identifier[address] keyword[in] identifier[self] . identifier[addresses] :
identifier[human_readable_address] = identifier[address] . identifier[human_readable_address]
keyword[if] identifier[human_readable_address] keyword[is] keyword[not] keyword[None] :
identifier[results] . identifier[append] ( identifier[human_readable_address] )
keyword[return] identifier[results]
|
def GetIPAddresses(self):
"""Return a list of IP addresses."""
results = []
for address in self.addresses:
human_readable_address = address.human_readable_address
if human_readable_address is not None:
results.append(human_readable_address) # depends on [control=['if'], data=['human_readable_address']] # depends on [control=['for'], data=['address']]
return results
|
def unpack_nested_exception(error):
"""
    If exceptions are nested, return the innermost one
    :param error: A Python exception with possible exceptions embedded within
    :return: A Python exception with no exception embedded within
"""
i = 0
while True:
if error.args[i:]:
if isinstance(error.args[i], Exception):
error = error.args[i]
i = 0
else:
i += 1
else:
break
return error
|
def function[unpack_nested_exception, parameter[error]]:
constant[
If exceptions are nested, return the innermost one
:param error: A Python exception with possible exceptions embedded within
:return: A Python exception with no exception embedded within
]
variable[i] assign[=] constant[0]
while constant[True] begin[:]
if call[name[error].args][<ast.Slice object at 0x7da1b0d988b0>] begin[:]
if call[name[isinstance], parameter[call[name[error].args][name[i]], name[Exception]]] begin[:]
variable[error] assign[=] call[name[error].args][name[i]]
variable[i] assign[=] constant[0]
return[name[error]]
|
keyword[def] identifier[unpack_nested_exception] ( identifier[error] ):
literal[string]
identifier[i] = literal[int]
keyword[while] keyword[True] :
keyword[if] identifier[error] . identifier[args] [ identifier[i] :]:
keyword[if] identifier[isinstance] ( identifier[error] . identifier[args] [ identifier[i] ], identifier[Exception] ):
identifier[error] = identifier[error] . identifier[args] [ identifier[i] ]
identifier[i] = literal[int]
keyword[else] :
identifier[i] += literal[int]
keyword[else] :
keyword[break]
keyword[return] identifier[error]
|
def unpack_nested_exception(error):
"""
    If exceptions are nested, return the innermost one
    :param error: A Python exception with possible exceptions embedded within
    :return: A Python exception with no exception embedded within
"""
i = 0
while True:
if error.args[i:]:
if isinstance(error.args[i], Exception):
error = error.args[i]
i = 0 # depends on [control=['if'], data=[]]
else:
i += 1 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
return error
|
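Unwrapping in action: nest one exception inside another's args and the loop walks down to the innermost one.

inner = ValueError('root cause')
outer = RuntimeError('wrapper', inner)
print(unpack_nested_exception(outer))  # -> root cause (the inner ValueError)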
def patch(self, url: StrOrURL,
*, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP PATCH request."""
return _RequestContextManager(
self._request(hdrs.METH_PATCH, url,
data=data,
**kwargs))
|
def function[patch, parameter[self, url]]:
constant[Perform HTTP PATCH request.]
return[call[name[_RequestContextManager], parameter[call[name[self]._request, parameter[name[hdrs].METH_PATCH, name[url]]]]]]
|
keyword[def] identifier[patch] ( identifier[self] , identifier[url] : identifier[StrOrURL] ,
*, identifier[data] : identifier[Any] = keyword[None] ,** identifier[kwargs] : identifier[Any] )-> literal[string] :
literal[string]
keyword[return] identifier[_RequestContextManager] (
identifier[self] . identifier[_request] ( identifier[hdrs] . identifier[METH_PATCH] , identifier[url] ,
identifier[data] = identifier[data] ,
** identifier[kwargs] ))
|
def patch(self, url: StrOrURL, *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP PATCH request."""
return _RequestContextManager(self._request(hdrs.METH_PATCH, url, data=data, **kwargs))
|
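Typical aiohttp-style usage of the method above (a sketch; the endpoint URL is arbitrary):

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        async with session.patch('https://httpbin.org/patch',
                                 data=b'{"op": "replace"}') as resp:
            print(resp.status)  # -> 200 if the service is reachable

asyncio.run(main())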
def get_current_user():
"""Get current user."""
    if 'user_id' not in session:
return None
user = User.query.filter(User.id == session['user_id']).first()
if not user:
signout_user()
return None
return user
|
def function[get_current_user, parameter[]]:
constant[Get current user.]
if <ast.UnaryOp object at 0x7da20c9939a0> begin[:]
return[constant[None]]
variable[user] assign[=] call[call[name[User].query.filter, parameter[compare[name[User].id equal[==] call[name[session]][constant[user_id]]]]].first, parameter[]]
if <ast.UnaryOp object at 0x7da18f721ba0> begin[:]
call[name[signout_user], parameter[]]
return[constant[None]]
return[name[user]]
|
keyword[def] identifier[get_current_user] ():
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[session] :
keyword[return] keyword[None]
identifier[user] = identifier[User] . identifier[query] . identifier[filter] ( identifier[User] . identifier[id] == identifier[session] [ literal[string] ]). identifier[first] ()
keyword[if] keyword[not] identifier[user] :
identifier[signout_user] ()
keyword[return] keyword[None]
keyword[return] identifier[user]
|
def get_current_user():
"""Get current user."""
    if 'user_id' not in session:
return None # depends on [control=['if'], data=[]]
user = User.query.filter(User.id == session['user_id']).first()
if not user:
signout_user()
return None # depends on [control=['if'], data=[]]
return user
|
def set_subplot_ylabel(self, row, column, text):
"""Set a label for the y-axis of a subplot.
:param row,column: specify the subplot.
:param text: text of the label.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_ylabel(text)
|
def function[set_subplot_ylabel, parameter[self, row, column, text]]:
constant[Set a label for the y-axis of a subplot.
:param row,column: specify the subplot.
:param text: text of the label.
]
variable[subplot] assign[=] call[name[self].get_subplot_at, parameter[name[row], name[column]]]
call[name[subplot].set_ylabel, parameter[name[text]]]
|
keyword[def] identifier[set_subplot_ylabel] ( identifier[self] , identifier[row] , identifier[column] , identifier[text] ):
literal[string]
identifier[subplot] = identifier[self] . identifier[get_subplot_at] ( identifier[row] , identifier[column] )
identifier[subplot] . identifier[set_ylabel] ( identifier[text] )
|
def set_subplot_ylabel(self, row, column, text):
"""Set a label for the y-axis of a subplot.
:param row,column: specify the subplot.
:param text: text of the label.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_ylabel(text)
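A short usage sketch; plot stands in for whatever multi-panel figure object defines this method:
# Hypothetical 2x2 grid: label the y-axes of the left-hand column.
plot.set_subplot_ylabel(0, 0, 'Counts')
plot.set_subplot_ylabel(1, 0, 'Rate [Hz]')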
|
def register_file(name, member, path, digest='', conn=None):
'''
Register a file in the package database
'''
close = False
if conn is None:
close = True
conn = init()
conn.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
name,
'{0}/{1}'.format(path, member.path),
member.size,
member.mode,
digest,
member.devmajor,
member.devminor,
member.linkname,
member.linkpath,
member.uname,
member.gname,
member.mtime
))
if close:
conn.close()
|
def function[register_file, parameter[name, member, path, digest, conn]]:
constant[
Register a file in the package database
]
variable[close] assign[=] constant[False]
if compare[name[conn] is constant[None]] begin[:]
variable[close] assign[=] constant[True]
variable[conn] assign[=] call[name[init], parameter[]]
call[name[conn].execute, parameter[constant[INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)], tuple[[<ast.Name object at 0x7da20c7cb3a0>, <ast.Call object at 0x7da20c7c9390>, <ast.Attribute object at 0x7da20c7c8640>, <ast.Attribute object at 0x7da20c7c9ab0>, <ast.Name object at 0x7da20c7c8430>, <ast.Attribute object at 0x7da20c7cbdf0>, <ast.Attribute object at 0x7da20c7cbb80>, <ast.Attribute object at 0x7da20c7c9780>, <ast.Attribute object at 0x7da20c7c8df0>, <ast.Attribute object at 0x7da20c7cb3d0>, <ast.Attribute object at 0x7da20c7c8580>, <ast.Attribute object at 0x7da20c7c8a90>]]]]
if name[close] begin[:]
call[name[conn].close, parameter[]]
|
keyword[def] identifier[register_file] ( identifier[name] , identifier[member] , identifier[path] , identifier[digest] = literal[string] , identifier[conn] = keyword[None] ):
literal[string]
identifier[close] = keyword[False]
keyword[if] identifier[conn] keyword[is] keyword[None] :
identifier[close] = keyword[True]
identifier[conn] = identifier[init] ()
identifier[conn] . identifier[execute] ( literal[string] ,(
identifier[name] ,
literal[string] . identifier[format] ( identifier[path] , identifier[member] . identifier[path] ),
identifier[member] . identifier[size] ,
identifier[member] . identifier[mode] ,
identifier[digest] ,
identifier[member] . identifier[devmajor] ,
identifier[member] . identifier[devminor] ,
identifier[member] . identifier[linkname] ,
identifier[member] . identifier[linkpath] ,
identifier[member] . identifier[uname] ,
identifier[member] . identifier[gname] ,
identifier[member] . identifier[mtime]
))
keyword[if] identifier[close] :
identifier[conn] . identifier[close] ()
|
def register_file(name, member, path, digest='', conn=None):
"""
Register a file in the package database
"""
close = False
if conn is None:
close = True
conn = init() # depends on [control=['if'], data=['conn']]
conn.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (name, '{0}/{1}'.format(path, member.path), member.size, member.mode, digest, member.devmajor, member.devminor, member.linkname, member.linkpath, member.uname, member.gname, member.mtime))
if close:
conn.close() # depends on [control=['if'], data=[]]
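A usage sketch registering every member of a tarball; the archive name, package name, and install path are placeholders, and init() is the database initializer used above:
import tarfile
with tarfile.open('example-pkg.tar.gz') as pkg:
    conn = init()  # open the package database once and reuse it for all members
    try:
        for member in pkg.getmembers():
            register_file('example-pkg', member, '/opt/example', conn=conn)
    finally:
        conn.close()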
|
def dec(self, byts):
'''
Decode an envelope dict and decrypt the given bytes.
Args:
byts (bytes): Bytes to decrypt.
Returns:
bytes: Decrypted message.
'''
envl = s_msgpack.un(byts)
iv = envl.get('iv', b'')
asscd = envl.get('asscd', b'')
data = envl.get('data', b'')
decryptor = AESGCM(self.ekey)
try:
data = decryptor.decrypt(iv, data, asscd)
except Exception:
logger.exception('Error decrypting data')
return None
return data
|
def function[dec, parameter[self, byts]]:
constant[
Decode an envelope dict and decrypt the given bytes.
Args:
byts (bytes): Bytes to decrypt.
Returns:
bytes: Decrypted message.
]
variable[envl] assign[=] call[name[s_msgpack].un, parameter[name[byts]]]
variable[iv] assign[=] call[name[envl].get, parameter[constant[iv], constant[b'']]]
variable[asscd] assign[=] call[name[envl].get, parameter[constant[asscd], constant[b'']]]
variable[data] assign[=] call[name[envl].get, parameter[constant[data], constant[b'']]]
variable[decryptor] assign[=] call[name[AESGCM], parameter[name[self].ekey]]
<ast.Try object at 0x7da207f03130>
return[name[data]]
|
keyword[def] identifier[dec] ( identifier[self] , identifier[byts] ):
literal[string]
identifier[envl] = identifier[s_msgpack] . identifier[un] ( identifier[byts] )
identifier[iv] = identifier[envl] . identifier[get] ( literal[string] , literal[string] )
identifier[asscd] = identifier[envl] . identifier[get] ( literal[string] , literal[string] )
identifier[data] = identifier[envl] . identifier[get] ( literal[string] , literal[string] )
identifier[decryptor] = identifier[AESGCM] ( identifier[self] . identifier[ekey] )
keyword[try] :
identifier[data] = identifier[decryptor] . identifier[decrypt] ( identifier[iv] , identifier[data] , identifier[asscd] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] )
keyword[return] keyword[None]
keyword[return] identifier[data]
|
def dec(self, byts):
"""
Decode an envelope dict and decrypt the given bytes.
Args:
byts (bytes): Bytes to decrypt.
Returns:
bytes: Decrypted message.
"""
envl = s_msgpack.un(byts)
iv = envl.get('iv', b'')
asscd = envl.get('asscd', b'')
data = envl.get('data', b'')
decryptor = AESGCM(self.ekey)
try:
data = decryptor.decrypt(iv, data, asscd) # depends on [control=['try'], data=[]]
except Exception:
logger.exception('Error decrypting data')
return None # depends on [control=['except'], data=[]]
return data
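A round-trip sketch showing the envelope layout dec() expects; treating s_msgpack.en as the encoder counterpart of s_msgpack.un, and an object carrying ekey, are assumptions:
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
ekey = AESGCM.generate_key(bit_length=256)
iv = os.urandom(12)  # 96-bit nonce, the recommended size for GCM
asscd = b'associated-data'
data = AESGCM(ekey).encrypt(iv, b'secret message', asscd)
byts = s_msgpack.en({'iv': iv, 'asscd': asscd, 'data': data})
# An instance whose self.ekey equals ekey returns b'secret message' from dec(byts).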
|
def _request_check(self, address):
"""Wake one monitor. Hold the lock when calling this."""
server = self._servers.get(address)
# "server" is None if another thread removed it from the topology.
if server:
server.request_check()
|
def function[_request_check, parameter[self, address]]:
constant[Wake one monitor. Hold the lock when calling this.]
variable[server] assign[=] call[name[self]._servers.get, parameter[name[address]]]
if name[server] begin[:]
call[name[server].request_check, parameter[]]
|
keyword[def] identifier[_request_check] ( identifier[self] , identifier[address] ):
literal[string]
identifier[server] = identifier[self] . identifier[_servers] . identifier[get] ( identifier[address] )
keyword[if] identifier[server] :
identifier[server] . identifier[request_check] ()
|
def _request_check(self, address):
"""Wake one monitor. Hold the lock when calling this."""
server = self._servers.get(address)
# "server" is None if another thread removed it from the topology.
if server:
server.request_check() # depends on [control=['if'], data=[]]
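A caller sketch honoring the locking convention from the docstring; self._lock is an assumption about the surrounding topology object:
with self._lock:
    # Hold the lock so the server cannot be removed from the topology mid-call.
    self._request_check(('db.example.com', 27017))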
|
def search_dict(data, key):
"""
Search for a key in a nested dict, or list of nested dicts, and return the values.
:param data: dict/list to search
:param key: key to find
:return: matches for key
"""
if isinstance(data, dict):
for dkey, value in data.items():
if dkey == key:
yield value
for result in search_dict(value, key):
yield result
elif isinstance(data, list):
for value in data:
for result in search_dict(value, key):
yield result
|
def function[search_dict, parameter[data, key]]:
constant[
Search for a key in a nested dict, or list of nested dicts, and return the values.
:param data: dict/list to search
:param key: key to find
:return: matches for key
]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2041da290>, <ast.Name object at 0x7da2041da5f0>]]] in starred[call[name[data].items, parameter[]]] begin[:]
if compare[name[dkey] equal[==] name[key]] begin[:]
<ast.Yield object at 0x7da2041db910>
for taget[name[result]] in starred[call[name[search_dict], parameter[name[value], name[key]]]] begin[:]
<ast.Yield object at 0x7da1b0356860>
|
keyword[def] identifier[search_dict] ( identifier[data] , identifier[key] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[for] identifier[dkey] , identifier[value] keyword[in] identifier[data] . identifier[items] ():
keyword[if] identifier[dkey] == identifier[key] :
keyword[yield] identifier[value]
keyword[for] identifier[result] keyword[in] identifier[search_dict] ( identifier[value] , identifier[key] ):
keyword[yield] identifier[result]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ):
keyword[for] identifier[value] keyword[in] identifier[data] :
keyword[for] identifier[result] keyword[in] identifier[search_dict] ( identifier[value] , identifier[key] ):
keyword[yield] identifier[result]
|
def search_dict(data, key):
"""
Search for a key in a nested dict, or list of nested dicts, and return the values.
:param data: dict/list to search
:param key: key to find
:return: matches for key
"""
if isinstance(data, dict):
for (dkey, value) in data.items():
if dkey == key:
yield value # depends on [control=['if'], data=[]]
for result in search_dict(value, key):
yield result # depends on [control=['for'], data=['result']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(data, list):
for value in data:
for result in search_dict(value, key):
yield result # depends on [control=['for'], data=['result']] # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]]
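A usage sketch over a small nested structure:
config = {
    'server': {'host': 'localhost', 'port': 8080},
    'mirrors': [{'host': 'backup1'}, {'host': 'backup2'}],
}
print(list(search_dict(config, 'host')))
# -> ['localhost', 'backup1', 'backup2']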
|
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = EventOccurrence.objects.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
|
def function[_check_values, parameter[self, value]]:
constant[
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
]
variable[key] assign[=] <ast.BoolOp object at 0x7da1b1378910>
<ast.Try object at 0x7da1b137abc0>
for taget[name[pk]] in starred[name[value]] begin[:]
<ast.Try object at 0x7da1b137bd00>
variable[qs] assign[=] call[name[EventOccurrence].objects.filter, parameter[]]
variable[pks] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b1378be0>]]
for taget[name[val]] in starred[name[value]] begin[:]
if compare[call[name[force_text], parameter[name[val]]] <ast.NotIn object at 0x7da2590d7190> name[pks]] begin[:]
<ast.Raise object at 0x7da1b1378f70>
return[name[qs]]
|
keyword[def] identifier[_check_values] ( identifier[self] , identifier[value] ):
literal[string]
identifier[key] = identifier[self] . identifier[to_field_name] keyword[or] literal[string]
keyword[try] :
identifier[value] = identifier[frozenset] ( identifier[value] )
keyword[except] identifier[TypeError] :
keyword[raise] identifier[ValidationError] (
identifier[self] . identifier[error_messages] [ literal[string] ],
identifier[code] = literal[string] ,
)
keyword[for] identifier[pk] keyword[in] identifier[value] :
keyword[try] :
identifier[self] . identifier[queryset] . identifier[filter] (**{ identifier[key] : identifier[pk] })
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[raise] identifier[ValidationError] (
identifier[self] . identifier[error_messages] [ literal[string] ],
identifier[code] = literal[string] ,
identifier[params] ={ literal[string] : identifier[pk] },
)
identifier[qs] = identifier[EventOccurrence] . identifier[objects] . identifier[filter] (**{ literal[string] % identifier[key] : identifier[value] })
identifier[pks] = identifier[set] ( identifier[force_text] ( identifier[getattr] ( identifier[o] , identifier[key] )) keyword[for] identifier[o] keyword[in] identifier[qs] )
keyword[for] identifier[val] keyword[in] identifier[value] :
keyword[if] identifier[force_text] ( identifier[val] ) keyword[not] keyword[in] identifier[pks] :
keyword[raise] identifier[ValidationError] (
identifier[self] . identifier[error_messages] [ literal[string] ],
identifier[code] = literal[string] ,
identifier[params] ={ literal[string] : identifier[val] },
)
keyword[return] identifier[qs]
|
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
    # deduplicate given values to avoid creating many querysets or
    # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value) # depends on [control=['try'], data=[]]
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(self.error_messages['list'], code='list') # depends on [control=['except'], data=[]]
for pk in value:
try:
self.queryset.filter(**{key: pk}) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid_pk_value'], code='invalid_pk_value', params={'pk': pk}) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['pk']]
qs = EventOccurrence.objects.filter(**{'%s__in' % key: value})
pks = set((force_text(getattr(o, key)) for o in qs))
for val in value:
if force_text(val) not in pks:
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['val']]
return qs
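A behaviour sketch, assuming field is a form-field instance exposing this method and EventOccurrence rows with pks 1 and 2 exist:
qs = field._check_values(['1', '2'])  # QuerySet containing both occurrences
field._check_values(['1', '999'])     # raises ValidationError, code='invalid_choice'
field._check_values(1)                # raises ValidationError, code='list' (not iterable)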
|
def save(self):
"""
        :return: this OS instance, saved (created or updated) on the Ariane server
"""
LOGGER.debug("OSInstance.save")
post_payload = {}
consolidated_osi_id = []
consolidated_ipa_id = []
consolidated_nic_id = []
consolidated_app_id = []
consolidated_env_id = []
consolidated_snet_id = []
consolidated_team_id = []
if self.id is not None:
post_payload['osInstanceID'] = self.id
if self.name is not None:
post_payload['osInstanceName'] = self.name
if self.description is not None:
post_payload['osInstanceDescription'] = self.description
if self.admin_gate_uri is not None:
post_payload['osInstanceAdminGateURI'] = self.admin_gate_uri
if self.embedding_osi_id is not None:
post_payload['osInstanceEmbeddingOSInstanceID'] = self.embedding_osi_id
if self.ost_id is not None:
post_payload['osInstanceOSTypeID'] = self.ost_id
if self.embedded_osi_ids is not None:
consolidated_osi_id = copy.deepcopy(self.embedded_osi_ids)
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
if osi_2_rm.id is None:
osi_2_rm.sync()
consolidated_osi_id.remove(osi_2_rm.id)
if self.embedded_osi_2_add is not None:
for osi_id_2_add in self.embedded_osi_2_add:
if osi_id_2_add.id is None:
osi_id_2_add.save()
consolidated_osi_id.append(osi_id_2_add.id)
post_payload['osInstanceEmbeddedOSInstancesID'] = consolidated_osi_id
if self.ip_address_ids is not None:
consolidated_ipa_id = copy.deepcopy(self.ip_address_ids)
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
if ipa_2_rm.id is None:
ipa_2_rm.sync()
consolidated_ipa_id.remove(ipa_2_rm.id)
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
if ipa_2_add.id is None:
ipa_2_add.save()
consolidated_ipa_id.append(ipa_2_add.id)
post_payload['osInstanceIPAddressesID'] = consolidated_ipa_id
if self.nic_ids is not None:
consolidated_nic_id = copy.deepcopy(self.nic_ids)
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
if nic_2_rm.id is None:
nic_2_rm.sync()
consolidated_nic_id.remove(nic_2_rm.id)
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
if nic_2_add.id is None:
nic_2_add.save()
consolidated_nic_id.append(nic_2_add.id)
post_payload['osInstanceNICsID'] = consolidated_nic_id
if self.subnet_ids is not None:
consolidated_snet_id = copy.deepcopy(self.subnet_ids)
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
if snet_2_rm.id is None:
snet_2_rm.sync()
consolidated_snet_id.remove(snet_2_rm.id)
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
if snet_2_add.id is None:
snet_2_add.save()
consolidated_snet_id.append(snet_2_add.id)
post_payload['osInstanceSubnetsID'] = consolidated_snet_id
if self.application_ids is not None:
consolidated_app_id = copy.deepcopy(self.application_ids)
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
if app_2_rm.id is None:
app_2_rm.sync()
consolidated_app_id.remove(app_2_rm.id)
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
if app_2_add.id is None:
app_2_add.save()
consolidated_app_id.append(app_2_add.id)
post_payload['osInstanceApplicationsID'] = consolidated_app_id
if self.environment_ids is not None:
consolidated_env_id = copy.deepcopy(self.environment_ids)
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
if env_2_rm.id is None:
env_2_rm.sync()
consolidated_env_id.remove(env_2_rm.id)
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
if env_2_add.id is None:
env_2_add.save()
consolidated_env_id.append(env_2_add.id)
post_payload['osInstanceEnvironmentsID'] = consolidated_env_id
if self.team_ids is not None:
consolidated_team_id = copy.deepcopy(self.team_ids)
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
if team_2_rm.id is None:
team_2_rm.sync()
consolidated_team_id.remove(team_2_rm.id)
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
if team_2_add.id is None:
team_2_add.save()
consolidated_team_id.append(team_2_add.id)
post_payload['osInstanceTeamsID'] = consolidated_team_id
args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.save - Problem while saving OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.id = response.response_content['osInstanceID']
if self.embedded_osi_2_add is not None:
for osi_2_add in self.embedded_osi_2_add:
osi_2_add.sync()
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
osi_2_rm.sync()
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
ipa_2_add.sync()
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
ipa_2_rm.sync()
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
nic_2_add.sync()
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
nic_2_rm.sync()
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
snet_2_add.sync()
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
snet_2_rm.sync()
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
app_2_add.sync()
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
app_2_rm.sync()
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
env_2_add.sync()
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
env_2_rm.sync()
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
team_2_add.sync()
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
team_2_rm.sync()
self.embedded_osi_2_add.clear()
self.embedded_osi_2_rm.clear()
self.ip_address_2_add.clear()
self.ip_address_2_rm.clear()
self.nic_2_add.clear()
self.nic_2_rm.clear()
self.subnets_2_add.clear()
self.subnets_2_rm.clear()
self.application_2_add.clear()
self.application_2_rm.clear()
self.environment_2_add.clear()
self.environment_2_rm.clear()
self.team_2_add.clear()
self.team_2_rm.clear()
self.sync()
return self
|
def function[save, parameter[self]]:
constant[
        :return: this OS instance, saved (created or updated) on the Ariane server
]
call[name[LOGGER].debug, parameter[constant[OSInstance.save]]]
variable[post_payload] assign[=] dictionary[[], []]
variable[consolidated_osi_id] assign[=] list[[]]
variable[consolidated_ipa_id] assign[=] list[[]]
variable[consolidated_nic_id] assign[=] list[[]]
variable[consolidated_app_id] assign[=] list[[]]
variable[consolidated_env_id] assign[=] list[[]]
variable[consolidated_snet_id] assign[=] list[[]]
variable[consolidated_team_id] assign[=] list[[]]
if compare[name[self].id is_not constant[None]] begin[:]
call[name[post_payload]][constant[osInstanceID]] assign[=] name[self].id
if compare[name[self].name is_not constant[None]] begin[:]
call[name[post_payload]][constant[osInstanceName]] assign[=] name[self].name
if compare[name[self].description is_not constant[None]] begin[:]
call[name[post_payload]][constant[osInstanceDescription]] assign[=] name[self].description
if compare[name[self].admin_gate_uri is_not constant[None]] begin[:]
call[name[post_payload]][constant[osInstanceAdminGateURI]] assign[=] name[self].admin_gate_uri
if compare[name[self].embedding_osi_id is_not constant[None]] begin[:]
call[name[post_payload]][constant[osInstanceEmbeddingOSInstanceID]] assign[=] name[self].embedding_osi_id
if compare[name[self].ost_id is_not constant[None]] begin[:]
call[name[post_payload]][constant[osInstanceOSTypeID]] assign[=] name[self].ost_id
if compare[name[self].embedded_osi_ids is_not constant[None]] begin[:]
variable[consolidated_osi_id] assign[=] call[name[copy].deepcopy, parameter[name[self].embedded_osi_ids]]
if compare[name[self].embedded_osi_2_rm is_not constant[None]] begin[:]
for taget[name[osi_2_rm]] in starred[name[self].embedded_osi_2_rm] begin[:]
if compare[name[osi_2_rm].id is constant[None]] begin[:]
call[name[osi_2_rm].sync, parameter[]]
call[name[consolidated_osi_id].remove, parameter[name[osi_2_rm].id]]
if compare[name[self].embedded_osi_2_add is_not constant[None]] begin[:]
for taget[name[osi_id_2_add]] in starred[name[self].embedded_osi_2_add] begin[:]
if compare[name[osi_id_2_add].id is constant[None]] begin[:]
call[name[osi_id_2_add].save, parameter[]]
call[name[consolidated_osi_id].append, parameter[name[osi_id_2_add].id]]
call[name[post_payload]][constant[osInstanceEmbeddedOSInstancesID]] assign[=] name[consolidated_osi_id]
if compare[name[self].ip_address_ids is_not constant[None]] begin[:]
variable[consolidated_ipa_id] assign[=] call[name[copy].deepcopy, parameter[name[self].ip_address_ids]]
if compare[name[self].ip_address_2_rm is_not constant[None]] begin[:]
for taget[name[ipa_2_rm]] in starred[name[self].ip_address_2_rm] begin[:]
if compare[name[ipa_2_rm].id is constant[None]] begin[:]
call[name[ipa_2_rm].sync, parameter[]]
call[name[consolidated_ipa_id].remove, parameter[name[ipa_2_rm].id]]
if compare[name[self].ip_address_2_add is_not constant[None]] begin[:]
for taget[name[ipa_2_add]] in starred[name[self].ip_address_2_add] begin[:]
if compare[name[ipa_2_add].id is constant[None]] begin[:]
call[name[ipa_2_add].save, parameter[]]
call[name[consolidated_ipa_id].append, parameter[name[ipa_2_add].id]]
call[name[post_payload]][constant[osInstanceIPAddressesID]] assign[=] name[consolidated_ipa_id]
if compare[name[self].nic_ids is_not constant[None]] begin[:]
variable[consolidated_nic_id] assign[=] call[name[copy].deepcopy, parameter[name[self].nic_ids]]
if compare[name[self].nic_2_rm is_not constant[None]] begin[:]
for taget[name[nic_2_rm]] in starred[name[self].nic_2_rm] begin[:]
if compare[name[nic_2_rm].id is constant[None]] begin[:]
call[name[nic_2_rm].sync, parameter[]]
call[name[consolidated_nic_id].remove, parameter[name[nic_2_rm].id]]
if compare[name[self].nic_2_add is_not constant[None]] begin[:]
for taget[name[nic_2_add]] in starred[name[self].nic_2_add] begin[:]
if compare[name[nic_2_add].id is constant[None]] begin[:]
call[name[nic_2_add].save, parameter[]]
call[name[consolidated_nic_id].append, parameter[name[nic_2_add].id]]
call[name[post_payload]][constant[osInstanceNICsID]] assign[=] name[consolidated_nic_id]
if compare[name[self].subnet_ids is_not constant[None]] begin[:]
variable[consolidated_snet_id] assign[=] call[name[copy].deepcopy, parameter[name[self].subnet_ids]]
if compare[name[self].subnets_2_rm is_not constant[None]] begin[:]
for taget[name[snet_2_rm]] in starred[name[self].subnets_2_rm] begin[:]
if compare[name[snet_2_rm].id is constant[None]] begin[:]
call[name[snet_2_rm].sync, parameter[]]
call[name[consolidated_snet_id].remove, parameter[name[snet_2_rm].id]]
if compare[name[self].subnets_2_add is_not constant[None]] begin[:]
for taget[name[snet_2_add]] in starred[name[self].subnets_2_add] begin[:]
if compare[name[snet_2_add].id is constant[None]] begin[:]
call[name[snet_2_add].save, parameter[]]
call[name[consolidated_snet_id].append, parameter[name[snet_2_add].id]]
call[name[post_payload]][constant[osInstanceSubnetsID]] assign[=] name[consolidated_snet_id]
if compare[name[self].application_ids is_not constant[None]] begin[:]
variable[consolidated_app_id] assign[=] call[name[copy].deepcopy, parameter[name[self].application_ids]]
if compare[name[self].application_2_rm is_not constant[None]] begin[:]
for taget[name[app_2_rm]] in starred[name[self].application_2_rm] begin[:]
if compare[name[app_2_rm].id is constant[None]] begin[:]
call[name[app_2_rm].sync, parameter[]]
call[name[consolidated_app_id].remove, parameter[name[app_2_rm].id]]
if compare[name[self].application_2_add is_not constant[None]] begin[:]
for taget[name[app_2_add]] in starred[name[self].application_2_add] begin[:]
if compare[name[app_2_add].id is constant[None]] begin[:]
call[name[app_2_add].save, parameter[]]
call[name[consolidated_app_id].append, parameter[name[app_2_add].id]]
call[name[post_payload]][constant[osInstanceApplicationsID]] assign[=] name[consolidated_app_id]
if compare[name[self].environment_ids is_not constant[None]] begin[:]
variable[consolidated_env_id] assign[=] call[name[copy].deepcopy, parameter[name[self].environment_ids]]
if compare[name[self].environment_2_rm is_not constant[None]] begin[:]
for taget[name[env_2_rm]] in starred[name[self].environment_2_rm] begin[:]
if compare[name[env_2_rm].id is constant[None]] begin[:]
call[name[env_2_rm].sync, parameter[]]
call[name[consolidated_env_id].remove, parameter[name[env_2_rm].id]]
if compare[name[self].environment_2_add is_not constant[None]] begin[:]
for taget[name[env_2_add]] in starred[name[self].environment_2_add] begin[:]
if compare[name[env_2_add].id is constant[None]] begin[:]
call[name[env_2_add].save, parameter[]]
call[name[consolidated_env_id].append, parameter[name[env_2_add].id]]
call[name[post_payload]][constant[osInstanceEnvironmentsID]] assign[=] name[consolidated_env_id]
if compare[name[self].team_ids is_not constant[None]] begin[:]
variable[consolidated_team_id] assign[=] call[name[copy].deepcopy, parameter[name[self].team_ids]]
if compare[name[self].team_2_rm is_not constant[None]] begin[:]
for taget[name[team_2_rm]] in starred[name[self].team_2_rm] begin[:]
if compare[name[team_2_rm].id is constant[None]] begin[:]
call[name[team_2_rm].sync, parameter[]]
call[name[consolidated_team_id].remove, parameter[name[team_2_rm].id]]
if compare[name[self].team_2_add is_not constant[None]] begin[:]
for taget[name[team_2_add]] in starred[name[self].team_2_add] begin[:]
if compare[name[team_2_add].id is constant[None]] begin[:]
call[name[team_2_add].save, parameter[]]
call[name[consolidated_team_id].append, parameter[name[team_2_add].id]]
call[name[post_payload]][constant[osInstanceTeamsID]] assign[=] name[consolidated_team_id]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b1454490>, <ast.Constant object at 0x7da1b14552a0>, <ast.Constant object at 0x7da1b1455270>], [<ast.Constant object at 0x7da1b1454820>, <ast.Constant object at 0x7da1b1455ab0>, <ast.Dict object at 0x7da1b14554e0>]]
variable[response] assign[=] call[name[OSInstanceService].requester.call, parameter[name[args]]]
if compare[name[response].rc not_equal[!=] constant[0]] begin[:]
call[name[LOGGER].warning, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[OSInstance.save - Problem while saving OS instance ] + name[self].name] + constant[. Reason: ]] + call[name[str], parameter[name[response].response_content]]] + constant[-]] + call[name[str], parameter[name[response].error_message]]] + constant[ (]] + call[name[str], parameter[name[response].rc]]] + constant[)]]]]
call[name[self].embedded_osi_2_add.clear, parameter[]]
call[name[self].embedded_osi_2_rm.clear, parameter[]]
call[name[self].ip_address_2_add.clear, parameter[]]
call[name[self].ip_address_2_rm.clear, parameter[]]
call[name[self].nic_2_add.clear, parameter[]]
call[name[self].nic_2_rm.clear, parameter[]]
call[name[self].subnets_2_add.clear, parameter[]]
call[name[self].subnets_2_rm.clear, parameter[]]
call[name[self].application_2_add.clear, parameter[]]
call[name[self].application_2_rm.clear, parameter[]]
call[name[self].environment_2_add.clear, parameter[]]
call[name[self].environment_2_rm.clear, parameter[]]
call[name[self].team_2_add.clear, parameter[]]
call[name[self].team_2_rm.clear, parameter[]]
call[name[self].sync, parameter[]]
return[name[self]]
|
keyword[def] identifier[save] ( identifier[self] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[post_payload] ={}
identifier[consolidated_osi_id] =[]
identifier[consolidated_ipa_id] =[]
identifier[consolidated_nic_id] =[]
identifier[consolidated_app_id] =[]
identifier[consolidated_env_id] =[]
identifier[consolidated_snet_id] =[]
identifier[consolidated_team_id] =[]
keyword[if] identifier[self] . identifier[id] keyword[is] keyword[not] keyword[None] :
identifier[post_payload] [ literal[string] ]= identifier[self] . identifier[id]
keyword[if] identifier[self] . identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[post_payload] [ literal[string] ]= identifier[self] . identifier[name]
keyword[if] identifier[self] . identifier[description] keyword[is] keyword[not] keyword[None] :
identifier[post_payload] [ literal[string] ]= identifier[self] . identifier[description]
keyword[if] identifier[self] . identifier[admin_gate_uri] keyword[is] keyword[not] keyword[None] :
identifier[post_payload] [ literal[string] ]= identifier[self] . identifier[admin_gate_uri]
keyword[if] identifier[self] . identifier[embedding_osi_id] keyword[is] keyword[not] keyword[None] :
identifier[post_payload] [ literal[string] ]= identifier[self] . identifier[embedding_osi_id]
keyword[if] identifier[self] . identifier[ost_id] keyword[is] keyword[not] keyword[None] :
identifier[post_payload] [ literal[string] ]= identifier[self] . identifier[ost_id]
keyword[if] identifier[self] . identifier[embedded_osi_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_osi_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[embedded_osi_ids] )
keyword[if] identifier[self] . identifier[embedded_osi_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[osi_2_rm] keyword[in] identifier[self] . identifier[embedded_osi_2_rm] :
keyword[if] identifier[osi_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[osi_2_rm] . identifier[sync] ()
identifier[consolidated_osi_id] . identifier[remove] ( identifier[osi_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[embedded_osi_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[osi_id_2_add] keyword[in] identifier[self] . identifier[embedded_osi_2_add] :
keyword[if] identifier[osi_id_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[osi_id_2_add] . identifier[save] ()
identifier[consolidated_osi_id] . identifier[append] ( identifier[osi_id_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_osi_id]
keyword[if] identifier[self] . identifier[ip_address_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_ipa_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[ip_address_ids] )
keyword[if] identifier[self] . identifier[ip_address_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[ipa_2_rm] keyword[in] identifier[self] . identifier[ip_address_2_rm] :
keyword[if] identifier[ipa_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[ipa_2_rm] . identifier[sync] ()
identifier[consolidated_ipa_id] . identifier[remove] ( identifier[ipa_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[ip_address_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[ipa_2_add] keyword[in] identifier[self] . identifier[ip_address_2_add] :
keyword[if] identifier[ipa_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[ipa_2_add] . identifier[save] ()
identifier[consolidated_ipa_id] . identifier[append] ( identifier[ipa_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_ipa_id]
keyword[if] identifier[self] . identifier[nic_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_nic_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[nic_ids] )
keyword[if] identifier[self] . identifier[nic_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[nic_2_rm] keyword[in] identifier[self] . identifier[nic_2_rm] :
keyword[if] identifier[nic_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[nic_2_rm] . identifier[sync] ()
identifier[consolidated_nic_id] . identifier[remove] ( identifier[nic_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[nic_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[nic_2_add] keyword[in] identifier[self] . identifier[nic_2_add] :
keyword[if] identifier[nic_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[nic_2_add] . identifier[save] ()
identifier[consolidated_nic_id] . identifier[append] ( identifier[nic_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_nic_id]
keyword[if] identifier[self] . identifier[subnet_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_snet_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[subnet_ids] )
keyword[if] identifier[self] . identifier[subnets_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[snet_2_rm] keyword[in] identifier[self] . identifier[subnets_2_rm] :
keyword[if] identifier[snet_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[snet_2_rm] . identifier[sync] ()
identifier[consolidated_snet_id] . identifier[remove] ( identifier[snet_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[subnets_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[snet_2_add] keyword[in] identifier[self] . identifier[subnets_2_add] :
keyword[if] identifier[snet_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[snet_2_add] . identifier[save] ()
identifier[consolidated_snet_id] . identifier[append] ( identifier[snet_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_snet_id]
keyword[if] identifier[self] . identifier[application_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_app_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[application_ids] )
keyword[if] identifier[self] . identifier[application_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[app_2_rm] keyword[in] identifier[self] . identifier[application_2_rm] :
keyword[if] identifier[app_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[app_2_rm] . identifier[sync] ()
identifier[consolidated_app_id] . identifier[remove] ( identifier[app_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[application_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[app_2_add] keyword[in] identifier[self] . identifier[application_2_add] :
keyword[if] identifier[app_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[app_2_add] . identifier[save] ()
identifier[consolidated_app_id] . identifier[append] ( identifier[app_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_app_id]
keyword[if] identifier[self] . identifier[environment_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_env_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[environment_ids] )
keyword[if] identifier[self] . identifier[environment_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[env_2_rm] keyword[in] identifier[self] . identifier[environment_2_rm] :
keyword[if] identifier[env_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[env_2_rm] . identifier[sync] ()
identifier[consolidated_env_id] . identifier[remove] ( identifier[env_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[environment_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[env_2_add] keyword[in] identifier[self] . identifier[environment_2_add] :
keyword[if] identifier[env_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[env_2_add] . identifier[save] ()
identifier[consolidated_env_id] . identifier[append] ( identifier[env_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_env_id]
keyword[if] identifier[self] . identifier[team_ids] keyword[is] keyword[not] keyword[None] :
identifier[consolidated_team_id] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[team_ids] )
keyword[if] identifier[self] . identifier[team_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[team_2_rm] keyword[in] identifier[self] . identifier[team_2_rm] :
keyword[if] identifier[team_2_rm] . identifier[id] keyword[is] keyword[None] :
identifier[team_2_rm] . identifier[sync] ()
identifier[consolidated_team_id] . identifier[remove] ( identifier[team_2_rm] . identifier[id] )
keyword[if] identifier[self] . identifier[team_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[team_2_add] keyword[in] identifier[self] . identifier[team_2_add] :
keyword[if] identifier[team_2_add] . identifier[id] keyword[is] keyword[None] :
identifier[team_2_add] . identifier[save] ()
identifier[consolidated_team_id] . identifier[append] ( identifier[team_2_add] . identifier[id] )
identifier[post_payload] [ literal[string] ]= identifier[consolidated_team_id]
identifier[args] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :{ literal[string] : identifier[json] . identifier[dumps] ( identifier[post_payload] )}}
identifier[response] = identifier[OSInstanceService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[if] identifier[response] . identifier[rc] != literal[int] :
identifier[LOGGER] . identifier[warning] (
literal[string] + identifier[self] . identifier[name] +
literal[string] + identifier[str] ( identifier[response] . identifier[response_content] )+ literal[string] + identifier[str] ( identifier[response] . identifier[error_message] )+
literal[string] + identifier[str] ( identifier[response] . identifier[rc] )+ literal[string]
)
keyword[else] :
identifier[self] . identifier[id] = identifier[response] . identifier[response_content] [ literal[string] ]
keyword[if] identifier[self] . identifier[embedded_osi_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[osi_2_add] keyword[in] identifier[self] . identifier[embedded_osi_2_add] :
identifier[osi_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[embedded_osi_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[osi_2_rm] keyword[in] identifier[self] . identifier[embedded_osi_2_rm] :
identifier[osi_2_rm] . identifier[sync] ()
keyword[if] identifier[self] . identifier[ip_address_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[ipa_2_add] keyword[in] identifier[self] . identifier[ip_address_2_add] :
identifier[ipa_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[ip_address_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[ipa_2_rm] keyword[in] identifier[self] . identifier[ip_address_2_rm] :
identifier[ipa_2_rm] . identifier[sync] ()
keyword[if] identifier[self] . identifier[nic_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[nic_2_add] keyword[in] identifier[self] . identifier[nic_2_add] :
identifier[nic_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[nic_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[nic_2_rm] keyword[in] identifier[self] . identifier[nic_2_rm] :
identifier[nic_2_rm] . identifier[sync] ()
keyword[if] identifier[self] . identifier[subnets_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[snet_2_add] keyword[in] identifier[self] . identifier[subnets_2_add] :
identifier[snet_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[subnets_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[snet_2_rm] keyword[in] identifier[self] . identifier[subnets_2_rm] :
identifier[snet_2_rm] . identifier[sync] ()
keyword[if] identifier[self] . identifier[application_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[app_2_add] keyword[in] identifier[self] . identifier[application_2_add] :
identifier[app_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[application_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[app_2_rm] keyword[in] identifier[self] . identifier[application_2_rm] :
identifier[app_2_rm] . identifier[sync] ()
keyword[if] identifier[self] . identifier[environment_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[env_2_add] keyword[in] identifier[self] . identifier[environment_2_add] :
identifier[env_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[environment_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[env_2_rm] keyword[in] identifier[self] . identifier[environment_2_rm] :
identifier[env_2_rm] . identifier[sync] ()
keyword[if] identifier[self] . identifier[team_2_add] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[team_2_add] keyword[in] identifier[self] . identifier[team_2_add] :
identifier[team_2_add] . identifier[sync] ()
keyword[if] identifier[self] . identifier[team_2_rm] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[team_2_rm] keyword[in] identifier[self] . identifier[team_2_rm] :
identifier[team_2_rm] . identifier[sync] ()
identifier[self] . identifier[embedded_osi_2_add] . identifier[clear] ()
identifier[self] . identifier[embedded_osi_2_rm] . identifier[clear] ()
identifier[self] . identifier[ip_address_2_add] . identifier[clear] ()
identifier[self] . identifier[ip_address_2_rm] . identifier[clear] ()
identifier[self] . identifier[nic_2_add] . identifier[clear] ()
identifier[self] . identifier[nic_2_rm] . identifier[clear] ()
identifier[self] . identifier[subnets_2_add] . identifier[clear] ()
identifier[self] . identifier[subnets_2_rm] . identifier[clear] ()
identifier[self] . identifier[application_2_add] . identifier[clear] ()
identifier[self] . identifier[application_2_rm] . identifier[clear] ()
identifier[self] . identifier[environment_2_add] . identifier[clear] ()
identifier[self] . identifier[environment_2_rm] . identifier[clear] ()
identifier[self] . identifier[team_2_add] . identifier[clear] ()
identifier[self] . identifier[team_2_rm] . identifier[clear] ()
identifier[self] . identifier[sync] ()
keyword[return] identifier[self]
|
def save(self):
"""
        :return: this OS instance, saved (created or updated) on the Ariane server
"""
LOGGER.debug('OSInstance.save')
post_payload = {}
consolidated_osi_id = []
consolidated_ipa_id = []
consolidated_nic_id = []
consolidated_app_id = []
consolidated_env_id = []
consolidated_snet_id = []
consolidated_team_id = []
if self.id is not None:
post_payload['osInstanceID'] = self.id # depends on [control=['if'], data=[]]
if self.name is not None:
post_payload['osInstanceName'] = self.name # depends on [control=['if'], data=[]]
if self.description is not None:
post_payload['osInstanceDescription'] = self.description # depends on [control=['if'], data=[]]
if self.admin_gate_uri is not None:
post_payload['osInstanceAdminGateURI'] = self.admin_gate_uri # depends on [control=['if'], data=[]]
if self.embedding_osi_id is not None:
post_payload['osInstanceEmbeddingOSInstanceID'] = self.embedding_osi_id # depends on [control=['if'], data=[]]
if self.ost_id is not None:
post_payload['osInstanceOSTypeID'] = self.ost_id # depends on [control=['if'], data=[]]
if self.embedded_osi_ids is not None:
consolidated_osi_id = copy.deepcopy(self.embedded_osi_ids) # depends on [control=['if'], data=[]]
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
if osi_2_rm.id is None:
osi_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_osi_id.remove(osi_2_rm.id) # depends on [control=['for'], data=['osi_2_rm']] # depends on [control=['if'], data=[]]
if self.embedded_osi_2_add is not None:
for osi_id_2_add in self.embedded_osi_2_add:
if osi_id_2_add.id is None:
osi_id_2_add.save() # depends on [control=['if'], data=[]]
consolidated_osi_id.append(osi_id_2_add.id) # depends on [control=['for'], data=['osi_id_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceEmbeddedOSInstancesID'] = consolidated_osi_id
if self.ip_address_ids is not None:
consolidated_ipa_id = copy.deepcopy(self.ip_address_ids) # depends on [control=['if'], data=[]]
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
if ipa_2_rm.id is None:
ipa_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_ipa_id.remove(ipa_2_rm.id) # depends on [control=['for'], data=['ipa_2_rm']] # depends on [control=['if'], data=[]]
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
if ipa_2_add.id is None:
ipa_2_add.save() # depends on [control=['if'], data=[]]
consolidated_ipa_id.append(ipa_2_add.id) # depends on [control=['for'], data=['ipa_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceIPAddressesID'] = consolidated_ipa_id
if self.nic_ids is not None:
consolidated_nic_id = copy.deepcopy(self.nic_ids) # depends on [control=['if'], data=[]]
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
if nic_2_rm.id is None:
nic_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_nic_id.remove(nic_2_rm.id) # depends on [control=['for'], data=['nic_2_rm']] # depends on [control=['if'], data=[]]
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
if nic_2_add.id is None:
nic_2_add.save() # depends on [control=['if'], data=[]]
consolidated_nic_id.append(nic_2_add.id) # depends on [control=['for'], data=['nic_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceNICsID'] = consolidated_nic_id
if self.subnet_ids is not None:
consolidated_snet_id = copy.deepcopy(self.subnet_ids) # depends on [control=['if'], data=[]]
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
if snet_2_rm.id is None:
snet_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_snet_id.remove(snet_2_rm.id) # depends on [control=['for'], data=['snet_2_rm']] # depends on [control=['if'], data=[]]
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
if snet_2_add.id is None:
snet_2_add.save() # depends on [control=['if'], data=[]]
consolidated_snet_id.append(snet_2_add.id) # depends on [control=['for'], data=['snet_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceSubnetsID'] = consolidated_snet_id
if self.application_ids is not None:
consolidated_app_id = copy.deepcopy(self.application_ids) # depends on [control=['if'], data=[]]
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
if app_2_rm.id is None:
app_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_app_id.remove(app_2_rm.id) # depends on [control=['for'], data=['app_2_rm']] # depends on [control=['if'], data=[]]
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
if app_2_add.id is None:
app_2_add.save() # depends on [control=['if'], data=[]]
consolidated_app_id.append(app_2_add.id) # depends on [control=['for'], data=['app_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceApplicationsID'] = consolidated_app_id
if self.environment_ids is not None:
consolidated_env_id = copy.deepcopy(self.environment_ids) # depends on [control=['if'], data=[]]
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
if env_2_rm.id is None:
env_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_env_id.remove(env_2_rm.id) # depends on [control=['for'], data=['env_2_rm']] # depends on [control=['if'], data=[]]
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
if env_2_add.id is None:
env_2_add.save() # depends on [control=['if'], data=[]]
consolidated_env_id.append(env_2_add.id) # depends on [control=['for'], data=['env_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceEnvironmentsID'] = consolidated_env_id
if self.team_ids is not None:
consolidated_team_id = copy.deepcopy(self.team_ids) # depends on [control=['if'], data=[]]
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
if team_2_rm.id is None:
team_2_rm.sync() # depends on [control=['if'], data=[]]
consolidated_team_id.remove(team_2_rm.id) # depends on [control=['for'], data=['team_2_rm']] # depends on [control=['if'], data=[]]
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
if team_2_add.id is None:
team_2_add.save() # depends on [control=['if'], data=[]]
consolidated_team_id.append(team_2_add.id) # depends on [control=['for'], data=['team_2_add']] # depends on [control=['if'], data=[]]
post_payload['osInstanceTeamsID'] = consolidated_team_id
args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning('OSInstance.save - Problem while saving OS instance ' + self.name + '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + ' (' + str(response.rc) + ')') # depends on [control=['if'], data=[]]
else:
self.id = response.response_content['osInstanceID']
if self.embedded_osi_2_add is not None:
for osi_2_add in self.embedded_osi_2_add:
osi_2_add.sync() # depends on [control=['for'], data=['osi_2_add']] # depends on [control=['if'], data=[]]
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
osi_2_rm.sync() # depends on [control=['for'], data=['osi_2_rm']] # depends on [control=['if'], data=[]]
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
ipa_2_add.sync() # depends on [control=['for'], data=['ipa_2_add']] # depends on [control=['if'], data=[]]
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
ipa_2_rm.sync() # depends on [control=['for'], data=['ipa_2_rm']] # depends on [control=['if'], data=[]]
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
nic_2_add.sync() # depends on [control=['for'], data=['nic_2_add']] # depends on [control=['if'], data=[]]
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
nic_2_rm.sync() # depends on [control=['for'], data=['nic_2_rm']] # depends on [control=['if'], data=[]]
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
snet_2_add.sync() # depends on [control=['for'], data=['snet_2_add']] # depends on [control=['if'], data=[]]
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
snet_2_rm.sync() # depends on [control=['for'], data=['snet_2_rm']] # depends on [control=['if'], data=[]]
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
app_2_add.sync() # depends on [control=['for'], data=['app_2_add']] # depends on [control=['if'], data=[]]
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
app_2_rm.sync() # depends on [control=['for'], data=['app_2_rm']] # depends on [control=['if'], data=[]]
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
env_2_add.sync() # depends on [control=['for'], data=['env_2_add']] # depends on [control=['if'], data=[]]
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
env_2_rm.sync() # depends on [control=['for'], data=['env_2_rm']] # depends on [control=['if'], data=[]]
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
team_2_add.sync() # depends on [control=['for'], data=['team_2_add']] # depends on [control=['if'], data=[]]
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
team_2_rm.sync() # depends on [control=['for'], data=['team_2_rm']] # depends on [control=['if'], data=[]]
self.embedded_osi_2_add.clear()
self.embedded_osi_2_rm.clear()
self.ip_address_2_add.clear()
self.ip_address_2_rm.clear()
self.nic_2_add.clear()
self.nic_2_rm.clear()
self.subnets_2_add.clear()
self.subnets_2_rm.clear()
self.application_2_add.clear()
self.application_2_rm.clear()
self.environment_2_add.clear()
self.environment_2_rm.clear()
self.team_2_add.clear()
self.team_2_rm.clear()
self.sync()
return self
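A minimal create-then-update sketch; the constructor keywords are assumptions inferred from the fields the method serializes:
osi = OSInstance(name='web-01',
                 description='frontend VM',
                 admin_gate_uri='ssh://web-01.example.com')
osi.save()  # POST creates the instance and populates osi.id from the response
osi.description = 'frontend VM (blue pool)'
osi.save()  # the same call updates the existing instance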
|
def write_ch(self, ch):
    '''This puts a character at the current cursor position. The cursor
    position is moved forward with wrap-around; when the cursor moves past
    the lower-right corner of the screen, the screen is scrolled up one line.'''
if isinstance(ch, bytes):
ch = self._decode(ch)
        # \r produces a call to cr(); \n produces a call to crlf().
ch = ch[0]
if ch == u'\r':
self.cr()
return
if ch == u'\n':
self.crlf()
return
if ch == chr(screen.BS):
self.cursor_back()
return
self.put_abs(self.cur_r, self.cur_c, ch)
old_r = self.cur_r
old_c = self.cur_c
self.cursor_forward()
if old_c == self.cur_c:
self.cursor_down()
if old_r != self.cur_r:
self.cursor_home (self.cur_r, 1)
else:
self.scroll_up ()
self.cursor_home (self.cur_r, 1)
self.erase_line()
|
def function[write_ch, parameter[self, ch]]:
    constant[This puts a character at the current cursor position. The cursor
    position is moved forward with wrap-around; when the cursor moves past
    the lower-right corner of the screen, the screen is scrolled up one line.]
if call[name[isinstance], parameter[name[ch], name[bytes]]] begin[:]
variable[ch] assign[=] call[name[self]._decode, parameter[name[ch]]]
variable[ch] assign[=] call[name[ch]][constant[0]]
if compare[name[ch] equal[==] constant[
]] begin[:]
call[name[self].cr, parameter[]]
return[None]
if compare[name[ch] equal[==] constant[
]] begin[:]
call[name[self].crlf, parameter[]]
return[None]
if compare[name[ch] equal[==] call[name[chr], parameter[name[screen].BS]]] begin[:]
call[name[self].cursor_back, parameter[]]
return[None]
call[name[self].put_abs, parameter[name[self].cur_r, name[self].cur_c, name[ch]]]
variable[old_r] assign[=] name[self].cur_r
variable[old_c] assign[=] name[self].cur_c
call[name[self].cursor_forward, parameter[]]
if compare[name[old_c] equal[==] name[self].cur_c] begin[:]
call[name[self].cursor_down, parameter[]]
if compare[name[old_r] not_equal[!=] name[self].cur_r] begin[:]
call[name[self].cursor_home, parameter[name[self].cur_r, constant[1]]]
|
keyword[def] identifier[write_ch] ( identifier[self] , identifier[ch] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[ch] , identifier[bytes] ):
identifier[ch] = identifier[self] . identifier[_decode] ( identifier[ch] )
identifier[ch] = identifier[ch] [ literal[int] ]
keyword[if] identifier[ch] == literal[string] :
identifier[self] . identifier[cr] ()
keyword[return]
keyword[if] identifier[ch] == literal[string] :
identifier[self] . identifier[crlf] ()
keyword[return]
keyword[if] identifier[ch] == identifier[chr] ( identifier[screen] . identifier[BS] ):
identifier[self] . identifier[cursor_back] ()
keyword[return]
identifier[self] . identifier[put_abs] ( identifier[self] . identifier[cur_r] , identifier[self] . identifier[cur_c] , identifier[ch] )
identifier[old_r] = identifier[self] . identifier[cur_r]
identifier[old_c] = identifier[self] . identifier[cur_c]
identifier[self] . identifier[cursor_forward] ()
keyword[if] identifier[old_c] == identifier[self] . identifier[cur_c] :
identifier[self] . identifier[cursor_down] ()
keyword[if] identifier[old_r] != identifier[self] . identifier[cur_r] :
identifier[self] . identifier[cursor_home] ( identifier[self] . identifier[cur_r] , literal[int] )
keyword[else] :
identifier[self] . identifier[scroll_up] ()
identifier[self] . identifier[cursor_home] ( identifier[self] . identifier[cur_r] , literal[int] )
identifier[self] . identifier[erase_line] ()
|
def write_ch(self, ch):
"""This puts a character at the current cursor position. The cursor
    position is moved forward with wrap-around; if the cursor hits the
    lower-right corner of the screen, the screen is scrolled up a line. """
if isinstance(ch, bytes):
ch = self._decode(ch) # depends on [control=['if'], data=[]]
    # \r produces a call to cr(); \n produces a call to crlf().
ch = ch[0]
if ch == u'\r':
self.cr()
return # depends on [control=['if'], data=[]]
if ch == u'\n':
self.crlf()
return # depends on [control=['if'], data=[]]
if ch == chr(screen.BS):
self.cursor_back()
return # depends on [control=['if'], data=[]]
self.put_abs(self.cur_r, self.cur_c, ch)
old_r = self.cur_r
old_c = self.cur_c
self.cursor_forward()
if old_c == self.cur_c:
self.cursor_down()
if old_r != self.cur_r:
self.cursor_home(self.cur_r, 1) # depends on [control=['if'], data=[]]
else:
self.scroll_up()
self.cursor_home(self.cur_r, 1)
self.erase_line() # depends on [control=['if'], data=[]]
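
A minimal, self-contained sketch of the wrap-around bookkeeping used above: cursor_forward() clamps at the right edge, so "old_c == cur_c after advancing" is the wrap signal. TinyCursor is a hypothetical stand-in, not the real screen class.

class TinyCursor:
    def __init__(self, rows=3, cols=4):
        self.rows, self.cols = rows, cols
        self.cur_r, self.cur_c = 1, 1            # 1-based, like the code above

    def cursor_forward(self):
        self.cur_c = min(self.cur_c + 1, self.cols)

    def cursor_down(self):
        self.cur_r = min(self.cur_r + 1, self.rows)

    def advance(self):
        old_r, old_c = self.cur_r, self.cur_c
        self.cursor_forward()
        if old_c == self.cur_c:                  # stuck at the right edge -> wrap
            self.cursor_down()
            if old_r != self.cur_r:
                self.cur_c = 1                   # cursor_home(cur_r, 1)
            else:                                # bottom-right corner
                print('would scroll_up() and erase_line() here')
                self.cur_c = 1

c = TinyCursor()
for _ in range(5):
    c.advance()
    print(c.cur_r, c.cur_c)                      # wraps from (1, 4) to (2, 1)
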
|
def search_metrics(self, *args, **kwargs):
"""
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on metrics
"""
return self._search_metrics_and_metadata(
self._METRIC_ENDPOINT_SUFFIX, *args, **kwargs)
|
def function[search_metrics, parameter[self]]:
constant[
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on metrics
]
return[call[name[self]._search_metrics_and_metadata, parameter[name[self]._METRIC_ENDPOINT_SUFFIX, <ast.Starred object at 0x7da1b049a500>]]]
|
keyword[def] identifier[search_metrics] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_search_metrics_and_metadata] (
identifier[self] . identifier[_METRIC_ENDPOINT_SUFFIX] ,* identifier[args] ,** identifier[kwargs] )
|
def search_metrics(self, *args, **kwargs):
"""
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on metrics
"""
return self._search_metrics_and_metadata(self._METRIC_ENDPOINT_SUFFIX, *args, **kwargs)
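
The method above is a thin wrapper over a shared private search helper. A runnable sketch of that delegation pattern, with the HTTP layer faked so it executes as-is (FakeCatalog and its return shape are illustrative, not the real client):

class FakeCatalog:
    _METRIC_ENDPOINT_SUFFIX = 'metric'

    def _search_metrics_and_metadata(self, suffix, query, order_by=None,
                                     offset=0, limit=50, timeout=None):
        # A real client would issue an HTTP request to the suffix endpoint here.
        return {'endpoint': suffix, 'query': query, 'offset': offset, 'limit': limit}

    def search_metrics(self, *args, **kwargs):
        return self._search_metrics_and_metadata(
            self._METRIC_ENDPOINT_SUFFIX, *args, **kwargs)

print(FakeCatalog().search_metrics('cpu.utilization', limit=10))
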
|
def ephemeral(cls):
"""
Creates a new ephemeral key constructed using a raw 32-byte string from urandom.
Ephemeral keys are used once for each encryption task and are then discarded;
they are not intended for long-term or repeat use.
"""
private_key = nacl.public.PrivateKey(os.urandom(32))
return cls(private_key.public_key, private_key)
|
def function[ephemeral, parameter[cls]]:
constant[
Creates a new ephemeral key constructed using a raw 32-byte string from urandom.
Ephemeral keys are used once for each encryption task and are then discarded;
they are not intended for long-term or repeat use.
]
variable[private_key] assign[=] call[name[nacl].public.PrivateKey, parameter[call[name[os].urandom, parameter[constant[32]]]]]
return[call[name[cls], parameter[name[private_key].public_key, name[private_key]]]]
|
keyword[def] identifier[ephemeral] ( identifier[cls] ):
literal[string]
identifier[private_key] = identifier[nacl] . identifier[public] . identifier[PrivateKey] ( identifier[os] . identifier[urandom] ( literal[int] ))
keyword[return] identifier[cls] ( identifier[private_key] . identifier[public_key] , identifier[private_key] )
|
def ephemeral(cls):
"""
Creates a new ephemeral key constructed using a raw 32-byte string from urandom.
Ephemeral keys are used once for each encryption task and are then discarded;
they are not intended for long-term or repeat use.
"""
private_key = nacl.public.PrivateKey(os.urandom(32))
return cls(private_key.public_key, private_key)
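
A sketch of how such a one-shot key is typically consumed with PyNaCl's Box construction (requires PyNaCl, which the code above already depends on):

import os
import nacl.public

recipient = nacl.public.PrivateKey.generate()    # long-term recipient key
eph = nacl.public.PrivateKey(os.urandom(32))     # same construction as ephemeral()

ct = nacl.public.Box(eph, recipient.public_key).encrypt(b'one-shot message')
pt = nacl.public.Box(recipient, eph.public_key).decrypt(ct)
assert pt == b'one-shot message'
# eph is now discarded and never reused, as the docstring prescribes.
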
|
def make_d2p_id(self):
"""
Make an association id for phenotypic associations with disease
that is defined by:
source of association + disease + relationship + phenotype
+ onset + frequency
:return:
"""
attributes = [self.onset, self.frequency]
assoc_id = self.make_association_id(
self.definedby, self.disease_id, self.rel, self.phenotype_id, attributes)
return assoc_id
|
def function[make_d2p_id, parameter[self]]:
constant[
Make an association id for phenotypic associations with disease
that is defined by:
source of association + disease + relationship + phenotype
+ onset + frequency
:return:
]
variable[attributes] assign[=] list[[<ast.Attribute object at 0x7da18eb57bb0>, <ast.Attribute object at 0x7da18eb54be0>]]
variable[assoc_id] assign[=] call[name[self].make_association_id, parameter[name[self].definedby, name[self].disease_id, name[self].rel, name[self].phenotype_id, name[attributes]]]
return[name[assoc_id]]
|
keyword[def] identifier[make_d2p_id] ( identifier[self] ):
literal[string]
identifier[attributes] =[ identifier[self] . identifier[onset] , identifier[self] . identifier[frequency] ]
identifier[assoc_id] = identifier[self] . identifier[make_association_id] (
identifier[self] . identifier[definedby] , identifier[self] . identifier[disease_id] , identifier[self] . identifier[rel] , identifier[self] . identifier[phenotype_id] , identifier[attributes] )
keyword[return] identifier[assoc_id]
|
def make_d2p_id(self):
"""
Make an association id for phenotypic associations with disease
that is defined by:
source of association + disease + relationship + phenotype
+ onset + frequency
:return:
"""
attributes = [self.onset, self.frequency]
assoc_id = self.make_association_id(self.definedby, self.disease_id, self.rel, self.phenotype_id, attributes)
return assoc_id
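
make_association_id is not shown here; a common approach, used below purely as an illustrative stand-in, is to hash the defining fields into a stable identifier:

import hashlib

def demo_association_id(definedby, subject, rel, obj, attributes):
    # Sort the optional attributes so the id is order-independent.
    parts = [definedby, subject, rel, obj] + sorted(a for a in attributes if a)
    digest = hashlib.sha1('+'.join(parts).encode('utf-8')).hexdigest()
    return 'MONARCH:' + digest[:16]

print(demo_association_id('MONARCH:hpoa', 'OMIM:154700', 'RO:0002200',
                          'HP:0001166', ['HP:0003577', 'HP:0040283']))
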
|
def mouse(table, day=None):
"""Handler for showing mouse statistics for specified type and day."""
where = (("day", day),) if day else ()
events = db.fetch(table, where=where, order="day")
for e in events: e["dt"] = datetime.datetime.fromtimestamp(e["stamp"])
stats, positions, events = stats_mouse(events, table)
days, input = db.fetch("counts", order="day", type=table), "mouse"
return bottle.template("heatmap.tpl", locals(), conf=conf)
|
def function[mouse, parameter[table, day]]:
constant[Handler for showing mouse statistics for specified type and day.]
variable[where] assign[=] <ast.IfExp object at 0x7da20e956050>
variable[events] assign[=] call[name[db].fetch, parameter[name[table]]]
for taget[name[e]] in starred[name[events]] begin[:]
call[name[e]][constant[dt]] assign[=] call[name[datetime].datetime.fromtimestamp, parameter[call[name[e]][constant[stamp]]]]
<ast.Tuple object at 0x7da20e955ae0> assign[=] call[name[stats_mouse], parameter[name[events], name[table]]]
<ast.Tuple object at 0x7da18fe90cd0> assign[=] tuple[[<ast.Call object at 0x7da18fe90040>, <ast.Constant object at 0x7da18fe931c0>]]
return[call[name[bottle].template, parameter[constant[heatmap.tpl], call[name[locals], parameter[]]]]]
|
keyword[def] identifier[mouse] ( identifier[table] , identifier[day] = keyword[None] ):
literal[string]
identifier[where] =(( literal[string] , identifier[day] ),) keyword[if] identifier[day] keyword[else] ()
identifier[events] = identifier[db] . identifier[fetch] ( identifier[table] , identifier[where] = identifier[where] , identifier[order] = literal[string] )
keyword[for] identifier[e] keyword[in] identifier[events] : identifier[e] [ literal[string] ]= identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[e] [ literal[string] ])
identifier[stats] , identifier[positions] , identifier[events] = identifier[stats_mouse] ( identifier[events] , identifier[table] )
identifier[days] , identifier[input] = identifier[db] . identifier[fetch] ( literal[string] , identifier[order] = literal[string] , identifier[type] = identifier[table] ), literal[string]
keyword[return] identifier[bottle] . identifier[template] ( literal[string] , identifier[locals] (), identifier[conf] = identifier[conf] )
|
def mouse(table, day=None):
"""Handler for showing mouse statistics for specified type and day."""
where = (('day', day),) if day else ()
events = db.fetch(table, where=where, order='day')
for e in events:
e['dt'] = datetime.datetime.fromtimestamp(e['stamp']) # depends on [control=['for'], data=['e']]
(stats, positions, events) = stats_mouse(events, table)
(days, input) = (db.fetch('counts', order='day', type=table), 'mouse')
return bottle.template('heatmap.tpl', locals(), conf=conf)
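
The enrichment step above in isolation, runnable on its own (the sample timestamps are arbitrary):

import datetime

events = [{'stamp': 1546300800}, {'stamp': 1546304400}]
for e in events:
    e['dt'] = datetime.datetime.fromtimestamp(e['stamp'])
print(events[0]['dt'].isoformat())   # local-time datetime for the first event
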
|
def get_cds_ranges_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=cds".format(transcript_id)
r = self.ensembl_request(ext, headers)
cds_ranges = []
for cds_range in json.loads(r):
if cds_range["Parent"] != transcript_id:
continue
start = cds_range["start"]
end = cds_range["end"]
cds_ranges.append((start, end))
return cds_ranges
|
def function[get_cds_ranges_for_transcript, parameter[self, transcript_id]]:
constant[ obtain the sequence for a transcript from ensembl
]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a1c220>], [<ast.Constant object at 0x7da1b1a1f730>]]
name[self].attempt assign[=] constant[0]
variable[ext] assign[=] call[constant[/overlap/id/{}?feature=cds].format, parameter[name[transcript_id]]]
variable[r] assign[=] call[name[self].ensembl_request, parameter[name[ext], name[headers]]]
variable[cds_ranges] assign[=] list[[]]
for taget[name[cds_range]] in starred[call[name[json].loads, parameter[name[r]]]] begin[:]
if compare[call[name[cds_range]][constant[Parent]] not_equal[!=] name[transcript_id]] begin[:]
continue
variable[start] assign[=] call[name[cds_range]][constant[start]]
variable[end] assign[=] call[name[cds_range]][constant[end]]
call[name[cds_ranges].append, parameter[tuple[[<ast.Name object at 0x7da20c6e7c10>, <ast.Name object at 0x7da20c6e58d0>]]]]
return[name[cds_ranges]]
|
keyword[def] identifier[get_cds_ranges_for_transcript] ( identifier[self] , identifier[transcript_id] ):
literal[string]
identifier[headers] ={ literal[string] : literal[string] }
identifier[self] . identifier[attempt] = literal[int]
identifier[ext] = literal[string] . identifier[format] ( identifier[transcript_id] )
identifier[r] = identifier[self] . identifier[ensembl_request] ( identifier[ext] , identifier[headers] )
identifier[cds_ranges] =[]
keyword[for] identifier[cds_range] keyword[in] identifier[json] . identifier[loads] ( identifier[r] ):
keyword[if] identifier[cds_range] [ literal[string] ]!= identifier[transcript_id] :
keyword[continue]
identifier[start] = identifier[cds_range] [ literal[string] ]
identifier[end] = identifier[cds_range] [ literal[string] ]
identifier[cds_ranges] . identifier[append] (( identifier[start] , identifier[end] ))
keyword[return] identifier[cds_ranges]
|
def get_cds_ranges_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {'content-type': 'application/json'}
self.attempt = 0
ext = '/overlap/id/{}?feature=cds'.format(transcript_id)
r = self.ensembl_request(ext, headers)
cds_ranges = []
for cds_range in json.loads(r):
if cds_range['Parent'] != transcript_id:
continue # depends on [control=['if'], data=[]]
start = cds_range['start']
end = cds_range['end']
cds_ranges.append((start, end)) # depends on [control=['for'], data=['cds_range']]
return cds_ranges
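
The same lookup as a standalone requests call, without the retry and rate-limit handling the class above layers on via ensembl_request (ENST00000288602 is just an example transcript):

import requests

transcript_id = 'ENST00000288602'
url = 'https://rest.ensembl.org/overlap/id/{}?feature=cds'.format(transcript_id)
resp = requests.get(url, headers={'content-type': 'application/json'})
resp.raise_for_status()

cds_ranges = [(c['start'], c['end']) for c in resp.json()
              if c['Parent'] == transcript_id]
print(sorted(cds_ranges)[:3])
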
|
def get_state_for_transition(self, transition):
"""Calculate the target state of a transition
:param transition: The transition of which the target state is determined
:return: the to-state of the transition
:raises exceptions.TypeError: if the transition parameter is of wrong type
"""
if not isinstance(transition, Transition):
raise TypeError("transition must be of type Transition")
# the to_state is None when the transition connects an outcome of a child state to the outcome of a parent state
if transition.to_state == self.state_id or transition.to_state is None:
return self
else:
return self.states[transition.to_state]
|
def function[get_state_for_transition, parameter[self, transition]]:
constant[Calculate the target state of a transition
:param transition: The transition of which the target state is determined
:return: the to-state of the transition
:raises exceptions.TypeError: if the transition parameter is of wrong type
]
if <ast.UnaryOp object at 0x7da1b1a2a260> begin[:]
<ast.Raise object at 0x7da1b1a2b2e0>
if <ast.BoolOp object at 0x7da1b1a2a050> begin[:]
return[name[self]]
|
keyword[def] identifier[get_state_for_transition] ( identifier[self] , identifier[transition] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[transition] , identifier[Transition] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[transition] . identifier[to_state] == identifier[self] . identifier[state_id] keyword[or] identifier[transition] . identifier[to_state] keyword[is] keyword[None] :
keyword[return] identifier[self]
keyword[else] :
keyword[return] identifier[self] . identifier[states] [ identifier[transition] . identifier[to_state] ]
|
def get_state_for_transition(self, transition):
"""Calculate the target state of a transition
:param transition: The transition of which the target state is determined
:return: the to-state of the transition
:raises exceptions.TypeError: if the transition parameter is of wrong type
"""
if not isinstance(transition, Transition):
raise TypeError('transition must be of type Transition') # depends on [control=['if'], data=[]]
# the to_state is None when the transition connects an outcome of a child state to the outcome of a parent state
if transition.to_state == self.state_id or transition.to_state is None:
return self # depends on [control=['if'], data=[]]
else:
return self.states[transition.to_state]
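
A self-contained sketch of the lookup rule: a transition whose to_state is the container's own id (or None) resolves to the container itself, otherwise to a child state. Transition and DemoContainer below are minimal stand-ins, not the real API:

from collections import namedtuple

Transition = namedtuple('Transition', ['to_state'])

class DemoContainer:
    state_id = 'ROOT'
    states = {'CHILD1': 'child-1 object'}

    def get_state_for_transition(self, t):
        if t.to_state == self.state_id or t.to_state is None:
            return self
        return self.states[t.to_state]

c = DemoContainer()
print(c.get_state_for_transition(Transition('CHILD1')))      # 'child-1 object'
print(c.get_state_for_transition(Transition(None)) is c)     # True
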
|
def featured_playlists(self, locale=None, country=None, timestamp=None, limit=20, offset=0):
"""Get a list of Spotify featured playlists.
Parameters
----------
locale : LOCALE_TP
LOCALE
country : COUNTRY_TP
COUNTRY
timestamp : TIMESTAMP_TP
TIMESTAMP
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
"""
route = Route('GET', '/browse/featured-playlists')
payload = {'limit': limit, 'offset': offset}
if country:
payload['country'] = country
if locale:
payload['locale'] = locale
if timestamp:
payload['timestamp'] = timestamp
return self.request(route, params=payload)
|
def function[featured_playlists, parameter[self, locale, country, timestamp, limit, offset]]:
constant[Get a list of Spotify featured playlists.
Parameters
----------
locale : LOCALE_TP
LOCALE
country : COUNTRY_TP
COUNTRY
timestamp : TIMESTAMP_TP
TIMESTAMP
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
]
variable[route] assign[=] call[name[Route], parameter[constant[GET], constant[/browse/featured-playlists]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e5d50>, <ast.Constant object at 0x7da20c6e4a90>], [<ast.Name object at 0x7da20c6e6da0>, <ast.Name object at 0x7da20c6e7850>]]
if name[country] begin[:]
call[name[payload]][constant[country]] assign[=] name[country]
if name[locale] begin[:]
call[name[payload]][constant[locale]] assign[=] name[locale]
if name[timestamp] begin[:]
call[name[payload]][constant[timestamp]] assign[=] name[timestamp]
return[call[name[self].request, parameter[name[route]]]]
|
keyword[def] identifier[featured_playlists] ( identifier[self] , identifier[locale] = keyword[None] , identifier[country] = keyword[None] , identifier[timestamp] = keyword[None] , identifier[limit] = literal[int] , identifier[offset] = literal[int] ):
literal[string]
identifier[route] = identifier[Route] ( literal[string] , literal[string] )
identifier[payload] ={ literal[string] : identifier[limit] , literal[string] : identifier[offset] }
keyword[if] identifier[country] :
identifier[payload] [ literal[string] ]= identifier[country]
keyword[if] identifier[locale] :
identifier[payload] [ literal[string] ]= identifier[locale]
keyword[if] identifier[timestamp] :
identifier[payload] [ literal[string] ]= identifier[timestamp]
keyword[return] identifier[self] . identifier[request] ( identifier[route] , identifier[params] = identifier[payload] )
|
def featured_playlists(self, locale=None, country=None, timestamp=None, limit=20, offset=0):
"""Get a list of Spotify featured playlists.
Parameters
----------
locale : LOCALE_TP
LOCALE
country : COUNTRY_TP
COUNTRY
timestamp : TIMESTAMP_TP
TIMESTAMP
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
"""
route = Route('GET', '/browse/featured-playlists')
payload = {'limit': limit, 'offset': offset}
if country:
payload['country'] = country # depends on [control=['if'], data=[]]
if locale:
payload['locale'] = locale # depends on [control=['if'], data=[]]
if timestamp:
payload['timestamp'] = timestamp # depends on [control=['if'], data=[]]
return self.request(route, params=payload)
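
The equivalent raw call against the Spotify Web API; a valid OAuth bearer token is assumed (obtaining one is out of scope here):

import requests

TOKEN = 'YOUR_OAUTH_TOKEN'   # placeholder
resp = requests.get(
    'https://api.spotify.com/v1/browse/featured-playlists',
    headers={'Authorization': 'Bearer ' + TOKEN},
    params={'limit': 20, 'offset': 0, 'country': 'US',
            'timestamp': '2019-01-01T09:00:00'},
)
for item in resp.json()['playlists']['items']:
    print(item['name'])
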
|
def get_list(medium, user, credentials):
"""Returns a MediumList (Anime or Manga depends on [medium]) of user.
If user is not given, the username is taken from the initialized auth
credentials.
:param medium Anime or manga (tokens.Medium.Anime or tokens.Medium.Manga)
:param user The user whose list should be grabbed. May use credentials[0].
"""
helpers.check_creds(credentials, header)
list_url = helpers.get_list_url(medium, user)
#for some reason, we don't need auth.
list_resp = requests.get(list_url, headers=header)
if constants.TOO_MANY_REQUESTS in list_resp.text:
        # pass credentials along so the retried call matches get_list's signature
        return helpers.reschedule(get_list, constants.DEFAULT_WAIT_SECS, medium,
                                  user, credentials)
list_soup = BeautifulSoup(list_resp.text, 'lxml')
return objects.MediumList(medium, list_soup)
|
def function[get_list, parameter[medium, user, credentials]]:
constant[Returns a MediumList (Anime or Manga depends on [medium]) of user.
If user is not given, the username is taken from the initialized auth
credentials.
:param medium Anime or manga (tokens.Medium.Anime or tokens.Medium.Manga)
:param user The user whose list should be grabbed. May use credentials[0].
]
call[name[helpers].check_creds, parameter[name[credentials], name[header]]]
variable[list_url] assign[=] call[name[helpers].get_list_url, parameter[name[medium], name[user]]]
variable[list_resp] assign[=] call[name[requests].get, parameter[name[list_url]]]
if compare[name[constants].TOO_MANY_REQUESTS in name[list_resp].text] begin[:]
return[call[name[helpers].reschedule, parameter[name[get_list], name[constants].DEFAULT_WAIT_SECS, name[medium], name[user]]]]
variable[list_soup] assign[=] call[name[BeautifulSoup], parameter[name[list_resp].text, constant[lxml]]]
return[call[name[objects].MediumList, parameter[name[medium], name[list_soup]]]]
|
keyword[def] identifier[get_list] ( identifier[medium] , identifier[user] , identifier[credentials] ):
literal[string]
identifier[helpers] . identifier[check_creds] ( identifier[credentials] , identifier[header] )
identifier[list_url] = identifier[helpers] . identifier[get_list_url] ( identifier[medium] , identifier[user] )
identifier[list_resp] = identifier[requests] . identifier[get] ( identifier[list_url] , identifier[headers] = identifier[header] )
keyword[if] identifier[constants] . identifier[TOO_MANY_REQUESTS] keyword[in] identifier[list_resp] . identifier[text] :
keyword[return] identifier[helpers] . identifier[reschedule] ( identifier[get_list] , identifier[constants] . identifier[DEFAULT_WAIT_SECS] , identifier[medium] ,
identifier[user] )
identifier[list_soup] = identifier[BeautifulSoup] ( identifier[list_resp] . identifier[text] , literal[string] )
keyword[return] identifier[objects] . identifier[MediumList] ( identifier[medium] , identifier[list_soup] )
|
def get_list(medium, user, credentials):
"""Returns a MediumList (Anime or Manga depends on [medium]) of user.
If user is not given, the username is taken from the initialized auth
credentials.
:param medium Anime or manga (tokens.Medium.Anime or tokens.Medium.Manga)
:param user The user whose list should be grabbed. May use credentials[0].
"""
helpers.check_creds(credentials, header)
list_url = helpers.get_list_url(medium, user)
#for some reason, we don't need auth.
list_resp = requests.get(list_url, headers=header)
if constants.TOO_MANY_REQUESTS in list_resp.text:
        # pass credentials along so the retried call matches get_list's signature
        return helpers.reschedule(get_list, constants.DEFAULT_WAIT_SECS, medium, user, credentials) # depends on [control=['if'], data=[]]
list_soup = BeautifulSoup(list_resp.text, 'lxml')
return objects.MediumList(medium, list_soup)
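
A standalone sketch of the reschedule pattern used above: on a rate-limit response, wait and then call the same function again with the same arguments (helpers.reschedule in the source may differ in detail):

import time

def reschedule(func, wait_secs, *args, **kwargs):
    time.sleep(wait_secs)
    return func(*args, **kwargs)

def flaky(attempt=1):
    if attempt < 3:               # pretend the first two calls were rate-limited
        return reschedule(flaky, 0.1, attempt + 1)
    return 'ok after %d attempts' % attempt

print(flaky())
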
|
def add_shared_configs(p, base_dir=''):
"""Add configargparser/argparse configs for shared argument.
Arguments:
p - configargparse.ArgParser object
base_dir - base directory for file/path defaults.
"""
p.add('--host', default='localhost',
help="Service host")
p.add('--port', '-p', type=int, default=8000,
help="Service port")
p.add('--app-host', default=None,
help="Local application host for reverse proxy deployment, "
"as opposed to service --host (must also specify --app-port)")
p.add('--app-port', type=int, default=None,
help="Local application port for reverse proxy deployment, "
"as opposed to service --port (must also specify --app-host)")
p.add('--image-dir', '-d', default=os.path.join(base_dir, 'testimages'),
help="Image file directory")
p.add('--generator-dir', default=os.path.join(base_dir, 'iiif/generators'),
help="Generator directory for manipulator='gen'")
p.add('--tile-height', type=int, default=512,
help="Tile height")
p.add('--tile-width', type=int, default=512,
help="Tile width")
p.add('--gauth-client-secret', default=os.path.join(base_dir, 'client_secret.json'),
help="Name of file with Google auth client secret")
p.add('--include-osd', action='store_true',
help="Include a page with OpenSeadragon for each source")
p.add('--access-cookie-lifetime', type=int, default=3600,
help="Set access cookie lifetime for authenticated access in seconds")
p.add('--access-token-lifetime', type=int, default=10,
help="Set access token lifetime for authenticated access in seconds")
p.add('--config', is_config_file=True, default=None,
help='Read config from given file path')
p.add('--debug', action='store_true',
help="Set debug mode for web application. INSECURE!")
p.add('--verbose', '-v', action='store_true',
help="Be verbose")
p.add('--quiet', '-q', action='store_true',
help="Minimal output only")
|
def function[add_shared_configs, parameter[p, base_dir]]:
constant[Add configargparser/argparse configs for shared argument.
Arguments:
p - configargparse.ArgParser object
base_dir - base directory for file/path defaults.
]
call[name[p].add, parameter[constant[--host]]]
call[name[p].add, parameter[constant[--port], constant[-p]]]
call[name[p].add, parameter[constant[--app-host]]]
call[name[p].add, parameter[constant[--app-port]]]
call[name[p].add, parameter[constant[--image-dir], constant[-d]]]
call[name[p].add, parameter[constant[--generator-dir]]]
call[name[p].add, parameter[constant[--tile-height]]]
call[name[p].add, parameter[constant[--tile-width]]]
call[name[p].add, parameter[constant[--gauth-client-secret]]]
call[name[p].add, parameter[constant[--include-osd]]]
call[name[p].add, parameter[constant[--access-cookie-lifetime]]]
call[name[p].add, parameter[constant[--access-token-lifetime]]]
call[name[p].add, parameter[constant[--config]]]
call[name[p].add, parameter[constant[--debug]]]
call[name[p].add, parameter[constant[--verbose], constant[-v]]]
call[name[p].add, parameter[constant[--quiet], constant[-q]]]
|
keyword[def] identifier[add_shared_configs] ( identifier[p] , identifier[base_dir] = literal[string] ):
literal[string]
identifier[p] . identifier[add] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string]
literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = keyword[None] ,
identifier[help] = literal[string]
literal[string] )
identifier[p] . identifier[add] ( literal[string] , literal[string] , identifier[default] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] ),
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[default] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] ),
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[default] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] ),
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[is_config_file] = keyword[True] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
|
def add_shared_configs(p, base_dir=''):
"""Add configargparser/argparse configs for shared argument.
Arguments:
p - configargparse.ArgParser object
base_dir - base directory for file/path defaults.
"""
p.add('--host', default='localhost', help='Service host')
p.add('--port', '-p', type=int, default=8000, help='Service port')
p.add('--app-host', default=None, help='Local application host for reverse proxy deployment, as opposed to service --host (must also specify --app-port)')
p.add('--app-port', type=int, default=None, help='Local application port for reverse proxy deployment, as opposed to service --port (must also specify --app-host)')
p.add('--image-dir', '-d', default=os.path.join(base_dir, 'testimages'), help='Image file directory')
p.add('--generator-dir', default=os.path.join(base_dir, 'iiif/generators'), help="Generator directory for manipulator='gen'")
p.add('--tile-height', type=int, default=512, help='Tile height')
p.add('--tile-width', type=int, default=512, help='Tile width')
p.add('--gauth-client-secret', default=os.path.join(base_dir, 'client_secret.json'), help='Name of file with Google auth client secret')
p.add('--include-osd', action='store_true', help='Include a page with OpenSeadragon for each source')
p.add('--access-cookie-lifetime', type=int, default=3600, help='Set access cookie lifetime for authenticated access in seconds')
p.add('--access-token-lifetime', type=int, default=10, help='Set access token lifetime for authenticated access in seconds')
p.add('--config', is_config_file=True, default=None, help='Read config from given file path')
p.add('--debug', action='store_true', help='Set debug mode for web application. INSECURE!')
p.add('--verbose', '-v', action='store_true', help='Be verbose')
p.add('--quiet', '-q', action='store_true', help='Minimal output only')
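
A minimal sketch of how a server entry point might consume this helper; it assumes add_shared_configs (above) and configargparse are importable, and passes the argument list explicitly so it runs without a real CLI:

import configargparse

p = configargparse.ArgParser(description='demo IIIF server')
add_shared_configs(p, base_dir='/tmp')
opts = p.parse_args(['--port', '9000', '--include-osd', '-v'])
print(opts.port, opts.include_osd, opts.verbose, opts.image_dir)
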
|
def is_equal(self, other):
"""
Computes whether two Partial Orderings contain the same information
"""
        # coerce anything that does not implement the full PartialOrdering interface
        if not (hasattr(other, 'get_domain') and hasattr(other, 'upper') and hasattr(other, 'lower')):
other = self.coerce(other)
if self.is_domain_equal(other) \
and len(self.upper.symmetric_difference(other.upper)) == 0 \
and len(self.lower.symmetric_difference(other.lower)) == 0:
return True
return False
|
def function[is_equal, parameter[self, other]]:
constant[
Computes whether two Partial Orderings contain the same information
]
if <ast.UnaryOp object at 0x7da204345cf0> begin[:]
variable[other] assign[=] call[name[self].coerce, parameter[name[other]]]
if <ast.BoolOp object at 0x7da204346740> begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_equal] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] keyword[not] ( identifier[hasattr] ( identifier[other] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[other] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[other] , literal[string] )):
identifier[other] = identifier[self] . identifier[coerce] ( identifier[other] )
keyword[if] identifier[self] . identifier[is_domain_equal] ( identifier[other] ) keyword[and] identifier[len] ( identifier[self] . identifier[upper] . identifier[symmetric_difference] ( identifier[other] . identifier[upper] ))== literal[int] keyword[and] identifier[len] ( identifier[self] . identifier[lower] . identifier[symmetric_difference] ( identifier[other] . identifier[lower] ))== literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def is_equal(self, other):
"""
Computes whether two Partial Orderings contain the same information
"""
    # coerce anything that does not implement the full PartialOrdering interface
    if not (hasattr(other, 'get_domain') and hasattr(other, 'upper') and hasattr(other, 'lower')):
other = self.coerce(other) # depends on [control=['if'], data=[]]
if self.is_domain_equal(other) and len(self.upper.symmetric_difference(other.upper)) == 0 and (len(self.lower.symmetric_difference(other.lower)) == 0):
return True # depends on [control=['if'], data=[]]
return False
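
The equality test reduces to a domain check plus two empty-symmetric-difference checks. A runnable illustration with plain sets (DemoPO is a stand-in, not the real class):

class DemoPO:
    def __init__(self, domain, upper, lower):
        self.domain, self.upper, self.lower = set(domain), set(upper), set(lower)

    def is_domain_equal(self, other):
        return self.domain == other.domain

    def is_equal(self, other):
        return (self.is_domain_equal(other)
                and not self.upper.symmetric_difference(other.upper)
                and not self.lower.symmetric_difference(other.lower))

a = DemoPO('abc', {('a', 'b')}, {('b', 'a')})
b = DemoPO('abc', {('a', 'b')}, {('b', 'a')})
print(a.is_equal(b))   # True
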
|
def wait_until_queue_empty(self, channels, report=True, clear_end=True):
"""
Waits until all queues of channels are empty.
"""
state = {'message': ''}
self.logger.debug("wait_until_queue_empty: report=%s %s"
% (str(report), str([channel+':'+str(len(self.queues[channel])) for channel in channels]), ))
queues = []
for channel in channels:
queues += self.queues[channel][:]
def print_progress():
if report:
self.logger.debug("all_empty=%s" % (str(all_empty),))
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
state['message'] = "%.2f kB/s // %.2fkB of %.2fkB // %.2f%%" \
% (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024,
(self.bytes_sent / self.bytes_total * 100) if self.bytes_total else 0)
sys.__stderr__.write(state['message'])
sys.__stderr__.flush()
while True:
all_empty = all(m['_sent'] for m in queues)
print_progress()
if all_empty:
break
time.sleep(0.2)
print_progress()
if report and clear_end:
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
sys.__stderr__.flush()
|
def function[wait_until_queue_empty, parameter[self, channels, report, clear_end]]:
constant[
Waits until all queues of channels are empty.
]
variable[state] assign[=] dictionary[[<ast.Constant object at 0x7da18f58d180>], [<ast.Constant object at 0x7da18f58e500>]]
call[name[self].logger.debug, parameter[binary_operation[constant[wait_until_queue_empty: report=%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f58c880>, <ast.Call object at 0x7da18f58f700>]]]]]
variable[queues] assign[=] list[[]]
for taget[name[channel]] in starred[name[channels]] begin[:]
<ast.AugAssign object at 0x7da207f98940>
def function[print_progress, parameter[]]:
if name[report] begin[:]
call[name[self].logger.debug, parameter[binary_operation[constant[all_empty=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f994b0>]]]]]
call[name[sys].__stderr__.write, parameter[binary_operation[constant[] * call[name[len], parameter[call[name[state]][constant[message]]]]]]]
call[name[sys].__stderr__.write, parameter[constant[[K]]]
call[name[state]][constant[message]] assign[=] binary_operation[constant[%.2f kB/s // %.2fkB of %.2fkB // %.2f%%] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da207f987c0>, <ast.BinOp object at 0x7da207f99960>, <ast.BinOp object at 0x7da207f9ab60>, <ast.IfExp object at 0x7da207f99f30>]]]
call[name[sys].__stderr__.write, parameter[call[name[state]][constant[message]]]]
call[name[sys].__stderr__.flush, parameter[]]
while constant[True] begin[:]
variable[all_empty] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da18f813dc0>]]
call[name[print_progress], parameter[]]
if name[all_empty] begin[:]
break
call[name[time].sleep, parameter[constant[0.2]]]
call[name[print_progress], parameter[]]
if <ast.BoolOp object at 0x7da18f8118a0> begin[:]
call[name[sys].__stderr__.write, parameter[binary_operation[constant[] * call[name[len], parameter[call[name[state]][constant[message]]]]]]]
call[name[sys].__stderr__.write, parameter[constant[[K]]]
call[name[sys].__stderr__.flush, parameter[]]
|
keyword[def] identifier[wait_until_queue_empty] ( identifier[self] , identifier[channels] , identifier[report] = keyword[True] , identifier[clear_end] = keyword[True] ):
literal[string]
identifier[state] ={ literal[string] : literal[string] }
identifier[self] . identifier[logger] . identifier[debug] ( literal[string]
%( identifier[str] ( identifier[report] ), identifier[str] ([ identifier[channel] + literal[string] + identifier[str] ( identifier[len] ( identifier[self] . identifier[queues] [ identifier[channel] ])) keyword[for] identifier[channel] keyword[in] identifier[channels] ]),))
identifier[queues] =[]
keyword[for] identifier[channel] keyword[in] identifier[channels] :
identifier[queues] += identifier[self] . identifier[queues] [ identifier[channel] ][:]
keyword[def] identifier[print_progress] ():
keyword[if] identifier[report] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[str] ( identifier[all_empty] ),))
identifier[sys] . identifier[__stderr__] . identifier[write] ( literal[string] * identifier[len] ( identifier[state] [ literal[string] ]))
identifier[sys] . identifier[__stderr__] . identifier[write] ( literal[string] )
identifier[state] [ literal[string] ]= literal[string] %( identifier[self] . identifier[bytes_speed] / literal[int] , identifier[self] . identifier[bytes_sent] / literal[int] , identifier[self] . identifier[bytes_total] / literal[int] ,
( identifier[self] . identifier[bytes_sent] / identifier[self] . identifier[bytes_total] * literal[int] ) keyword[if] identifier[self] . identifier[bytes_total] keyword[else] literal[int] )
identifier[sys] . identifier[__stderr__] . identifier[write] ( identifier[state] [ literal[string] ])
identifier[sys] . identifier[__stderr__] . identifier[flush] ()
keyword[while] keyword[True] :
identifier[all_empty] = identifier[all] ( identifier[m] [ literal[string] ] keyword[for] identifier[m] keyword[in] identifier[queues] )
identifier[print_progress] ()
keyword[if] identifier[all_empty] :
keyword[break]
identifier[time] . identifier[sleep] ( literal[int] )
identifier[print_progress] ()
keyword[if] identifier[report] keyword[and] identifier[clear_end] :
identifier[sys] . identifier[__stderr__] . identifier[write] ( literal[string] * identifier[len] ( identifier[state] [ literal[string] ]))
identifier[sys] . identifier[__stderr__] . identifier[write] ( literal[string] )
identifier[sys] . identifier[__stderr__] . identifier[flush] ()
|
def wait_until_queue_empty(self, channels, report=True, clear_end=True):
"""
Waits until all queues of channels are empty.
"""
state = {'message': ''}
self.logger.debug('wait_until_queue_empty: report=%s %s' % (str(report), str([channel + ':' + str(len(self.queues[channel])) for channel in channels])))
queues = []
for channel in channels:
queues += self.queues[channel][:] # depends on [control=['for'], data=['channel']]
def print_progress():
if report:
self.logger.debug('all_empty=%s' % (str(all_empty),))
sys.__stderr__.write('\x08' * len(state['message']))
sys.__stderr__.write('\x1b[K')
state['message'] = '%.2f kB/s // %.2fkB of %.2fkB // %.2f%%' % (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024, self.bytes_sent / self.bytes_total * 100 if self.bytes_total else 0)
sys.__stderr__.write(state['message'])
sys.__stderr__.flush() # depends on [control=['if'], data=[]]
while True:
all_empty = all((m['_sent'] for m in queues))
print_progress()
if all_empty:
break # depends on [control=['if'], data=[]]
time.sleep(0.2) # depends on [control=['while'], data=[]]
print_progress()
if report and clear_end:
sys.__stderr__.write('\x08' * len(state['message']))
sys.__stderr__.write('\x1b[K')
sys.__stderr__.flush() # depends on [control=['if'], data=[]]
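
The in-place progress line technique above, in isolation: backspaces return the cursor to the start of the previous message and the ANSI erase-to-end-of-line code ("\033[K") clears it before the new text is written:

import sys
import time

message = ''
for pct in range(0, 101, 20):
    sys.stderr.write('\b' * len(message))    # back over the old message
    sys.stderr.write('\033[K')               # erase to end of line
    message = 'uploading... %3d%%' % pct
    sys.stderr.write(message)
    sys.stderr.flush()
    time.sleep(0.1)
sys.stderr.write('\n')
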
|
def interpolate2dStructuredIDW(grid, mask, kernel=15, power=2, fx=1, fy=1):
'''
    Replace all values in [grid] indicated by [mask] with the inverse
    distance weighted (IDW) interpolation of all values within a
    (2*kernel+1) x (2*kernel+1) window around each pixel.
    [power] -> distance weighting exponent: weight = 1/distance**[power]
'''
    weights = np.zeros(shape=(2*kernel+1, 2*kernel+1))  # zeros, so the never-assigned centre weight is not uninitialised memory
for xi in range(-kernel,kernel+1):
for yi in range(-kernel,kernel+1):
dist = ((fx*xi)**2+(fy*yi)**2)
if dist:
weights[xi+kernel,yi+kernel] = 1 / dist**(0.5*power)
return _calc(grid, mask, kernel, weights)
|
def function[interpolate2dStructuredIDW, parameter[grid, mask, kernel, power, fx, fy]]:
constant[
Replace all values in [grid] indicated by [mask] with the inverse
distance weighted (IDW) interpolation of all values within a
(2*kernel+1) x (2*kernel+1) window around each pixel.
[power] -> distance weighting exponent: weight = 1/distance**[power]
]
    variable[weights] assign[=] call[name[np].zeros, parameter[]]
for taget[name[xi]] in starred[call[name[range], parameter[<ast.UnaryOp object at 0x7da20c76f370>, binary_operation[name[kernel] + constant[1]]]]] begin[:]
for taget[name[yi]] in starred[call[name[range], parameter[<ast.UnaryOp object at 0x7da20c76c820>, binary_operation[name[kernel] + constant[1]]]]] begin[:]
variable[dist] assign[=] binary_operation[binary_operation[binary_operation[name[fx] * name[xi]] ** constant[2]] + binary_operation[binary_operation[name[fy] * name[yi]] ** constant[2]]]
if name[dist] begin[:]
call[name[weights]][tuple[[<ast.BinOp object at 0x7da204622fe0>, <ast.BinOp object at 0x7da204622590>]]] assign[=] binary_operation[constant[1] / binary_operation[name[dist] ** binary_operation[constant[0.5] * name[power]]]]
return[call[name[_calc], parameter[name[grid], name[mask], name[kernel], name[weights]]]]
|
keyword[def] identifier[interpolate2dStructuredIDW] ( identifier[grid] , identifier[mask] , identifier[kernel] = literal[int] , identifier[power] = literal[int] , identifier[fx] = literal[int] , identifier[fy] = literal[int] ):
literal[string]
identifier[weights] = identifier[np] . identifier[zeros] ( identifier[shape] =(( literal[int] * identifier[kernel] + literal[int] , literal[int] * identifier[kernel] + literal[int] )))
keyword[for] identifier[xi] keyword[in] identifier[range] (- identifier[kernel] , identifier[kernel] + literal[int] ):
keyword[for] identifier[yi] keyword[in] identifier[range] (- identifier[kernel] , identifier[kernel] + literal[int] ):
identifier[dist] =(( identifier[fx] * identifier[xi] )** literal[int] +( identifier[fy] * identifier[yi] )** literal[int] )
keyword[if] identifier[dist] :
identifier[weights] [ identifier[xi] + identifier[kernel] , identifier[yi] + identifier[kernel] ]= literal[int] / identifier[dist] **( literal[int] * identifier[power] )
keyword[return] identifier[_calc] ( identifier[grid] , identifier[mask] , identifier[kernel] , identifier[weights] )
|
def interpolate2dStructuredIDW(grid, mask, kernel=15, power=2, fx=1, fy=1):
"""
    Replace all values in [grid] indicated by [mask] with the inverse
    distance weighted (IDW) interpolation of all values within a
    (2*kernel+1) x (2*kernel+1) window around each pixel.
    [power] -> distance weighting exponent: weight = 1/distance**[power]
"""
    weights = np.zeros(shape=(2 * kernel + 1, 2 * kernel + 1))  # zeros, so the never-assigned centre weight is not uninitialised memory
for xi in range(-kernel, kernel + 1):
for yi in range(-kernel, kernel + 1):
dist = (fx * xi) ** 2 + (fy * yi) ** 2
if dist:
weights[xi + kernel, yi + kernel] = 1 / dist ** (0.5 * power) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['yi']] # depends on [control=['for'], data=['xi']]
return _calc(grid, mask, kernel, weights)
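
A worked example of the weight kernel for kernel=1, power=2, fx=fy=1: each off-centre weight is 1/d**power with d the Euclidean distance to the centre pixel, and the centre weight stays 0:

import numpy as np

kernel, power, fx, fy = 1, 2, 1, 1
weights = np.zeros((2 * kernel + 1, 2 * kernel + 1))
for xi in range(-kernel, kernel + 1):
    for yi in range(-kernel, kernel + 1):
        dist = (fx * xi) ** 2 + (fy * yi) ** 2
        if dist:
            weights[xi + kernel, yi + kernel] = 1 / dist ** (0.5 * power)
print(weights)
# [[0.5 1.  0.5]
#  [1.  0.  1. ]
#  [0.5 1.  0.5]]
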
|
def _derive_list_path(self):
"""Guess otu list file path produced by Mothur"""
base, ext = path.splitext(self._input_filename)
return '%s.unique.%s.list' % (base, self.__get_method_abbrev())
|
def function[_derive_list_path, parameter[self]]:
constant[Guess otu list file path produced by Mothur]
<ast.Tuple object at 0x7da1b0b55420> assign[=] call[name[path].splitext, parameter[name[self]._input_filename]]
return[binary_operation[constant[%s.unique.%s.list] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0b71b40>, <ast.Call object at 0x7da1b0b729b0>]]]]
|
keyword[def] identifier[_derive_list_path] ( identifier[self] ):
literal[string]
identifier[base] , identifier[ext] = identifier[path] . identifier[splitext] ( identifier[self] . identifier[_input_filename] )
keyword[return] literal[string] %( identifier[base] , identifier[self] . identifier[__get_method_abbrev] ())
|
def _derive_list_path(self):
"""Guess otu list file path produced by Mothur"""
(base, ext) = path.splitext(self._input_filename)
return '%s.unique.%s.list' % (base, self.__get_method_abbrev())
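
A quick illustration of the derived name; 'fn' (Mothur's furthest-neighbor abbreviation) stands in for whatever __get_method_abbrev returns:

from os import path

base, _ext = path.splitext('sample.fasta')
print('%s.unique.%s.list' % (base, 'fn'))   # -> sample.unique.fn.list
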
|
def top_path(sources, sinks, net_flux):
"""
Use the Dijkstra algorithm for finding the shortest path
connecting a set of source states from a set of sink states.
Parameters
----------
sources : array_like, int
One-dimensional list of nodes to define the source states.
sinks : array_like, int
One-dimensional list of nodes to define the sink states.
net_flux : np.ndarray, shape = [n_states, n_states]
Net flux of the MSM
Returns
-------
top_path : np.ndarray
Array corresponding to the top path between sources and
sinks. It is an array of states visited along the path.
flux : float
Flux traveling through this path -- this is equal to the
minimum flux over edges in the path.
See Also
--------
msmbuilder.tpt.paths : function for calculating many high
flux paths through a network.
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Dijkstra, E. W. A Note on Two Problems in Connexion with Graphs.
       Numerische Mathematik 1, 269-271 (1959).
.. [5] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
sources = np.array(sources, dtype=np.int).reshape((-1,))
sinks = np.array(sinks, dtype=np.int).reshape((-1,))
n_states = net_flux.shape[0]
queue = list(sources)
# nodes to check (the "queue")
# going to use list.pop method so I can't keep it as an array
visited = np.zeros(n_states).astype(np.bool)
# have we already checked this node?
previous_node = np.ones(n_states).astype(np.int) * -1
# what node was found before finding this one
min_fluxes = np.ones(n_states) * -1 * np.inf
# what is the flux of the highest flux path
# from this node to the source set.
min_fluxes[sources] = np.inf
# source states are connected to the source
# so this distance is zero which means the flux is infinite
while len(queue) > 0: # iterate until there's nothing to check anymore
test_node = queue.pop(min_fluxes[queue].argmax())
# find the node in the queue that has the
# highest flux path to it from the source set
visited[test_node] = True
if np.all(visited[sinks]):
# if we've visited all of the sink states, then we just have to choose
# the path that goes to the sink state that is closest to the source
break
# if test_node in sinks: # I *think* we want to break ... or are there paths we still
# need to check?
# continue
# I think if sinks is more than one state we have to check everything
# now update the distances for each neighbor of the test_node:
neighbors = np.where(net_flux[test_node, :] > 0)[0]
if len(neighbors) == 0:
continue
new_fluxes = net_flux[test_node, neighbors].flatten()
# flux from test_node to each neighbor
new_fluxes[np.where(new_fluxes > min_fluxes[test_node])] = min_fluxes[test_node]
# previous step to get to test_node was lower flux, so that is still the path flux
ind = np.where((1 - visited[neighbors]) & (new_fluxes > min_fluxes[neighbors]))
min_fluxes[neighbors[ind]] = new_fluxes[ind]
previous_node[neighbors[ind]] = test_node
# each of these neighbors came from this test_node
# we don't want to update the nodes that have already been visited
queue.extend(neighbors[ind])
top_path = []
# populate the path in reverse
top_path.append(int(sinks[min_fluxes[sinks].argmax()]))
# find the closest sink state
while previous_node[top_path[-1]] != -1:
top_path.append(previous_node[top_path[-1]])
return np.array(top_path[::-1]), min_fluxes[top_path[0]]
|
def function[top_path, parameter[sources, sinks, net_flux]]:
constant[
Use the Dijkstra algorithm for finding the shortest path
connecting a set of source states from a set of sink states.
Parameters
----------
sources : array_like, int
One-dimensional list of nodes to define the source states.
sinks : array_like, int
One-dimensional list of nodes to define the sink states.
net_flux : np.ndarray, shape = [n_states, n_states]
Net flux of the MSM
Returns
-------
top_path : np.ndarray
Array corresponding to the top path between sources and
sinks. It is an array of states visited along the path.
flux : float
Flux traveling through this path -- this is equal to the
minimum flux over edges in the path.
See Also
--------
msmbuilder.tpt.paths : function for calculating many high
flux paths through a network.
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Dijkstra, E. W. A Note on Two Problems in Connexion with Graphs.
       Numerische Mathematik 1, 269-271 (1959).
.. [5] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
]
variable[sources] assign[=] call[call[name[np].array, parameter[name[sources]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b06c6110>]]]]
variable[sinks] assign[=] call[call[name[np].array, parameter[name[sinks]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b07af850>]]]]
variable[n_states] assign[=] call[name[net_flux].shape][constant[0]]
variable[queue] assign[=] call[name[list], parameter[name[sources]]]
variable[visited] assign[=] call[call[name[np].zeros, parameter[name[n_states]]].astype, parameter[name[np].bool]]
variable[previous_node] assign[=] binary_operation[call[call[name[np].ones, parameter[name[n_states]]].astype, parameter[name[np].int]] * <ast.UnaryOp object at 0x7da1b07ae590>]
variable[min_fluxes] assign[=] binary_operation[binary_operation[call[name[np].ones, parameter[name[n_states]]] * <ast.UnaryOp object at 0x7da1b07ad6f0>] * name[np].inf]
call[name[min_fluxes]][name[sources]] assign[=] name[np].inf
while compare[call[name[len], parameter[name[queue]]] greater[>] constant[0]] begin[:]
variable[test_node] assign[=] call[name[queue].pop, parameter[call[call[name[min_fluxes]][name[queue]].argmax, parameter[]]]]
call[name[visited]][name[test_node]] assign[=] constant[True]
if call[name[np].all, parameter[call[name[visited]][name[sinks]]]] begin[:]
break
variable[neighbors] assign[=] call[call[name[np].where, parameter[compare[call[name[net_flux]][tuple[[<ast.Name object at 0x7da1b07ac5b0>, <ast.Slice object at 0x7da1b07ad750>]]] greater[>] constant[0]]]]][constant[0]]
if compare[call[name[len], parameter[name[neighbors]]] equal[==] constant[0]] begin[:]
continue
variable[new_fluxes] assign[=] call[call[name[net_flux]][tuple[[<ast.Name object at 0x7da1b07ae110>, <ast.Name object at 0x7da1b07ae3b0>]]].flatten, parameter[]]
call[name[new_fluxes]][call[name[np].where, parameter[compare[name[new_fluxes] greater[>] call[name[min_fluxes]][name[test_node]]]]]] assign[=] call[name[min_fluxes]][name[test_node]]
variable[ind] assign[=] call[name[np].where, parameter[binary_operation[binary_operation[constant[1] - call[name[visited]][name[neighbors]]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[new_fluxes] greater[>] call[name[min_fluxes]][name[neighbors]]]]]]
call[name[min_fluxes]][call[name[neighbors]][name[ind]]] assign[=] call[name[new_fluxes]][name[ind]]
call[name[previous_node]][call[name[neighbors]][name[ind]]] assign[=] name[test_node]
call[name[queue].extend, parameter[call[name[neighbors]][name[ind]]]]
variable[top_path] assign[=] list[[]]
call[name[top_path].append, parameter[call[name[int], parameter[call[name[sinks]][call[call[name[min_fluxes]][name[sinks]].argmax, parameter[]]]]]]]
while compare[call[name[previous_node]][call[name[top_path]][<ast.UnaryOp object at 0x7da1b063e9e0>]] not_equal[!=] <ast.UnaryOp object at 0x7da1b063d480>] begin[:]
call[name[top_path].append, parameter[call[name[previous_node]][call[name[top_path]][<ast.UnaryOp object at 0x7da1b063ea40>]]]]
return[tuple[[<ast.Call object at 0x7da1b063feb0>, <ast.Subscript object at 0x7da1b0785300>]]]
|
keyword[def] identifier[top_path] ( identifier[sources] , identifier[sinks] , identifier[net_flux] ):
literal[string]
identifier[sources] = identifier[np] . identifier[array] ( identifier[sources] , identifier[dtype] = identifier[np] . identifier[int] ). identifier[reshape] ((- literal[int] ,))
identifier[sinks] = identifier[np] . identifier[array] ( identifier[sinks] , identifier[dtype] = identifier[np] . identifier[int] ). identifier[reshape] ((- literal[int] ,))
identifier[n_states] = identifier[net_flux] . identifier[shape] [ literal[int] ]
identifier[queue] = identifier[list] ( identifier[sources] )
identifier[visited] = identifier[np] . identifier[zeros] ( identifier[n_states] ). identifier[astype] ( identifier[np] . identifier[bool] )
identifier[previous_node] = identifier[np] . identifier[ones] ( identifier[n_states] ). identifier[astype] ( identifier[np] . identifier[int] )*- literal[int]
identifier[min_fluxes] = identifier[np] . identifier[ones] ( identifier[n_states] )*- literal[int] * identifier[np] . identifier[inf]
identifier[min_fluxes] [ identifier[sources] ]= identifier[np] . identifier[inf]
keyword[while] identifier[len] ( identifier[queue] )> literal[int] :
identifier[test_node] = identifier[queue] . identifier[pop] ( identifier[min_fluxes] [ identifier[queue] ]. identifier[argmax] ())
identifier[visited] [ identifier[test_node] ]= keyword[True]
keyword[if] identifier[np] . identifier[all] ( identifier[visited] [ identifier[sinks] ]):
keyword[break]
identifier[neighbors] = identifier[np] . identifier[where] ( identifier[net_flux] [ identifier[test_node] ,:]> literal[int] )[ literal[int] ]
keyword[if] identifier[len] ( identifier[neighbors] )== literal[int] :
keyword[continue]
identifier[new_fluxes] = identifier[net_flux] [ identifier[test_node] , identifier[neighbors] ]. identifier[flatten] ()
identifier[new_fluxes] [ identifier[np] . identifier[where] ( identifier[new_fluxes] > identifier[min_fluxes] [ identifier[test_node] ])]= identifier[min_fluxes] [ identifier[test_node] ]
identifier[ind] = identifier[np] . identifier[where] (( literal[int] - identifier[visited] [ identifier[neighbors] ])&( identifier[new_fluxes] > identifier[min_fluxes] [ identifier[neighbors] ]))
identifier[min_fluxes] [ identifier[neighbors] [ identifier[ind] ]]= identifier[new_fluxes] [ identifier[ind] ]
identifier[previous_node] [ identifier[neighbors] [ identifier[ind] ]]= identifier[test_node]
identifier[queue] . identifier[extend] ( identifier[neighbors] [ identifier[ind] ])
identifier[top_path] =[]
identifier[top_path] . identifier[append] ( identifier[int] ( identifier[sinks] [ identifier[min_fluxes] [ identifier[sinks] ]. identifier[argmax] ()]))
keyword[while] identifier[previous_node] [ identifier[top_path] [- literal[int] ]]!=- literal[int] :
identifier[top_path] . identifier[append] ( identifier[previous_node] [ identifier[top_path] [- literal[int] ]])
keyword[return] identifier[np] . identifier[array] ( identifier[top_path] [::- literal[int] ]), identifier[min_fluxes] [ identifier[top_path] [ literal[int] ]]
|
def top_path(sources, sinks, net_flux):
"""
Use the Dijkstra algorithm for finding the shortest path
connecting a set of source states from a set of sink states.
Parameters
----------
sources : array_like, int
One-dimensional list of nodes to define the source states.
sinks : array_like, int
One-dimensional list of nodes to define the sink states.
net_flux : np.ndarray, shape = [n_states, n_states]
Net flux of the MSM
Returns
-------
top_path : np.ndarray
Array corresponding to the top path between sources and
sinks. It is an array of states visited along the path.
flux : float
Flux traveling through this path -- this is equal to the
minimum flux over edges in the path.
See Also
--------
msmbuilder.tpt.paths : function for calculating many high
flux paths through a network.
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Dijkstra, E. W. A Note on Two Problems in Connexion with Graphs.
       Numerische Mathematik 1, 269-271 (1959).
.. [5] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
sources = np.array(sources, dtype=np.int).reshape((-1,))
sinks = np.array(sinks, dtype=np.int).reshape((-1,))
n_states = net_flux.shape[0]
queue = list(sources)
# nodes to check (the "queue")
# going to use list.pop method so I can't keep it as an array
visited = np.zeros(n_states).astype(np.bool)
# have we already checked this node?
previous_node = np.ones(n_states).astype(np.int) * -1
# what node was found before finding this one
min_fluxes = np.ones(n_states) * -1 * np.inf
# what is the flux of the highest flux path
# from this node to the source set.
min_fluxes[sources] = np.inf
# source states are connected to the source
# so this distance is zero which means the flux is infinite
while len(queue) > 0: # iterate until there's nothing to check anymore
test_node = queue.pop(min_fluxes[queue].argmax())
# find the node in the queue that has the
# highest flux path to it from the source set
visited[test_node] = True
if np.all(visited[sinks]):
# if we've visited all of the sink states, then we just have to choose
# the path that goes to the sink state that is closest to the source
break # depends on [control=['if'], data=[]]
# if test_node in sinks: # I *think* we want to break ... or are there paths we still
# need to check?
# continue
# I think if sinks is more than one state we have to check everything
# now update the distances for each neighbor of the test_node:
neighbors = np.where(net_flux[test_node, :] > 0)[0]
if len(neighbors) == 0:
continue # depends on [control=['if'], data=[]]
new_fluxes = net_flux[test_node, neighbors].flatten()
# flux from test_node to each neighbor
new_fluxes[np.where(new_fluxes > min_fluxes[test_node])] = min_fluxes[test_node]
# previous step to get to test_node was lower flux, so that is still the path flux
        ind = np.where((1 - visited[neighbors]) & (new_fluxes > min_fluxes[neighbors]))
min_fluxes[neighbors[ind]] = new_fluxes[ind]
previous_node[neighbors[ind]] = test_node
# each of these neighbors came from this test_node
# we don't want to update the nodes that have already been visited
queue.extend(neighbors[ind]) # depends on [control=['while'], data=[]]
top_path = []
# populate the path in reverse
top_path.append(int(sinks[min_fluxes[sinks].argmax()]))
# find the closest sink state
while previous_node[top_path[-1]] != -1:
top_path.append(previous_node[top_path[-1]]) # depends on [control=['while'], data=[]]
return (np.array(top_path[::-1]), min_fluxes[top_path[0]])
|
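A minimal usage sketch for top_path (not part of the dataset row above; the 3-state net_flux matrix is hypothetical). Note that the stored function uses np.int and np.bool, aliases removed in NumPy 1.24, so it needs an older NumPy (or those aliases swapped for the builtins) to run as written.

import numpy as np

# Hypothetical 3-state net flux matrix: state 0 is the source, state 2 the sink.
# Two routes exist: 0 -> 1 -> 2 with bottleneck flux 0.5, and 0 -> 2 with flux 0.1.
net_flux = np.array([[0.0, 0.5, 0.1],
                     [0.0, 0.0, 0.5],
                     [0.0, 0.0, 0.0]])

path, flux = top_path(sources=[0], sinks=[2], net_flux=net_flux)
print(path)  # expected: [0 1 2] -- the indirect route carries more flux
print(flux)  # expected: 0.5, the minimum (bottleneck) flux over the path's edges

The algorithm always pops the queued node with the highest bottleneck flux, so the direct 0 -> 2 edge (flux 0.1) is superseded once the 0 -> 1 -> 2 route (bottleneck 0.5) reaches the sink.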
def combine_action_handlers(*handlers):
"""
This function combines the given action handlers into a single function
which will call all of them.
"""
# make sure each of the given handlers is callable
for handler in handlers:
# if the handler is not a function
if not (iscoroutinefunction(handler) or iscoroutine(handler)):
# yell loudly
raise ValueError("Provided handler is not a coroutine: %s" % handler)
# the combined action handler
async def combined_handler(*args, **kwds):
# goes over every given handler
for handler in handlers:
# call the handler
await handler(*args, **kwds)
# return the combined action handler
return combined_handler
|
def function[combine_action_handlers, parameter[]]:
constant[
This function combines the given action handlers into a single function
which will call all of them.
]
for taget[name[handler]] in starred[name[handlers]] begin[:]
if <ast.UnaryOp object at 0x7da18f00f340> begin[:]
<ast.Raise object at 0x7da18f00f190>
<ast.AsyncFunctionDef object at 0x7da18f00d4e0>
return[name[combined_handler]]
|
keyword[def] identifier[combine_action_handlers] (* identifier[handlers] ):
literal[string]
keyword[for] identifier[handler] keyword[in] identifier[handlers] :
keyword[if] keyword[not] ( identifier[iscoroutinefunction] ( identifier[handler] ) keyword[or] identifier[iscoroutine] ( identifier[handler] )):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[handler] )
keyword[async] keyword[def] identifier[combined_handler] (* identifier[args] ,** identifier[kwds] ):
keyword[for] identifier[handler] keyword[in] identifier[handlers] :
keyword[await] identifier[handler] (* identifier[args] ,** identifier[kwds] )
keyword[return] identifier[combined_handler]
|
def combine_action_handlers(*handlers):
"""
This function combines the given action handlers into a single function
which will call all of them.
"""
# make sure each of the given handlers is callable
for handler in handlers:
# if the handler is not a function
if not (iscoroutinefunction(handler) or iscoroutine(handler)):
# yell loudly
raise ValueError('Provided handler is not a coroutine: %s' % handler) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handler']]
# the combined action handler
async def combined_handler(*args, **kwds):
# goes over every given handler
for handler in handlers:
# call the handler
await handler(*args, **kwds) # depends on [control=['for'], data=['handler']]
# return the combined action handler
return combined_handler
|
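A minimal usage sketch for combine_action_handlers (handler names are hypothetical). The stored snippet assumes iscoroutinefunction and iscoroutine are already in scope, e.g. imported from inspect. Note that a bare coroutine object passes the validation yet would fail when combined_handler tries to call it, so in practice only coroutine functions should be passed.

import asyncio
from inspect import iscoroutine, iscoroutinefunction  # used by combine_action_handlers

# Two toy coroutine handlers, for illustration only.
async def log_action(action):
    print('logging:', action)

async def dispatch_action(action):
    print('dispatching:', action)

async def main():
    # The combined handler awaits each handler in turn, in the order given.
    handler = combine_action_handlers(log_action, dispatch_action)
    await handler({'type': 'PING'})

asyncio.run(main())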