code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def read_user_yes_no(question, default_value):
"""Prompt the user to reply with 'yes' or 'no' (or equivalent values).
Note:
Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
:param str question: Question to the user
:param default_value: Value that will be returned if no input happens
"""
# Please see http://click.pocoo.org/4/api/#click.prompt
return click.prompt(
question,
default=default_value,
type=click.BOOL
) | def function[read_user_yes_no, parameter[question, default_value]]:
constant[Prompt the user to reply with 'yes' or 'no' (or equivalent values).
Note:
Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
:param str question: Question to the user
:param default_value: Value that will be returned if no input happens
]
return[call[name[click].prompt, parameter[name[question]]]] | keyword[def] identifier[read_user_yes_no] ( identifier[question] , identifier[default_value] ):
literal[string]
keyword[return] identifier[click] . identifier[prompt] (
identifier[question] ,
identifier[default] = identifier[default_value] ,
identifier[type] = identifier[click] . identifier[BOOL]
) | def read_user_yes_no(question, default_value):
"""Prompt the user to reply with 'yes' or 'no' (or equivalent values).
Note:
Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
:param str question: Question to the user
:param default_value: Value that will be returned if no input happens
"""
# Please see http://click.pocoo.org/4/api/#click.prompt
return click.prompt(question, default=default_value, type=click.BOOL) |
def getContinuousData(self, referenceName=None, start=None, end=None):
"""
Returns a set number of simulated continuous data.
:param referenceName: name of reference to "search" on
:param start: start coordinate of query
:param end: end coordinate of query
:return: Yields continuous list
"""
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(self._randomSeed)
for i in range(100):
gaContinuous = self._generateSimulatedContinuous(
randomNumberGenerator)
match = (
gaContinuous.start < end and
gaContinuous.end > start and
gaContinuous.reference_name == referenceName)
if match:
yield gaContinuous | def function[getContinuousData, parameter[self, referenceName, start, end]]:
constant[
Returns a set number of simulated continuous data.
:param referenceName: name of reference to "search" on
:param start: start coordinate of query
:param end: end coordinate of query
:return: Yields continuous list
]
variable[randomNumberGenerator] assign[=] call[name[random].Random, parameter[]]
call[name[randomNumberGenerator].seed, parameter[name[self]._randomSeed]]
for taget[name[i]] in starred[call[name[range], parameter[constant[100]]]] begin[:]
variable[gaContinuous] assign[=] call[name[self]._generateSimulatedContinuous, parameter[name[randomNumberGenerator]]]
variable[match] assign[=] <ast.BoolOp object at 0x7da207f033a0>
if name[match] begin[:]
<ast.Yield object at 0x7da207f00b50> | keyword[def] identifier[getContinuousData] ( identifier[self] , identifier[referenceName] = keyword[None] , identifier[start] = keyword[None] , identifier[end] = keyword[None] ):
literal[string]
identifier[randomNumberGenerator] = identifier[random] . identifier[Random] ()
identifier[randomNumberGenerator] . identifier[seed] ( identifier[self] . identifier[_randomSeed] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[gaContinuous] = identifier[self] . identifier[_generateSimulatedContinuous] (
identifier[randomNumberGenerator] )
identifier[match] =(
identifier[gaContinuous] . identifier[start] < identifier[end] keyword[and]
identifier[gaContinuous] . identifier[end] > identifier[start] keyword[and]
identifier[gaContinuous] . identifier[reference_name] == identifier[referenceName] )
keyword[if] identifier[match] :
keyword[yield] identifier[gaContinuous] | def getContinuousData(self, referenceName=None, start=None, end=None):
"""
Returns a set number of simulated continuous data.
:param referenceName: name of reference to "search" on
:param start: start coordinate of query
:param end: end coordinate of query
:return: Yields continuous list
"""
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(self._randomSeed)
for i in range(100):
gaContinuous = self._generateSimulatedContinuous(randomNumberGenerator)
match = gaContinuous.start < end and gaContinuous.end > start and (gaContinuous.reference_name == referenceName)
if match:
yield gaContinuous # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def has_default_file(cls):
"""Check if a configuration file exists."""
for filename in cls.config_files:
for searchpath in cls.config_searchpath:
path = os.path.join(searchpath, filename)
if os.path.exists(path):
return True
return False | def function[has_default_file, parameter[cls]]:
constant[Check if a configuration file exists.]
for taget[name[filename]] in starred[name[cls].config_files] begin[:]
for taget[name[searchpath]] in starred[name[cls].config_searchpath] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[searchpath], name[filename]]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[has_default_file] ( identifier[cls] ):
literal[string]
keyword[for] identifier[filename] keyword[in] identifier[cls] . identifier[config_files] :
keyword[for] identifier[searchpath] keyword[in] identifier[cls] . identifier[config_searchpath] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[searchpath] , identifier[filename] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def has_default_file(cls):
"""Check if a configuration file exists."""
for filename in cls.config_files:
for searchpath in cls.config_searchpath:
path = os.path.join(searchpath, filename)
if os.path.exists(path):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['searchpath']] # depends on [control=['for'], data=['filename']]
return False |
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt) | def function[_onMotion, parameter[self, evt]]:
constant[Start measuring on an axis.]
variable[x] assign[=] call[name[evt].GetX, parameter[]]
variable[y] assign[=] binary_operation[name[self].figure.bbox.height - call[name[evt].GetY, parameter[]]]
call[name[evt].Skip, parameter[]]
call[name[FigureCanvasBase].motion_notify_event, parameter[name[self], name[x], name[y]]] | keyword[def] identifier[_onMotion] ( identifier[self] , identifier[evt] ):
literal[string]
identifier[x] = identifier[evt] . identifier[GetX] ()
identifier[y] = identifier[self] . identifier[figure] . identifier[bbox] . identifier[height] - identifier[evt] . identifier[GetY] ()
identifier[evt] . identifier[Skip] ()
identifier[FigureCanvasBase] . identifier[motion_notify_event] ( identifier[self] , identifier[x] , identifier[y] , identifier[guiEvent] = identifier[evt] ) | def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt) |
def disable(self, threads=True):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call') | def function[disable, parameter[self, threads]]:
constant[
Disable profiling.
]
if name[self].enabled_start begin[:]
call[name[sys].settrace, parameter[constant[None]]]
call[name[self]._disable, parameter[]] | keyword[def] identifier[disable] ( identifier[self] , identifier[threads] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[enabled_start] :
identifier[sys] . identifier[settrace] ( keyword[None] )
identifier[self] . identifier[_disable] ()
keyword[else] :
identifier[warn] ( literal[string] ) | def disable(self, threads=True):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable() # depends on [control=['if'], data=[]]
else:
warn('Duplicate "disable" call') |
def start(self, version=None, want_rgb=True, **kwargs):
"""Launch the game."""
del want_rgb # Unused
if not os.path.isdir(self.data_dir):
raise sc_process.SC2LaunchError(
"Expected to find StarCraft II installed at '%s'. If it's not "
"installed, do that and run it once so auto-detection works. If "
"auto-detection failed repeatedly, then set the SC2PATH environment "
"variable with the correct location." % self.data_dir)
version = version or FLAGS.sc2_version
if isinstance(version, lib.Version) and not version.data_version:
# This is for old replays that don't have the embedded data_version.
version = self._get_version(version.game_version)
elif isinstance(version, six.string_types):
version = self._get_version(version)
elif not version:
version = self._get_version("latest")
if version.build_version < lib.VERSIONS["3.16.1"].build_version:
raise sc_process.SC2LaunchError(
"SC2 Binaries older than 3.16.1 don't support the api.")
if FLAGS.sc2_dev_build:
version = version._replace(build_version=0)
exec_path = os.path.join(
self.data_dir, "Versions/Base%05d" % version.build_version,
self._exec_name)
if not os.path.exists(exec_path):
raise sc_process.SC2LaunchError("No SC2 binary found at: %s" % exec_path)
return sc_process.StarcraftProcess(
self, exec_path=exec_path, version=version, **kwargs) | def function[start, parameter[self, version, want_rgb]]:
constant[Launch the game.]
<ast.Delete object at 0x7da20c7c9f90>
if <ast.UnaryOp object at 0x7da20c7c9540> begin[:]
<ast.Raise object at 0x7da20c7c9510>
variable[version] assign[=] <ast.BoolOp object at 0x7da20c7c9180>
if <ast.BoolOp object at 0x7da20c7c8dc0> begin[:]
variable[version] assign[=] call[name[self]._get_version, parameter[name[version].game_version]]
if compare[name[version].build_version less[<] call[name[lib].VERSIONS][constant[3.16.1]].build_version] begin[:]
<ast.Raise object at 0x7da20e954430>
if name[FLAGS].sc2_dev_build begin[:]
variable[version] assign[=] call[name[version]._replace, parameter[]]
variable[exec_path] assign[=] call[name[os].path.join, parameter[name[self].data_dir, binary_operation[constant[Versions/Base%05d] <ast.Mod object at 0x7da2590d6920> name[version].build_version], name[self]._exec_name]]
if <ast.UnaryOp object at 0x7da20e9576d0> begin[:]
<ast.Raise object at 0x7da20e957f70>
return[call[name[sc_process].StarcraftProcess, parameter[name[self]]]] | keyword[def] identifier[start] ( identifier[self] , identifier[version] = keyword[None] , identifier[want_rgb] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[del] identifier[want_rgb]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[data_dir] ):
keyword[raise] identifier[sc_process] . identifier[SC2LaunchError] (
literal[string]
literal[string]
literal[string]
literal[string] % identifier[self] . identifier[data_dir] )
identifier[version] = identifier[version] keyword[or] identifier[FLAGS] . identifier[sc2_version]
keyword[if] identifier[isinstance] ( identifier[version] , identifier[lib] . identifier[Version] ) keyword[and] keyword[not] identifier[version] . identifier[data_version] :
identifier[version] = identifier[self] . identifier[_get_version] ( identifier[version] . identifier[game_version] )
keyword[elif] identifier[isinstance] ( identifier[version] , identifier[six] . identifier[string_types] ):
identifier[version] = identifier[self] . identifier[_get_version] ( identifier[version] )
keyword[elif] keyword[not] identifier[version] :
identifier[version] = identifier[self] . identifier[_get_version] ( literal[string] )
keyword[if] identifier[version] . identifier[build_version] < identifier[lib] . identifier[VERSIONS] [ literal[string] ]. identifier[build_version] :
keyword[raise] identifier[sc_process] . identifier[SC2LaunchError] (
literal[string] )
keyword[if] identifier[FLAGS] . identifier[sc2_dev_build] :
identifier[version] = identifier[version] . identifier[_replace] ( identifier[build_version] = literal[int] )
identifier[exec_path] = identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[data_dir] , literal[string] % identifier[version] . identifier[build_version] ,
identifier[self] . identifier[_exec_name] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[exec_path] ):
keyword[raise] identifier[sc_process] . identifier[SC2LaunchError] ( literal[string] % identifier[exec_path] )
keyword[return] identifier[sc_process] . identifier[StarcraftProcess] (
identifier[self] , identifier[exec_path] = identifier[exec_path] , identifier[version] = identifier[version] ,** identifier[kwargs] ) | def start(self, version=None, want_rgb=True, **kwargs):
"""Launch the game."""
del want_rgb # Unused
if not os.path.isdir(self.data_dir):
raise sc_process.SC2LaunchError("Expected to find StarCraft II installed at '%s'. If it's not installed, do that and run it once so auto-detection works. If auto-detection failed repeatedly, then set the SC2PATH environment variable with the correct location." % self.data_dir) # depends on [control=['if'], data=[]]
version = version or FLAGS.sc2_version
if isinstance(version, lib.Version) and (not version.data_version):
# This is for old replays that don't have the embedded data_version.
version = self._get_version(version.game_version) # depends on [control=['if'], data=[]]
elif isinstance(version, six.string_types):
version = self._get_version(version) # depends on [control=['if'], data=[]]
elif not version:
version = self._get_version('latest') # depends on [control=['if'], data=[]]
if version.build_version < lib.VERSIONS['3.16.1'].build_version:
raise sc_process.SC2LaunchError("SC2 Binaries older than 3.16.1 don't support the api.") # depends on [control=['if'], data=[]]
if FLAGS.sc2_dev_build:
version = version._replace(build_version=0) # depends on [control=['if'], data=[]]
exec_path = os.path.join(self.data_dir, 'Versions/Base%05d' % version.build_version, self._exec_name)
if not os.path.exists(exec_path):
raise sc_process.SC2LaunchError('No SC2 binary found at: %s' % exec_path) # depends on [control=['if'], data=[]]
return sc_process.StarcraftProcess(self, exec_path=exec_path, version=version, **kwargs) |
async def get_oauth_verifier(oauth_token):
"""
Open authorize page in a browser,
print the url if it didn't work
Arguments
---------
oauth_token : str
The oauth token received in :func:`get_oauth_token`
Returns
-------
str
The PIN entered by the user
"""
url = "https://api.twitter.com/oauth/authorize?oauth_token=" + oauth_token
try:
browser = webbrowser.open(url)
await asyncio.sleep(2)
if not browser:
raise RuntimeError
except RuntimeError:
print("could not open a browser\ngo here to enter your PIN: " + url)
verifier = input("\nEnter your PIN: ")
return verifier | <ast.AsyncFunctionDef object at 0x7da1b0004b80> | keyword[async] keyword[def] identifier[get_oauth_verifier] ( identifier[oauth_token] ):
literal[string]
identifier[url] = literal[string] + identifier[oauth_token]
keyword[try] :
identifier[browser] = identifier[webbrowser] . identifier[open] ( identifier[url] )
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
keyword[if] keyword[not] identifier[browser] :
keyword[raise] identifier[RuntimeError]
keyword[except] identifier[RuntimeError] :
identifier[print] ( literal[string] + identifier[url] )
identifier[verifier] = identifier[input] ( literal[string] )
keyword[return] identifier[verifier] | async def get_oauth_verifier(oauth_token):
"""
Open authorize page in a browser,
print the url if it didn't work
Arguments
---------
oauth_token : str
The oauth token received in :func:`get_oauth_token`
Returns
-------
str
The PIN entered by the user
"""
url = 'https://api.twitter.com/oauth/authorize?oauth_token=' + oauth_token
try:
browser = webbrowser.open(url)
await asyncio.sleep(2)
if not browser:
raise RuntimeError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except RuntimeError:
print('could not open a browser\ngo here to enter your PIN: ' + url) # depends on [control=['except'], data=[]]
verifier = input('\nEnter your PIN: ')
return verifier |
def p_identifier(self, tree):
''' V ::= IDENTIFIER '''
tree.value = tree.attr
tree.svalue = tree.attr | def function[p_identifier, parameter[self, tree]]:
constant[ V ::= IDENTIFIER ]
name[tree].value assign[=] name[tree].attr
name[tree].svalue assign[=] name[tree].attr | keyword[def] identifier[p_identifier] ( identifier[self] , identifier[tree] ):
literal[string]
identifier[tree] . identifier[value] = identifier[tree] . identifier[attr]
identifier[tree] . identifier[svalue] = identifier[tree] . identifier[attr] | def p_identifier(self, tree):
""" V ::= IDENTIFIER """
tree.value = tree.attr
tree.svalue = tree.attr |
def _fmt_files(filelist):
""" Produce a file listing.
"""
depth = max(i.path.count('/') for i in filelist)
pad = ['\uFFFE'] * depth
base_indent = ' ' * 38
indent = 0
result = []
prev_path = pad
sorted_files = sorted((i.path.split('/')[:-1]+pad, i.path.rsplit('/', 1)[-1], i) for i in filelist)
for path, name, fileinfo in sorted_files:
path = path[:depth]
if path != prev_path:
common = min([depth] + [idx
for idx, (dirname, prev_name) in enumerate(zip(path, prev_path))
if dirname != prev_name
])
#result.append("!!%r %r" % (indent, common))
#result.append("!!%r" % (prev_path,))
#result.append("!!%r" % (path,))
while indent > common:
indent -= 1
result.append("%s%s/" % (base_indent, ' ' * indent))
for dirname in path[common:]:
if dirname == '\uFFFE':
break
result.append("%s%s\\ %s" % (base_indent, ' ' * indent, dirname))
indent += 1
##result.append("!!%r %r" % (path, name))
result.append(" %s %s %s %s| %s" % (
{0: "off ", 1: " ", 2: "high"}.get(fileinfo.prio, "????"),
fmt.iso_datetime(fileinfo.mtime),
fmt.human_size(fileinfo.size),
' ' * indent, name,
))
prev_path = path
while indent > 0:
indent -= 1
result.append("%s%s/" % (base_indent, ' ' * indent))
result.append("%s= %d file(s)" % (base_indent, len(filelist)))
return '\n'.join(result) | def function[_fmt_files, parameter[filelist]]:
constant[ Produce a file listing.
]
variable[depth] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da20c6ab790>]]
variable[pad] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20c6a9c60>]] * name[depth]]
variable[base_indent] assign[=] binary_operation[constant[ ] * constant[38]]
variable[indent] assign[=] constant[0]
variable[result] assign[=] list[[]]
variable[prev_path] assign[=] name[pad]
variable[sorted_files] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da207f013c0>]]
for taget[tuple[[<ast.Name object at 0x7da20c6a9300>, <ast.Name object at 0x7da20c6abbe0>, <ast.Name object at 0x7da20c6a98a0>]]] in starred[name[sorted_files]] begin[:]
variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da20c6aa0e0>]
if compare[name[path] not_equal[!=] name[prev_path]] begin[:]
variable[common] assign[=] call[name[min], parameter[binary_operation[list[[<ast.Name object at 0x7da20c6a84f0>]] + <ast.ListComp object at 0x7da20c6a86d0>]]]
while compare[name[indent] greater[>] name[common]] begin[:]
<ast.AugAssign object at 0x7da20c6a88b0>
call[name[result].append, parameter[binary_operation[constant[%s%s/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6ab640>, <ast.BinOp object at 0x7da20c6a9900>]]]]]
for taget[name[dirname]] in starred[call[name[path]][<ast.Slice object at 0x7da20c6a99f0>]] begin[:]
if compare[name[dirname] equal[==] constant[]] begin[:]
break
call[name[result].append, parameter[binary_operation[constant[%s%s\ %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6a95a0>, <ast.BinOp object at 0x7da20c6aa110>, <ast.Name object at 0x7da20c6a9810>]]]]]
<ast.AugAssign object at 0x7da20c6a9ae0>
call[name[result].append, parameter[binary_operation[constant[ %s %s %s %s| %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c6abe80>, <ast.Call object at 0x7da20c6a9b70>, <ast.Call object at 0x7da20c6aa170>, <ast.BinOp object at 0x7da20c6a8820>, <ast.Name object at 0x7da20c6aacb0>]]]]]
variable[prev_path] assign[=] name[path]
while compare[name[indent] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c6aa920>
call[name[result].append, parameter[binary_operation[constant[%s%s/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6ab730>, <ast.BinOp object at 0x7da20c6a80d0>]]]]]
call[name[result].append, parameter[binary_operation[constant[%s= %d file(s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6aa8c0>, <ast.Call object at 0x7da20c6a9a80>]]]]]
return[call[constant[
].join, parameter[name[result]]]] | keyword[def] identifier[_fmt_files] ( identifier[filelist] ):
literal[string]
identifier[depth] = identifier[max] ( identifier[i] . identifier[path] . identifier[count] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[filelist] )
identifier[pad] =[ literal[string] ]* identifier[depth]
identifier[base_indent] = literal[string] * literal[int]
identifier[indent] = literal[int]
identifier[result] =[]
identifier[prev_path] = identifier[pad]
identifier[sorted_files] = identifier[sorted] (( identifier[i] . identifier[path] . identifier[split] ( literal[string] )[:- literal[int] ]+ identifier[pad] , identifier[i] . identifier[path] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ], identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[filelist] )
keyword[for] identifier[path] , identifier[name] , identifier[fileinfo] keyword[in] identifier[sorted_files] :
identifier[path] = identifier[path] [: identifier[depth] ]
keyword[if] identifier[path] != identifier[prev_path] :
identifier[common] = identifier[min] ([ identifier[depth] ]+[ identifier[idx]
keyword[for] identifier[idx] ,( identifier[dirname] , identifier[prev_name] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[path] , identifier[prev_path] ))
keyword[if] identifier[dirname] != identifier[prev_name]
])
keyword[while] identifier[indent] > identifier[common] :
identifier[indent] -= literal[int]
identifier[result] . identifier[append] ( literal[string] %( identifier[base_indent] , literal[string] * identifier[indent] ))
keyword[for] identifier[dirname] keyword[in] identifier[path] [ identifier[common] :]:
keyword[if] identifier[dirname] == literal[string] :
keyword[break]
identifier[result] . identifier[append] ( literal[string] %( identifier[base_indent] , literal[string] * identifier[indent] , identifier[dirname] ))
identifier[indent] += literal[int]
identifier[result] . identifier[append] ( literal[string] %(
{ literal[int] : literal[string] , literal[int] : literal[string] , literal[int] : literal[string] }. identifier[get] ( identifier[fileinfo] . identifier[prio] , literal[string] ),
identifier[fmt] . identifier[iso_datetime] ( identifier[fileinfo] . identifier[mtime] ),
identifier[fmt] . identifier[human_size] ( identifier[fileinfo] . identifier[size] ),
literal[string] * identifier[indent] , identifier[name] ,
))
identifier[prev_path] = identifier[path]
keyword[while] identifier[indent] > literal[int] :
identifier[indent] -= literal[int]
identifier[result] . identifier[append] ( literal[string] %( identifier[base_indent] , literal[string] * identifier[indent] ))
identifier[result] . identifier[append] ( literal[string] %( identifier[base_indent] , identifier[len] ( identifier[filelist] )))
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def _fmt_files(filelist):
""" Produce a file listing.
"""
depth = max((i.path.count('/') for i in filelist))
pad = ['\ufffe'] * depth
base_indent = ' ' * 38
indent = 0
result = []
prev_path = pad
sorted_files = sorted(((i.path.split('/')[:-1] + pad, i.path.rsplit('/', 1)[-1], i) for i in filelist))
for (path, name, fileinfo) in sorted_files:
path = path[:depth]
if path != prev_path:
common = min([depth] + [idx for (idx, (dirname, prev_name)) in enumerate(zip(path, prev_path)) if dirname != prev_name])
#result.append("!!%r %r" % (indent, common))
#result.append("!!%r" % (prev_path,))
#result.append("!!%r" % (path,))
while indent > common:
indent -= 1
result.append('%s%s/' % (base_indent, ' ' * indent)) # depends on [control=['while'], data=['indent']]
for dirname in path[common:]:
if dirname == '\ufffe':
break # depends on [control=['if'], data=[]]
result.append('%s%s\\ %s' % (base_indent, ' ' * indent, dirname))
indent += 1 # depends on [control=['for'], data=['dirname']] # depends on [control=['if'], data=['path', 'prev_path']]
##result.append("!!%r %r" % (path, name))
result.append(' %s %s %s %s| %s' % ({0: 'off ', 1: ' ', 2: 'high'}.get(fileinfo.prio, '????'), fmt.iso_datetime(fileinfo.mtime), fmt.human_size(fileinfo.size), ' ' * indent, name))
prev_path = path # depends on [control=['for'], data=[]]
while indent > 0:
indent -= 1
result.append('%s%s/' % (base_indent, ' ' * indent)) # depends on [control=['while'], data=['indent']]
result.append('%s= %d file(s)' % (base_indent, len(filelist)))
return '\n'.join(result) |
def get_book_hierarchy_design_session(self, proxy):
"""Gets the ``OsidSession`` associated with the book hierarchy design service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.commenting.BookHierarchyDesignSession) - a
``BookHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_book_hierarchy_design()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_book_hierarchy_design()`` is ``true``.*
"""
if not self.supports_book_hierarchy_design():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.BookHierarchyDesignSession(proxy=proxy, runtime=self._runtime) | def function[get_book_hierarchy_design_session, parameter[self, proxy]]:
constant[Gets the ``OsidSession`` associated with the book hierarchy design service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.commenting.BookHierarchyDesignSession) - a
``BookHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_book_hierarchy_design()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_book_hierarchy_design()`` is ``true``.*
]
if <ast.UnaryOp object at 0x7da18dc04bb0> begin[:]
<ast.Raise object at 0x7da18dc07160>
return[call[name[sessions].BookHierarchyDesignSession, parameter[]]] | keyword[def] identifier[get_book_hierarchy_design_session] ( identifier[self] , identifier[proxy] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_book_hierarchy_design] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[BookHierarchyDesignSession] ( identifier[proxy] = identifier[proxy] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_book_hierarchy_design_session(self, proxy):
"""Gets the ``OsidSession`` associated with the book hierarchy design service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.commenting.BookHierarchyDesignSession) - a
``BookHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_book_hierarchy_design()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_book_hierarchy_design()`` is ``true``.*
"""
if not self.supports_book_hierarchy_design():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
# pylint: disable=no-member
return sessions.BookHierarchyDesignSession(proxy=proxy, runtime=self._runtime) |
def read_file(self, filename):
"""
Guess the filetype and read the file into row sets
"""
#print("Reading file", filename)
try:
fh = open(filename, 'rb')
table_set = any_tableset(fh) # guess the type...
except:
#traceback.print_exc()
# Cannot find the schema.
table_set = None
return table_set | def function[read_file, parameter[self, filename]]:
constant[
Guess the filetype and read the file into row sets
]
<ast.Try object at 0x7da1afe07880>
return[name[table_set]] | keyword[def] identifier[read_file] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[try] :
identifier[fh] = identifier[open] ( identifier[filename] , literal[string] )
identifier[table_set] = identifier[any_tableset] ( identifier[fh] )
keyword[except] :
identifier[table_set] = keyword[None]
keyword[return] identifier[table_set] | def read_file(self, filename):
"""
Guess the filetype and read the file into row sets
"""
#print("Reading file", filename)
try:
fh = open(filename, 'rb')
table_set = any_tableset(fh) # guess the type... # depends on [control=['try'], data=[]]
except:
#traceback.print_exc()
# Cannot find the schema.
table_set = None # depends on [control=['except'], data=[]]
return table_set |
def delete_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_
:arg job_id: The ID of the job to delete
:arg force: True if the job should be forcefully deleted, default False
:arg wait_for_completion: Should this request wait until the operation
has completed before returning, default True
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request(
"DELETE", _make_path("_ml", "anomaly_detectors", job_id), params=params
) | def function[delete_job, parameter[self, job_id, params]]:
constant[
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_
:arg job_id: The ID of the job to delete
:arg force: True if the job should be forcefully deleted, default False
:arg wait_for_completion: Should this request wait until the operation
has completed before returning, default True
]
if compare[name[job_id] in name[SKIP_IN_PATH]] begin[:]
<ast.Raise object at 0x7da1b212ebc0>
return[call[name[self].transport.perform_request, parameter[constant[DELETE], call[name[_make_path], parameter[constant[_ml], constant[anomaly_detectors], name[job_id]]]]]] | keyword[def] identifier[delete_job] ( identifier[self] , identifier[job_id] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[job_id] keyword[in] identifier[SKIP_IN_PATH] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( literal[string] , literal[string] , identifier[job_id] ), identifier[params] = identifier[params]
) | def delete_job(self, job_id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_
:arg job_id: The ID of the job to delete
:arg force: True if the job should be forcefully deleted, default False
:arg wait_for_completion: Should this request wait until the operation
has completed before returning, default True
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.") # depends on [control=['if'], data=[]]
return self.transport.perform_request('DELETE', _make_path('_ml', 'anomaly_detectors', job_id), params=params) |
def proto_unknown(theABF):
    """Analyze an ABF file whose protocol could not be identified.

    Renders the entire recording chronologically (no rainbow coloring, no
    title) on a gray background so the saved image is visually
    distinguishable from plots of recognized protocols, then frames and
    saves the figure tagged "UNKNOWN".

    :param theABF: path to (or object describing) the ABF file to analyze.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as an unknown protocol")
    plot=ABFplot(abf)
    plot.rainbow=False
    plot.title=None
    plot.figure_height,plot.figure_width=SQUARESIZE,SQUARESIZE
    plot.kwargs["lw"]=.5
    plot.figure_chronological()
    # set_axis_bgcolor() was deprecated in matplotlib 2.0 and removed in
    # 2.2; set_facecolor() is the supported replacement. The gray
    # background flags that the protocol was not recognized.
    plt.gca().set_facecolor('#AAAAAA')
    frameAndSave(abf,"UNKNOWN")
constant[protocol: unknown.]
variable[abf] assign[=] call[name[ABF], parameter[name[theABF]]]
call[name[abf].log.info, parameter[constant[analyzing as an unknown protocol]]]
variable[plot] assign[=] call[name[ABFplot], parameter[name[abf]]]
name[plot].rainbow assign[=] constant[False]
name[plot].title assign[=] constant[None]
<ast.Tuple object at 0x7da1afe06b30> assign[=] tuple[[<ast.Name object at 0x7da1afe04c40>, <ast.Name object at 0x7da1afe04e50>]]
call[name[plot].kwargs][constant[lw]] assign[=] constant[0.5]
call[name[plot].figure_chronological, parameter[]]
call[call[name[plt].gca, parameter[]].set_axis_bgcolor, parameter[constant[#AAAAAA]]]
call[name[frameAndSave], parameter[name[abf], constant[UNKNOWN]]] | keyword[def] identifier[proto_unknown] ( identifier[theABF] ):
literal[string]
identifier[abf] = identifier[ABF] ( identifier[theABF] )
identifier[abf] . identifier[log] . identifier[info] ( literal[string] )
identifier[plot] = identifier[ABFplot] ( identifier[abf] )
identifier[plot] . identifier[rainbow] = keyword[False]
identifier[plot] . identifier[title] = keyword[None]
identifier[plot] . identifier[figure_height] , identifier[plot] . identifier[figure_width] = identifier[SQUARESIZE] , identifier[SQUARESIZE]
identifier[plot] . identifier[kwargs] [ literal[string] ]= literal[int]
identifier[plot] . identifier[figure_chronological] ()
identifier[plt] . identifier[gca] (). identifier[set_axis_bgcolor] ( literal[string] )
identifier[frameAndSave] ( identifier[abf] , literal[string] ) | def proto_unknown(theABF):
"""protocol: unknown."""
abf = ABF(theABF)
abf.log.info('analyzing as an unknown protocol')
plot = ABFplot(abf)
plot.rainbow = False
plot.title = None
(plot.figure_height, plot.figure_width) = (SQUARESIZE, SQUARESIZE)
plot.kwargs['lw'] = 0.5
plot.figure_chronological()
plt.gca().set_axis_bgcolor('#AAAAAA') # different background if unknown protocol
frameAndSave(abf, 'UNKNOWN') |
def animate_glyphs(*args, **kwargs):
    """Deprecated: please use animation_control."""
    # Emit a DeprecationWarning attributed to the caller's call site
    # (stacklevel=2), then delegate all arguments unchanged to the
    # replacement API.
    warnings.warn("Please use animation_control(...)", DeprecationWarning, stacklevel=2)
    animation_control(*args, **kwargs)
constant[Deprecated: please use animation_control.]
call[name[warnings].warn, parameter[constant[Please use animation_control(...)], name[DeprecationWarning]]]
call[name[animation_control], parameter[<ast.Starred object at 0x7da18eb57eb0>]] | keyword[def] identifier[animate_glyphs] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
identifier[animation_control] (* identifier[args] ,** identifier[kwargs] ) | def animate_glyphs(*args, **kwargs):
"""Deprecated: please use animation_control."""
warnings.warn('Please use animation_control(...)', DeprecationWarning, stacklevel=2)
animation_control(*args, **kwargs) |
def download(self, layer, force=False):
    """
    Ensure a Lambda layer is available locally, fetching it if necessary.

    Layers defined inside the template are resolved against the current
    working directory; remote layers are downloaded into the local layer
    cache unless a cached copy already exists (or ``force`` is set).

    Parameters
    ----------
    layer samcli.commands.local.lib.provider.Layer
        Layer representing the layer to be downloaded.
    force bool
        True to download the layer even if it exists already on the system

    Returns
    -------
    Path
        Path object that represents where the layer is download to
    """
    # Template-local layers already live on disk; just resolve the path.
    if layer.is_defined_within_template:
        LOG.info("%s is a local Layer in the template", layer.name)
        layer.codeuri = resolve_code_path(self.cwd, layer.codeuri)
        return layer

    # disabling no-member due to https://github.com/PyCQA/pylint/issues/1660
    cached_location = Path(self.layer_cache).joinpath(layer.name).resolve()  # pylint: disable=no-member
    already_cached = self._is_layer_cached(cached_location)
    layer.codeuri = str(cached_location)

    if already_cached and not force:
        LOG.info("%s is already cached. Skipping download", layer.arn)
        return layer

    # Fetch the layer archive next to its extraction directory, then unzip
    # it into place with a progress bar labelled by the layer ARN.
    zip_destination = layer.codeuri + '.zip'
    download_uri = self._fetch_layer_uri(layer)
    unzip_from_uri(download_uri,
                   zip_destination,
                   unzip_output_dir=layer.codeuri,
                   progressbar_label='Downloading {}'.format(layer.layer_arn))
    return layer
constant[
Download a given layer to the local cache.
Parameters
----------
layer samcli.commands.local.lib.provider.Layer
Layer representing the layer to be downloaded.
force bool
True to download the layer even if it exists already on the system
Returns
-------
Path
Path object that represents where the layer is download to
]
if name[layer].is_defined_within_template begin[:]
call[name[LOG].info, parameter[constant[%s is a local Layer in the template], name[layer].name]]
name[layer].codeuri assign[=] call[name[resolve_code_path], parameter[name[self].cwd, name[layer].codeuri]]
return[name[layer]]
variable[layer_path] assign[=] call[call[call[name[Path], parameter[name[self].layer_cache]].joinpath, parameter[name[layer].name]].resolve, parameter[]]
variable[is_layer_downloaded] assign[=] call[name[self]._is_layer_cached, parameter[name[layer_path]]]
name[layer].codeuri assign[=] call[name[str], parameter[name[layer_path]]]
if <ast.BoolOp object at 0x7da1b1f62260> begin[:]
call[name[LOG].info, parameter[constant[%s is already cached. Skipping download], name[layer].arn]]
return[name[layer]]
variable[layer_zip_path] assign[=] binary_operation[name[layer].codeuri + constant[.zip]]
variable[layer_zip_uri] assign[=] call[name[self]._fetch_layer_uri, parameter[name[layer]]]
call[name[unzip_from_uri], parameter[name[layer_zip_uri], name[layer_zip_path]]]
return[name[layer]] | keyword[def] identifier[download] ( identifier[self] , identifier[layer] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[layer] . identifier[is_defined_within_template] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[layer] . identifier[name] )
identifier[layer] . identifier[codeuri] = identifier[resolve_code_path] ( identifier[self] . identifier[cwd] , identifier[layer] . identifier[codeuri] )
keyword[return] identifier[layer]
identifier[layer_path] = identifier[Path] ( identifier[self] . identifier[layer_cache] ). identifier[joinpath] ( identifier[layer] . identifier[name] ). identifier[resolve] ()
identifier[is_layer_downloaded] = identifier[self] . identifier[_is_layer_cached] ( identifier[layer_path] )
identifier[layer] . identifier[codeuri] = identifier[str] ( identifier[layer_path] )
keyword[if] identifier[is_layer_downloaded] keyword[and] keyword[not] identifier[force] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[layer] . identifier[arn] )
keyword[return] identifier[layer]
identifier[layer_zip_path] = identifier[layer] . identifier[codeuri] + literal[string]
identifier[layer_zip_uri] = identifier[self] . identifier[_fetch_layer_uri] ( identifier[layer] )
identifier[unzip_from_uri] ( identifier[layer_zip_uri] ,
identifier[layer_zip_path] ,
identifier[unzip_output_dir] = identifier[layer] . identifier[codeuri] ,
identifier[progressbar_label] = literal[string] . identifier[format] ( identifier[layer] . identifier[layer_arn] ))
keyword[return] identifier[layer] | def download(self, layer, force=False):
"""
Download a given layer to the local cache.
Parameters
----------
layer samcli.commands.local.lib.provider.Layer
Layer representing the layer to be downloaded.
force bool
True to download the layer even if it exists already on the system
Returns
-------
Path
Path object that represents where the layer is download to
"""
if layer.is_defined_within_template:
LOG.info('%s is a local Layer in the template', layer.name)
layer.codeuri = resolve_code_path(self.cwd, layer.codeuri)
return layer # depends on [control=['if'], data=[]]
# disabling no-member due to https://github.com/PyCQA/pylint/issues/1660
layer_path = Path(self.layer_cache).joinpath(layer.name).resolve() # pylint: disable=no-member
is_layer_downloaded = self._is_layer_cached(layer_path)
layer.codeuri = str(layer_path)
if is_layer_downloaded and (not force):
LOG.info('%s is already cached. Skipping download', layer.arn)
return layer # depends on [control=['if'], data=[]]
layer_zip_path = layer.codeuri + '.zip'
layer_zip_uri = self._fetch_layer_uri(layer)
unzip_from_uri(layer_zip_uri, layer_zip_path, unzip_output_dir=layer.codeuri, progressbar_label='Downloading {}'.format(layer.layer_arn))
return layer |
def main_loop_iteration(timeout=None):
    """Return the number of RemoteDispatcher.handle_read() calls made by this
    iteration"""
    # nr_handle_read is a module-level counter incremented elsewhere each
    # time RemoteDispatcher.handle_read() runs; snapshot it before polling.
    prev_nr_read = nr_handle_read
    # Run exactly one pass of the asyncore event loop using the poll()
    # backend, blocking at most `timeout` seconds (None = asyncore default).
    asyncore.loop(count=1, timeout=timeout, use_poll=True)
    return nr_handle_read - prev_nr_read
constant[Return the number of RemoteDispatcher.handle_read() calls made by this
iteration]
variable[prev_nr_read] assign[=] name[nr_handle_read]
call[name[asyncore].loop, parameter[]]
return[binary_operation[name[nr_handle_read] - name[prev_nr_read]]] | keyword[def] identifier[main_loop_iteration] ( identifier[timeout] = keyword[None] ):
literal[string]
identifier[prev_nr_read] = identifier[nr_handle_read]
identifier[asyncore] . identifier[loop] ( identifier[count] = literal[int] , identifier[timeout] = identifier[timeout] , identifier[use_poll] = keyword[True] )
keyword[return] identifier[nr_handle_read] - identifier[prev_nr_read] | def main_loop_iteration(timeout=None):
"""Return the number of RemoteDispatcher.handle_read() calls made by this
iteration"""
prev_nr_read = nr_handle_read
asyncore.loop(count=1, timeout=timeout, use_poll=True)
return nr_handle_read - prev_nr_read |
def do_INDEX(self, _id):
    """Get index details for a document by its id.
    INDEX 772210180J"""
    document = doc_by_id(_id)
    if not document:
        return self.error('id "{}" not found'.format(_id))
    # Report index details for every configured field that actually
    # appears in this document.
    for field_spec in config.FIELDS:
        field_key = field_spec['key']
        if field_key in document:
            self._print_field_index_details(document[field_key], _id)
constant[Get index details for a document by its id.
INDEX 772210180J]
variable[doc] assign[=] call[name[doc_by_id], parameter[name[_id]]]
if <ast.UnaryOp object at 0x7da1b26aceb0> begin[:]
return[call[name[self].error, parameter[call[constant[id "{}" not found].format, parameter[name[_id]]]]]]
for taget[name[field]] in starred[name[config].FIELDS] begin[:]
variable[key] assign[=] call[name[field]][constant[key]]
if compare[name[key] in name[doc]] begin[:]
call[name[self]._print_field_index_details, parameter[call[name[doc]][name[key]], name[_id]]] | keyword[def] identifier[do_INDEX] ( identifier[self] , identifier[_id] ):
literal[string]
identifier[doc] = identifier[doc_by_id] ( identifier[_id] )
keyword[if] keyword[not] identifier[doc] :
keyword[return] identifier[self] . identifier[error] ( literal[string] . identifier[format] ( identifier[_id] ))
keyword[for] identifier[field] keyword[in] identifier[config] . identifier[FIELDS] :
identifier[key] = identifier[field] [ literal[string] ]
keyword[if] identifier[key] keyword[in] identifier[doc] :
identifier[self] . identifier[_print_field_index_details] ( identifier[doc] [ identifier[key] ], identifier[_id] ) | def do_INDEX(self, _id):
"""Get index details for a document by its id.
INDEX 772210180J"""
doc = doc_by_id(_id)
if not doc:
return self.error('id "{}" not found'.format(_id)) # depends on [control=['if'], data=[]]
for field in config.FIELDS:
key = field['key']
if key in doc:
self._print_field_index_details(doc[key], _id) # depends on [control=['if'], data=['key', 'doc']] # depends on [control=['for'], data=['field']] |
def _dstationarystate(self, k, param):
"""Returns the dstationarystate ."""
if self._distributionmodel:
return self.model.dstationarystate(k, param)
else:
return self.model.dstationarystate(param) | def function[_dstationarystate, parameter[self, k, param]]:
constant[Returns the dstationarystate .]
if name[self]._distributionmodel begin[:]
return[call[name[self].model.dstationarystate, parameter[name[k], name[param]]]] | keyword[def] identifier[_dstationarystate] ( identifier[self] , identifier[k] , identifier[param] ):
literal[string]
keyword[if] identifier[self] . identifier[_distributionmodel] :
keyword[return] identifier[self] . identifier[model] . identifier[dstationarystate] ( identifier[k] , identifier[param] )
keyword[else] :
keyword[return] identifier[self] . identifier[model] . identifier[dstationarystate] ( identifier[param] ) | def _dstationarystate(self, k, param):
"""Returns the dstationarystate ."""
if self._distributionmodel:
return self.model.dstationarystate(k, param) # depends on [control=['if'], data=[]]
else:
return self.model.dstationarystate(param) |
def free_function(
        self,
        name=None,
        function=None,
        return_type=None,
        arg_types=None,
        header_dir=None,
        header_file=None,
        recursive=None):
    """
    Returns reference to free function declaration that matches
    a defined criteria.

    :param name: name of the free function to search for
    :param function: custom matcher callable applied to each candidate
        declaration
    :param return_type: return type the function must have
    :param arg_types: argument types the function must accept
    :param header_dir: restrict the search to declarations from this
        directory
    :param header_file: restrict the search to declarations from this file
    :param recursive: whether nested scopes are searched as well
    """
    # Delegate to the generic single-result finder, supplying the matcher
    # and declaration types registered for free functions on namespace_t.
    # NOTE(review): _find_single presumably raises if zero or multiple
    # declarations match -- confirm in its implementation.
    return (
        self._find_single(
            scopedef.scopedef_t._impl_matchers[namespace_t.free_function],
            name=name,
            function=function,
            decl_type=self._impl_decl_types[namespace_t.free_function],
            return_type=return_type,
            arg_types=arg_types,
            header_dir=header_dir,
            header_file=header_file,
            recursive=recursive)
    )
constant[
Returns reference to free function declaration that matches
a defined criteria.
]
return[call[name[self]._find_single, parameter[call[name[scopedef].scopedef_t._impl_matchers][name[namespace_t].free_function]]]] | keyword[def] identifier[free_function] (
identifier[self] ,
identifier[name] = keyword[None] ,
identifier[function] = keyword[None] ,
identifier[return_type] = keyword[None] ,
identifier[arg_types] = keyword[None] ,
identifier[header_dir] = keyword[None] ,
identifier[header_file] = keyword[None] ,
identifier[recursive] = keyword[None] ):
literal[string]
keyword[return] (
identifier[self] . identifier[_find_single] (
identifier[scopedef] . identifier[scopedef_t] . identifier[_impl_matchers] [ identifier[namespace_t] . identifier[free_function] ],
identifier[name] = identifier[name] ,
identifier[function] = identifier[function] ,
identifier[decl_type] = identifier[self] . identifier[_impl_decl_types] [ identifier[namespace_t] . identifier[free_function] ],
identifier[return_type] = identifier[return_type] ,
identifier[arg_types] = identifier[arg_types] ,
identifier[header_dir] = identifier[header_dir] ,
identifier[header_file] = identifier[header_file] ,
identifier[recursive] = identifier[recursive] )
) | def free_function(self, name=None, function=None, return_type=None, arg_types=None, header_dir=None, header_file=None, recursive=None):
"""
Returns reference to free function declaration that matches
a defined criteria.
"""
return self._find_single(scopedef.scopedef_t._impl_matchers[namespace_t.free_function], name=name, function=function, decl_type=self._impl_decl_types[namespace_t.free_function], return_type=return_type, arg_types=arg_types, header_dir=header_dir, header_file=header_file, recursive=recursive) |
def wallet_frontiers(self, wallet):
    """
    Returns a list of pairs of account and block hash representing the
    head block starting for accounts from **wallet**

    :param wallet: Wallet to return frontiers for
    :type wallet: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_frontiers(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
    ... )
    {
        "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
    }
    """
    # Validate/normalize the wallet id, then issue the RPC action.
    payload = {"wallet": self._process_value(wallet, 'wallet')}
    response = self.call('wallet_frontiers', payload)
    # An empty wallet yields no "frontiers" key; normalize to {}.
    frontiers = response.get('frontiers')
    return frontiers if frontiers else {}
constant[
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
]
variable[wallet] assign[=] call[name[self]._process_value, parameter[name[wallet], constant[wallet]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b25194b0>], [<ast.Name object at 0x7da1b251af20>]]
variable[resp] assign[=] call[name[self].call, parameter[constant[wallet_frontiers], name[payload]]]
return[<ast.BoolOp object at 0x7da1b251ada0>] | keyword[def] identifier[wallet_frontiers] ( identifier[self] , identifier[wallet] ):
literal[string]
identifier[wallet] = identifier[self] . identifier[_process_value] ( identifier[wallet] , literal[string] )
identifier[payload] ={ literal[string] : identifier[wallet] }
identifier[resp] = identifier[self] . identifier[call] ( literal[string] , identifier[payload] )
keyword[return] identifier[resp] . identifier[get] ( literal[string] ) keyword[or] {} | def wallet_frontiers(self, wallet):
"""
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {'wallet': wallet}
resp = self.call('wallet_frontiers', payload)
return resp.get('frontiers') or {} |
def _deriv_logaddexp2(x1, x2):
"""The derivative of f(x, y) = log2(2^x + 2^y)"""
y1 = np.exp2(x1)
y2 = np.exp2(x2)
df_dx1 = y1 / (y1 + y2)
df_dx2 = y2 / (y1 + y2)
return np.vstack([df_dx1, df_dx2]).T | def function[_deriv_logaddexp2, parameter[x1, x2]]:
constant[The derivative of f(x, y) = log2(2^x + 2^y)]
variable[y1] assign[=] call[name[np].exp2, parameter[name[x1]]]
variable[y2] assign[=] call[name[np].exp2, parameter[name[x2]]]
variable[df_dx1] assign[=] binary_operation[name[y1] / binary_operation[name[y1] + name[y2]]]
variable[df_dx2] assign[=] binary_operation[name[y2] / binary_operation[name[y1] + name[y2]]]
return[call[name[np].vstack, parameter[list[[<ast.Name object at 0x7da18f722fb0>, <ast.Name object at 0x7da18f7230a0>]]]].T] | keyword[def] identifier[_deriv_logaddexp2] ( identifier[x1] , identifier[x2] ):
literal[string]
identifier[y1] = identifier[np] . identifier[exp2] ( identifier[x1] )
identifier[y2] = identifier[np] . identifier[exp2] ( identifier[x2] )
identifier[df_dx1] = identifier[y1] /( identifier[y1] + identifier[y2] )
identifier[df_dx2] = identifier[y2] /( identifier[y1] + identifier[y2] )
keyword[return] identifier[np] . identifier[vstack] ([ identifier[df_dx1] , identifier[df_dx2] ]). identifier[T] | def _deriv_logaddexp2(x1, x2):
"""The derivative of f(x, y) = log2(2^x + 2^y)"""
y1 = np.exp2(x1)
y2 = np.exp2(x2)
df_dx1 = y1 / (y1 + y2)
df_dx2 = y2 / (y1 + y2)
return np.vstack([df_dx1, df_dx2]).T |
def connectMSExchange(server):
    """
    Creates a connection for the inputted server to a Microsoft Exchange server.

    Performs the SMTP NTLM handshake (Type 1 negotiate, Type 2 challenge,
    Type 3 response) using the current Windows credentials via SSPI.

    :param server | <smtplib.SMTP>

    :usage |>>> import smtplib
           |>>> import projex.notify
           |>>> smtp = smtplib.SMTP('mail.server.com')
           |>>> projex.notify.connectMSExchange(smtp)

    :return (<bool> success, <str> reason)
    """
    if not sspi:
        return False, 'No sspi module found.'

    # send the SMTP EHLO command
    code, response = server.ehlo()
    if code != SMTP_EHLO_OKAY:
        return False, 'Server did not respond to EHLO command.'

    sspi_client = sspi.ClientAuth('NTLM')

    # generate NTLM Type 1 message
    sec_buffer = None
    err, sec_buffer = sspi_client.authorize(sec_buffer)
    # base64.encodestring() was deprecated and removed in Python 3.9;
    # encodebytes() is the supported replacement. Decode to str so the
    # newline strip and the 'NTLM ' concatenation below work on text.
    ntlm_message = base64.encodebytes(sec_buffer[0].Buffer).decode('ascii').replace('\n', '')

    # send NTLM Type 1 message -- Authentication Request
    code, response = server.docmd('AUTH', 'NTLM ' + ntlm_message)

    # verify the NTLM Type 2 response -- Challenge Message
    if code != SMTP_AUTH_CHALLENGE:
        msg = 'Server did not respond as expected to NTLM negotiate message'
        return False, msg

    # generate NTLM Type 3 message; smtplib may hand back the challenge as
    # bytes or str depending on version, and decodebytes() requires bytes.
    challenge = response if isinstance(response, bytes) else response.encode('ascii')
    err, sec_buffer = sspi_client.authorize(base64.decodebytes(challenge))
    ntlm_message = base64.encodebytes(sec_buffer[0].Buffer).decode('ascii').replace('\n', '')

    # send the NTLM Type 3 message -- Response Message
    code, response = server.docmd('', ntlm_message)
    if code != SMTP_AUTH_OKAY:
        return False, response

    return True, ''
constant[
Creates a connection for the inputted server to a Microsoft Exchange server.
:param server | <smtplib.SMTP>
:usage |>>> import smtplib
|>>> import projex.notify
|>>> smtp = smtplib.SMTP('mail.server.com')
|>>> projex.notify.connectMSExchange(smtp)
:return (<bool> success, <str> reason)
]
if <ast.UnaryOp object at 0x7da1b2879cc0> begin[:]
return[tuple[[<ast.Constant object at 0x7da1b2879570>, <ast.Constant object at 0x7da1b287bbb0>]]]
<ast.Tuple object at 0x7da1b28790c0> assign[=] call[name[server].ehlo, parameter[]]
if compare[name[code] not_equal[!=] name[SMTP_EHLO_OKAY]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b2878bb0>, <ast.Constant object at 0x7da1b2878ca0>]]]
variable[sspi_client] assign[=] call[name[sspi].ClientAuth, parameter[constant[NTLM]]]
variable[sec_buffer] assign[=] constant[None]
<ast.Tuple object at 0x7da1b287b880> assign[=] call[name[sspi_client].authorize, parameter[name[sec_buffer]]]
variable[buffer] assign[=] call[name[sec_buffer]][constant[0]].Buffer
variable[ntlm_message] assign[=] call[call[name[base64].encodestring, parameter[name[buffer]]].replace, parameter[constant[
], constant[]]]
<ast.Tuple object at 0x7da1b287bca0> assign[=] call[name[server].docmd, parameter[constant[AUTH], binary_operation[constant[NTLM ] + name[ntlm_message]]]]
if compare[name[code] not_equal[!=] name[SMTP_AUTH_CHALLENGE]] begin[:]
variable[msg] assign[=] constant[Server did not respond as expected to NTLM negotiate message]
return[tuple[[<ast.Constant object at 0x7da1b287b250>, <ast.Name object at 0x7da1b287b730>]]]
<ast.Tuple object at 0x7da1b2879ea0> assign[=] call[name[sspi_client].authorize, parameter[call[name[base64].decodestring, parameter[name[response]]]]]
variable[buffer] assign[=] call[name[sec_buffer]][constant[0]].Buffer
variable[ntlm_message] assign[=] call[call[name[base64].encodestring, parameter[name[buffer]]].replace, parameter[constant[
], constant[]]]
<ast.Tuple object at 0x7da1b287b2e0> assign[=] call[name[server].docmd, parameter[constant[], name[ntlm_message]]]
if compare[name[code] not_equal[!=] name[SMTP_AUTH_OKAY]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b287b850>, <ast.Name object at 0x7da1b287b160>]]]
return[tuple[[<ast.Constant object at 0x7da1b2878c70>, <ast.Constant object at 0x7da1b2879660>]]] | keyword[def] identifier[connectMSExchange] ( identifier[server] ):
literal[string]
keyword[if] keyword[not] identifier[sspi] :
keyword[return] keyword[False] , literal[string]
identifier[code] , identifier[response] = identifier[server] . identifier[ehlo] ()
keyword[if] identifier[code] != identifier[SMTP_EHLO_OKAY] :
keyword[return] keyword[False] , literal[string]
identifier[sspi_client] = identifier[sspi] . identifier[ClientAuth] ( literal[string] )
identifier[sec_buffer] = keyword[None]
identifier[err] , identifier[sec_buffer] = identifier[sspi_client] . identifier[authorize] ( identifier[sec_buffer] )
identifier[buffer] = identifier[sec_buffer] [ literal[int] ]. identifier[Buffer]
identifier[ntlm_message] = identifier[base64] . identifier[encodestring] ( identifier[buffer] ). identifier[replace] ( literal[string] , literal[string] )
identifier[code] , identifier[response] = identifier[server] . identifier[docmd] ( literal[string] , literal[string] + identifier[ntlm_message] )
keyword[if] identifier[code] != identifier[SMTP_AUTH_CHALLENGE] :
identifier[msg] = literal[string]
keyword[return] keyword[False] , identifier[msg]
identifier[err] , identifier[sec_buffer] = identifier[sspi_client] . identifier[authorize] ( identifier[base64] . identifier[decodestring] ( identifier[response] ))
identifier[buffer] = identifier[sec_buffer] [ literal[int] ]. identifier[Buffer]
identifier[ntlm_message] = identifier[base64] . identifier[encodestring] ( identifier[buffer] ). identifier[replace] ( literal[string] , literal[string] )
identifier[code] , identifier[response] = identifier[server] . identifier[docmd] ( literal[string] , identifier[ntlm_message] )
keyword[if] identifier[code] != identifier[SMTP_AUTH_OKAY] :
keyword[return] keyword[False] , identifier[response]
keyword[return] keyword[True] , literal[string] | def connectMSExchange(server):
"""
Creates a connection for the inputted server to a Microsoft Exchange server.
:param server | <smtplib.SMTP>
:usage |>>> import smtplib
|>>> import projex.notify
|>>> smtp = smtplib.SMTP('mail.server.com')
|>>> projex.notify.connectMSExchange(smtp)
:return (<bool> success, <str> reason)
"""
if not sspi:
return (False, 'No sspi module found.') # depends on [control=['if'], data=[]]
# send the SMTP EHLO command
(code, response) = server.ehlo()
if code != SMTP_EHLO_OKAY:
return (False, 'Server did not respond to EHLO command.') # depends on [control=['if'], data=[]]
sspi_client = sspi.ClientAuth('NTLM')
# generate NTLM Type 1 message
sec_buffer = None
(err, sec_buffer) = sspi_client.authorize(sec_buffer)
# noinspection PyShadowingBuiltins
buffer = sec_buffer[0].Buffer
ntlm_message = base64.encodestring(buffer).replace('\n', '')
# send NTLM Type 1 message -- Authentication Request
(code, response) = server.docmd('AUTH', 'NTLM ' + ntlm_message)
# verify the NTLM Type 2 response -- Challenge Message
if code != SMTP_AUTH_CHALLENGE:
msg = 'Server did not respond as expected to NTLM negotiate message'
return (False, msg) # depends on [control=['if'], data=[]]
# generate NTLM Type 3 message
(err, sec_buffer) = sspi_client.authorize(base64.decodestring(response))
# noinspection PyShadowingBuiltins
buffer = sec_buffer[0].Buffer
ntlm_message = base64.encodestring(buffer).replace('\n', '')
# send the NTLM Type 3 message -- Response Message
(code, response) = server.docmd('', ntlm_message)
if code != SMTP_AUTH_OKAY:
return (False, response) # depends on [control=['if'], data=[]]
return (True, '') |
def _expand_error_codes(code_parts):
    """Return an expanded set of error codes to ignore."""
    known_codes = set(ErrorRegistry.get_error_codes())
    expanded_codes = set()
    try:
        for raw_part in code_parts:
            # Split-lined configuration values can carry leading
            # whitespace from the newline; normalize before matching.
            prefix = raw_part.strip()
            if not prefix:
                continue
            matches = {code for code in known_codes
                       if code.startswith(prefix)}
            if not matches:
                log.warning(
                    'Error code passed is not a prefix of any '
                    'known errors: %s', prefix)
            expanded_codes.update(matches)
    except TypeError as e:
        raise IllegalConfiguration(e)
    return expanded_codes
constant[Return an expanded set of error codes to ignore.]
variable[codes] assign[=] call[name[set], parameter[call[name[ErrorRegistry].get_error_codes, parameter[]]]]
variable[expanded_codes] assign[=] call[name[set], parameter[]]
<ast.Try object at 0x7da1b1ec31c0>
return[name[expanded_codes]] | keyword[def] identifier[_expand_error_codes] ( identifier[code_parts] ):
literal[string]
identifier[codes] = identifier[set] ( identifier[ErrorRegistry] . identifier[get_error_codes] ())
identifier[expanded_codes] = identifier[set] ()
keyword[try] :
keyword[for] identifier[part] keyword[in] identifier[code_parts] :
identifier[part] = identifier[part] . identifier[strip] ()
keyword[if] keyword[not] identifier[part] :
keyword[continue]
identifier[codes_to_add] ={ identifier[code] keyword[for] identifier[code] keyword[in] identifier[codes]
keyword[if] identifier[code] . identifier[startswith] ( identifier[part] )}
keyword[if] keyword[not] identifier[codes_to_add] :
identifier[log] . identifier[warning] (
literal[string]
literal[string] , identifier[part] )
identifier[expanded_codes] . identifier[update] ( identifier[codes_to_add] )
keyword[except] identifier[TypeError] keyword[as] identifier[e] :
keyword[raise] identifier[IllegalConfiguration] ( identifier[e] )
keyword[return] identifier[expanded_codes] | def _expand_error_codes(code_parts):
"""Return an expanded set of error codes to ignore."""
codes = set(ErrorRegistry.get_error_codes())
expanded_codes = set()
try:
for part in code_parts:
# Dealing with split-lined configurations; The part might begin
# with a whitespace due to the newline character.
part = part.strip()
if not part:
continue # depends on [control=['if'], data=[]]
codes_to_add = {code for code in codes if code.startswith(part)}
if not codes_to_add:
log.warning('Error code passed is not a prefix of any known errors: %s', part) # depends on [control=['if'], data=[]]
expanded_codes.update(codes_to_add) # depends on [control=['for'], data=['part']] # depends on [control=['try'], data=[]]
except TypeError as e:
raise IllegalConfiguration(e) # depends on [control=['except'], data=['e']]
return expanded_codes |
def _initBlockMajorMap(self):
    """Parses /proc/devices to initialize device class - major number map
    for block devices.
    """
    self._mapMajorDevclass = {}
    try:
        # Context manager guarantees the handle is closed even if read fails
        # (the original leaked the handle when fp.read() raised).
        with open(devicesFile, 'r') as fp:
            data = fp.read()
    except (IOError, OSError):
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only I/O failures are expected here.
        raise IOError('Failed reading device information from file: %s'
                      % devicesFile)
    skip = True
    for line in data.splitlines():
        if skip:
            # Skip the character-device section; parsing starts after the
            # "Block devices:" header line.
            if re.match(r'block.*:', line, re.IGNORECASE):
                skip = False
        else:
            mobj = re.match(r'\s*(\d+)\s+([\w\-]+)$', line)
            if mobj:
                major = int(mobj.group(1))
                devtype = mobj.group(2)
                self._mapMajorDevclass[major] = devtype
                if devtype == 'device-mapper':
                    # Remember the device-mapper major number separately for
                    # later device-mapper specific lookups.
                    self._dmMajorNum = major
constant[Parses /proc/devices to initialize device class - major number map
for block devices.
]
name[self]._mapMajorDevclass assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b1042ad0>
variable[skip] assign[=] constant[True]
for taget[name[line]] in starred[call[name[data].splitlines, parameter[]]] begin[:]
if name[skip] begin[:]
if call[name[re].match, parameter[constant[block.*:], name[line], name[re].IGNORECASE]] begin[:]
variable[skip] assign[=] constant[False] | keyword[def] identifier[_initBlockMajorMap] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_mapMajorDevclass] ={}
keyword[try] :
identifier[fp] = identifier[open] ( identifier[devicesFile] , literal[string] )
identifier[data] = identifier[fp] . identifier[read] ()
identifier[fp] . identifier[close] ()
keyword[except] :
keyword[raise] identifier[IOError] ( literal[string]
% identifier[devicesFile] )
identifier[skip] = keyword[True]
keyword[for] identifier[line] keyword[in] identifier[data] . identifier[splitlines] ():
keyword[if] identifier[skip] :
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[line] , identifier[re] . identifier[IGNORECASE] ):
identifier[skip] = keyword[False]
keyword[else] :
identifier[mobj] = identifier[re] . identifier[match] ( literal[string] , identifier[line] )
keyword[if] identifier[mobj] :
identifier[major] = identifier[int] ( identifier[mobj] . identifier[group] ( literal[int] ))
identifier[devtype] = identifier[mobj] . identifier[group] ( literal[int] )
identifier[self] . identifier[_mapMajorDevclass] [ identifier[major] ]= identifier[devtype]
keyword[if] identifier[devtype] == literal[string] :
identifier[self] . identifier[_dmMajorNum] = identifier[major] | def _initBlockMajorMap(self):
"""Parses /proc/devices to initialize device class - major number map
for block devices.
"""
self._mapMajorDevclass = {}
try:
fp = open(devicesFile, 'r')
data = fp.read()
fp.close() # depends on [control=['try'], data=[]]
except:
raise IOError('Failed reading device information from file: %s' % devicesFile) # depends on [control=['except'], data=[]]
skip = True
for line in data.splitlines():
if skip:
if re.match('block.*:', line, re.IGNORECASE):
skip = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
mobj = re.match('\\s*(\\d+)\\s+([\\w\\-]+)$', line)
if mobj:
major = int(mobj.group(1))
devtype = mobj.group(2)
self._mapMajorDevclass[major] = devtype
if devtype == 'device-mapper':
self._dmMajorNum = major # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] |
def load_module_from_modpath(parts, path=None, use_sys=1):
    """Load a python module from its split name.
    :type parts: list(str) or tuple(str)
    :param parts:
      python name of a module or package split on '.'
    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)
    :type use_sys: bool
    :param use_sys:
      boolean indicating whether the sys.modules dictionary should be used or not
    :raise ImportError: if the module or package is not found
    :rtype: module
    :return: the loaded module
    """
    # NOTE(review): relies on the long-deprecated `imp` module; migrating to
    # importlib would change loader/caching semantics — verify before upgrading.
    if use_sys:
        # Fast path: the fully qualified module may already be imported.
        try:
            return sys.modules[".".join(parts)]
        except KeyError:
            pass
    modpath = []  # name components imported so far, e.g. ['pkg', 'sub']
    prevmodule = None  # parent package of the component currently being imported
    for part in parts:
        modpath.append(part)
        curname = ".".join(modpath)
        module = None
        if len(modpath) != len(parts):
            # even with use_sys=False, should try to get outer packages from sys.modules
            module = sys.modules.get(curname)
        elif use_sys:
            # because it may have been indirectly loaded through a parent
            module = sys.modules.get(curname)
        if module is None:
            # Not cached: locate and load this component relative to the parent
            # package's search path (sys.path on the first pass when path is None).
            mp_file, mp_filename, mp_desc = imp.find_module(part, path)
            module = imp.load_module(curname, mp_file, mp_filename, mp_desc)
            # mp_file still needs to be closed.
            if mp_file:
                mp_file.close()
        if prevmodule:
            # Bind the child as an attribute of its parent package so that
            # `pkg.sub`-style attribute access works on the returned module.
            setattr(prevmodule, part, module)
        _file = getattr(module, "__file__", "")
        prevmodule = module
        if not _file and util.is_namespace(curname):
            # Namespace packages have no __file__; keep descending with the
            # current search `path` unchanged.
            continue
        if not _file and len(modpath) != len(parts):
            # A non-namespace module without __file__ cannot contain submodules.
            raise ImportError("no module in %s" % ".".join(parts[len(modpath) :]))
        # Search the next component inside the directory of the module just loaded.
        path = [os.path.dirname(_file)]
    return module
constant[Load a python module from its split name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package split on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
]
if name[use_sys] begin[:]
<ast.Try object at 0x7da1b1edb5b0>
variable[modpath] assign[=] list[[]]
variable[prevmodule] assign[=] constant[None]
for taget[name[part]] in starred[name[parts]] begin[:]
call[name[modpath].append, parameter[name[part]]]
variable[curname] assign[=] call[constant[.].join, parameter[name[modpath]]]
variable[module] assign[=] constant[None]
if compare[call[name[len], parameter[name[modpath]]] not_equal[!=] call[name[len], parameter[name[parts]]]] begin[:]
variable[module] assign[=] call[name[sys].modules.get, parameter[name[curname]]]
if compare[name[module] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1e7fdc0> assign[=] call[name[imp].find_module, parameter[name[part], name[path]]]
variable[module] assign[=] call[name[imp].load_module, parameter[name[curname], name[mp_file], name[mp_filename], name[mp_desc]]]
if name[mp_file] begin[:]
call[name[mp_file].close, parameter[]]
if name[prevmodule] begin[:]
call[name[setattr], parameter[name[prevmodule], name[part], name[module]]]
variable[_file] assign[=] call[name[getattr], parameter[name[module], constant[__file__], constant[]]]
variable[prevmodule] assign[=] name[module]
if <ast.BoolOp object at 0x7da1b1e7d360> begin[:]
continue
if <ast.BoolOp object at 0x7da1b1e7ef50> begin[:]
<ast.Raise object at 0x7da1b1e7e200>
variable[path] assign[=] list[[<ast.Call object at 0x7da1b1e7e0b0>]]
return[name[module]] | keyword[def] identifier[load_module_from_modpath] ( identifier[parts] , identifier[path] = keyword[None] , identifier[use_sys] = literal[int] ):
literal[string]
keyword[if] identifier[use_sys] :
keyword[try] :
keyword[return] identifier[sys] . identifier[modules] [ literal[string] . identifier[join] ( identifier[parts] )]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[modpath] =[]
identifier[prevmodule] = keyword[None]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[modpath] . identifier[append] ( identifier[part] )
identifier[curname] = literal[string] . identifier[join] ( identifier[modpath] )
identifier[module] = keyword[None]
keyword[if] identifier[len] ( identifier[modpath] )!= identifier[len] ( identifier[parts] ):
identifier[module] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[curname] )
keyword[elif] identifier[use_sys] :
identifier[module] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[curname] )
keyword[if] identifier[module] keyword[is] keyword[None] :
identifier[mp_file] , identifier[mp_filename] , identifier[mp_desc] = identifier[imp] . identifier[find_module] ( identifier[part] , identifier[path] )
identifier[module] = identifier[imp] . identifier[load_module] ( identifier[curname] , identifier[mp_file] , identifier[mp_filename] , identifier[mp_desc] )
keyword[if] identifier[mp_file] :
identifier[mp_file] . identifier[close] ()
keyword[if] identifier[prevmodule] :
identifier[setattr] ( identifier[prevmodule] , identifier[part] , identifier[module] )
identifier[_file] = identifier[getattr] ( identifier[module] , literal[string] , literal[string] )
identifier[prevmodule] = identifier[module]
keyword[if] keyword[not] identifier[_file] keyword[and] identifier[util] . identifier[is_namespace] ( identifier[curname] ):
keyword[continue]
keyword[if] keyword[not] identifier[_file] keyword[and] identifier[len] ( identifier[modpath] )!= identifier[len] ( identifier[parts] ):
keyword[raise] identifier[ImportError] ( literal[string] % literal[string] . identifier[join] ( identifier[parts] [ identifier[len] ( identifier[modpath] ):]))
identifier[path] =[ identifier[os] . identifier[path] . identifier[dirname] ( identifier[_file] )]
keyword[return] identifier[module] | def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its split name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package split on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname) # depends on [control=['if'], data=[]]
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname) # depends on [control=['if'], data=[]]
if module is None:
(mp_file, mp_filename, mp_desc) = imp.find_module(part, path)
module = imp.load_module(curname, mp_file, mp_filename, mp_desc)
# mp_file still needs to be closed.
if mp_file:
mp_file.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['module']]
if prevmodule:
setattr(prevmodule, part, module) # depends on [control=['if'], data=[]]
_file = getattr(module, '__file__', '')
prevmodule = module
if not _file and util.is_namespace(curname):
continue # depends on [control=['if'], data=[]]
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):])) # depends on [control=['if'], data=[]]
path = [os.path.dirname(_file)] # depends on [control=['for'], data=['part']]
return module |
def _set_residue_map(self):
    """
    Map each residue label ("ml1", "ml2", ...) to a centered copy of the
    corresponding molecule, reusing one copy per distinct formula.
    """
    self.map_residue_to_mol = {}
    centered_by_formula = {}
    for index, molecule in enumerate(self.mols, start=1):
        formula = molecule.formula
        if formula not in centered_by_formula:
            # First time we see this formula: shift the molecule (in place) so
            # its center of mass sits at the origin, then snapshot a copy.
            molecule.translate_sites(indices=range(len(molecule)),
                                     vector=-molecule.center_of_mass)
            centered_by_formula[formula] = molecule.copy()
        self.map_residue_to_mol["ml{}".format(index)] = centered_by_formula[formula]
constant[
map each residue to the corresponding molecule.
]
name[self].map_residue_to_mol assign[=] dictionary[[], []]
variable[lookup] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c991450>, <ast.Name object at 0x7da20c992c50>]]] in starred[call[name[enumerate], parameter[name[self].mols]]] begin[:]
if <ast.UnaryOp object at 0x7da20c9923e0> begin[:]
call[name[mol].translate_sites, parameter[]]
call[name[lookup]][name[mol].formula] assign[=] call[name[mol].copy, parameter[]]
call[name[self].map_residue_to_mol][call[constant[ml{}].format, parameter[binary_operation[name[idx] + constant[1]]]]] assign[=] call[name[lookup]][name[mol].formula] | keyword[def] identifier[_set_residue_map] ( identifier[self] ):
literal[string]
identifier[self] . identifier[map_residue_to_mol] ={}
identifier[lookup] ={}
keyword[for] identifier[idx] , identifier[mol] keyword[in] identifier[enumerate] ( identifier[self] . identifier[mols] ):
keyword[if] keyword[not] identifier[mol] . identifier[formula] keyword[in] identifier[lookup] :
identifier[mol] . identifier[translate_sites] ( identifier[indices] = identifier[range] ( identifier[len] ( identifier[mol] )),
identifier[vector] =- identifier[mol] . identifier[center_of_mass] )
identifier[lookup] [ identifier[mol] . identifier[formula] ]= identifier[mol] . identifier[copy] ()
identifier[self] . identifier[map_residue_to_mol] [ literal[string] . identifier[format] ( identifier[idx] + literal[int] )]= identifier[lookup] [ identifier[mol] . identifier[formula] ] | def _set_residue_map(self):
"""
map each residue to the corresponding molecule.
"""
self.map_residue_to_mol = {}
lookup = {}
for (idx, mol) in enumerate(self.mols):
if not mol.formula in lookup:
mol.translate_sites(indices=range(len(mol)), vector=-mol.center_of_mass)
lookup[mol.formula] = mol.copy() # depends on [control=['if'], data=[]]
self.map_residue_to_mol['ml{}'.format(idx + 1)] = lookup[mol.formula] # depends on [control=['for'], data=[]] |
def info(self):
    """
    Return stats about the current index (number of documents, memory
    consumption, etc.) as a dict of FT.INFO fields.
    """
    raw = self.redis.execute_command('FT.INFO', self.index_name)
    decoded = [to_string(field) for field in raw]
    # FT.INFO replies with a flat [name, value, name, value, ...] array;
    # pair adjacent entries into a dict.
    return dict(six.moves.zip(decoded[0::2], decoded[1::2]))
constant[
Get info an stats about the the current index, including the number of documents, memory consumption, etc
]
variable[res] assign[=] call[name[self].redis.execute_command, parameter[constant[FT.INFO], name[self].index_name]]
variable[it] assign[=] call[name[six].moves.map, parameter[name[to_string], name[res]]]
return[call[name[dict], parameter[call[name[six].moves.zip, parameter[name[it], name[it]]]]]] | keyword[def] identifier[info] ( identifier[self] ):
literal[string]
identifier[res] = identifier[self] . identifier[redis] . identifier[execute_command] ( literal[string] , identifier[self] . identifier[index_name] )
identifier[it] = identifier[six] . identifier[moves] . identifier[map] ( identifier[to_string] , identifier[res] )
keyword[return] identifier[dict] ( identifier[six] . identifier[moves] . identifier[zip] ( identifier[it] , identifier[it] )) | def info(self):
"""
Get info an stats about the the current index, including the number of documents, memory consumption, etc
"""
res = self.redis.execute_command('FT.INFO', self.index_name)
it = six.moves.map(to_string, res)
return dict(six.moves.zip(it, it)) |
def set_fig_title(self, title, **kwargs):
    """Set the overall figure title.
    This titles the whole figure (placed at the top via ``fig.suptitle``),
    not any individual plot. The title and its options are stored on the
    figure and applied when it is rendered.
    Args:
        title (str): Figure title.
    Keyword Arguments:
        x/y (float, optional): Text location in figure coordinates.
            Defaults are 0.5 for x and 0.98 for y.
        horizontalalignment/ha (str, optional): Horizontal alignment of the
            text relative to (x, y): 'center', 'left', or 'right'.
            Default is 'center'.
        verticalalignment/va (str, optional): Vertical alignment of the text
            relative to (x, y): 'top', 'center', 'bottom', or 'baseline'.
            Default is 'top'.
        fontsize/size (int, optional): Font size of the text. Default is 20.
    """
    # Fill in the default size without clobbering a caller-supplied value.
    kwargs.setdefault('fontsize', 20)
    self.figure.fig_title = title
    self.figure.fig_title_kwargs = kwargs
constant[Set overall figure title.
Set title for overall figure. This is not for a specific plot.
It will place the title at the top of the figure with a call to ``fig.suptitle``.
Args:
title (str): Figure title.
Keywork Arguments:
x/y (float, optional): The x/y location of the text in figure coordinates.
Defaults are 0.5 for x and 0.98 for y.
horizontalalignment/ha (str, optional): The horizontal alignment of
the text relative to (x, y). Optionas are 'center', 'left', or 'right'.
Default is 'center'.
verticalalignment/va (str, optional): The vertical alignment of the text
relative to (x, y). Optionas are 'top', 'center', 'bottom',
or 'baseline'. Default is 'top'.
fontsize/size (int, optional): The font size of the text. Default is 20.
]
variable[prop_default] assign[=] dictionary[[<ast.Constant object at 0x7da18c4ce590>], [<ast.Constant object at 0x7da18c4cd120>]]
for taget[tuple[[<ast.Name object at 0x7da18c4cf970>, <ast.Name object at 0x7da18c4cf9a0>]]] in starred[call[name[prop_default].items, parameter[]]] begin[:]
call[name[kwargs]][name[prop]] assign[=] call[name[kwargs].get, parameter[name[prop], name[default]]]
name[self].figure.fig_title assign[=] name[title]
name[self].figure.fig_title_kwargs assign[=] name[kwargs]
return[None] | keyword[def] identifier[set_fig_title] ( identifier[self] , identifier[title] ,** identifier[kwargs] ):
literal[string]
identifier[prop_default] ={
literal[string] : literal[int] ,
}
keyword[for] identifier[prop] , identifier[default] keyword[in] identifier[prop_default] . identifier[items] ():
identifier[kwargs] [ identifier[prop] ]= identifier[kwargs] . identifier[get] ( identifier[prop] , identifier[default] )
identifier[self] . identifier[figure] . identifier[fig_title] = identifier[title]
identifier[self] . identifier[figure] . identifier[fig_title_kwargs] = identifier[kwargs]
keyword[return] | def set_fig_title(self, title, **kwargs):
"""Set overall figure title.
Set title for overall figure. This is not for a specific plot.
It will place the title at the top of the figure with a call to ``fig.suptitle``.
Args:
title (str): Figure title.
Keywork Arguments:
x/y (float, optional): The x/y location of the text in figure coordinates.
Defaults are 0.5 for x and 0.98 for y.
horizontalalignment/ha (str, optional): The horizontal alignment of
the text relative to (x, y). Optionas are 'center', 'left', or 'right'.
Default is 'center'.
verticalalignment/va (str, optional): The vertical alignment of the text
relative to (x, y). Optionas are 'top', 'center', 'bottom',
or 'baseline'. Default is 'top'.
fontsize/size (int, optional): The font size of the text. Default is 20.
"""
prop_default = {'fontsize': 20}
for (prop, default) in prop_default.items():
kwargs[prop] = kwargs.get(prop, default) # depends on [control=['for'], data=[]]
self.figure.fig_title = title
self.figure.fig_title_kwargs = kwargs
return |
async def limit(self, use = 1):
    """
    Acquire "resources", wait until enough "resources" are acquired. For each loop,
    `limit` number of "resources" are permitted.
    :param use: number of "resources" to be used.
    :return: True if is limited
    """
    # Reserve this request's slot index before bumping the shared counter.
    c = self._counter
    self._counter = c + use
    if self._task is None:
        # Lazily start the background limiter task on first use.
        self._task = self._container.subroutine(self._limiter_task(), False)
    if c >= self._bottom_line:
        # Limited
        # Block until the limiter task releases the batch this slot falls
        # into (batch index = c // self._limit).
        await RateLimitingEvent.createMatcher(self, c // self._limit)
        return True
    else:
return False | <ast.AsyncFunctionDef object at 0x7da2044c1060> | keyword[async] keyword[def] identifier[limit] ( identifier[self] , identifier[use] = literal[int] ):
literal[string]
identifier[c] = identifier[self] . identifier[_counter]
identifier[self] . identifier[_counter] = identifier[c] + identifier[use]
keyword[if] identifier[self] . identifier[_task] keyword[is] keyword[None] :
identifier[self] . identifier[_task] = identifier[self] . identifier[_container] . identifier[subroutine] ( identifier[self] . identifier[_limiter_task] (), keyword[False] )
keyword[if] identifier[c] >= identifier[self] . identifier[_bottom_line] :
keyword[await] identifier[RateLimitingEvent] . identifier[createMatcher] ( identifier[self] , identifier[c] // identifier[self] . identifier[_limit] )
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | async def limit(self, use=1):
"""
Acquire "resources", wait until enough "resources" are acquired. For each loop,
`limit` number of "resources" are permitted.
:param use: number of "resouces" to be used.
:return: True if is limited
"""
c = self._counter
self._counter = c + use
if self._task is None:
self._task = self._container.subroutine(self._limiter_task(), False) # depends on [control=['if'], data=[]]
if c >= self._bottom_line:
# Limited
await RateLimitingEvent.createMatcher(self, c // self._limit)
return True # depends on [control=['if'], data=['c']]
else:
return False |
def _run_lint_on_file(file_path,
                      linter_functions,
                      tool_options,
                      fix_what_you_can):
    """Run each function in linter_functions on filename.
    If fix_what_you_can is specified, then the first error that has a
    possible replacement will be automatically fixed on this file.
    Returns a list of FileLinterFailure pairing file_path with each error.
    """
    # Open read/write so _apply_replacement can rewrite the file in place
    # through this same handle.
    with io.open(file_path, "r+", encoding="utf-8") as found_file:
        file_contents = found_file.read()
        file_lines = file_contents.splitlines(True)
        try:
            # Lint under a cwd-relative path
            # (assumes file_path lives under os.getcwd() — TODO confirm).
            errors = lint(file_path[len(os.getcwd()) + 1:],
                          file_contents,
                          linter_functions,
                          **tool_options)
        except RuntimeError as err:
            # Surface a linter crash as a synthetic failure entry instead of
            # aborting the whole run.
            msg = ("""RuntimeError in processing """
                   """{0} - {1}""".format(file_path, str(err)))
            errors = [("polysquarelinter/failure",
                       LinterFailure(msg, 0, None))]
        if fix_what_you_can:
            for error_index, error in enumerate(errors):
                if error[1].replacement is not None:
                    _apply_replacement(error, found_file, file_lines)
                    # Re-label the entry so the report shows it was fixed.
                    errors[error_index] = (error[0],
                                           LinterFailure(error[1].description +
                                                         " ... FIXED",
                                                         error[1].line,
                                                         error[1].replacement))
                    # Only the first fixable error is applied per invocation.
                    break
        return [FileLinterFailure(file_path, e) for e in errors]
constant[Run each function in linter_functions on filename.
If fix_what_you_can is specified, then the first error that has a
possible replacement will be automatically fixed on this file.
]
with call[name[io].open, parameter[name[file_path], constant[r+]]] begin[:]
variable[file_contents] assign[=] call[name[found_file].read, parameter[]]
variable[file_lines] assign[=] call[name[file_contents].splitlines, parameter[constant[True]]]
<ast.Try object at 0x7da18fe90700>
if name[fix_what_you_can] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18fe92350>, <ast.Name object at 0x7da18fe90820>]]] in starred[call[name[enumerate], parameter[name[errors]]]] begin[:]
if compare[call[name[error]][constant[1]].replacement is_not constant[None]] begin[:]
call[name[_apply_replacement], parameter[name[error], name[found_file], name[file_lines]]]
call[name[errors]][name[error_index]] assign[=] tuple[[<ast.Subscript object at 0x7da18fe91ab0>, <ast.Call object at 0x7da18fe90d00>]]
break
return[<ast.ListComp object at 0x7da18fe91b40>] | keyword[def] identifier[_run_lint_on_file] ( identifier[file_path] ,
identifier[linter_functions] ,
identifier[tool_options] ,
identifier[fix_what_you_can] ):
literal[string]
keyword[with] identifier[io] . identifier[open] ( identifier[file_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[found_file] :
identifier[file_contents] = identifier[found_file] . identifier[read] ()
identifier[file_lines] = identifier[file_contents] . identifier[splitlines] ( keyword[True] )
keyword[try] :
identifier[errors] = identifier[lint] ( identifier[file_path] [ identifier[len] ( identifier[os] . identifier[getcwd] ())+ literal[int] :],
identifier[file_contents] ,
identifier[linter_functions] ,
** identifier[tool_options] )
keyword[except] identifier[RuntimeError] keyword[as] identifier[err] :
identifier[msg] =( literal[string]
literal[string] . identifier[format] ( identifier[file_path] , identifier[str] ( identifier[err] )))
identifier[errors] =[( literal[string] ,
identifier[LinterFailure] ( identifier[msg] , literal[int] , keyword[None] ))]
keyword[if] identifier[fix_what_you_can] :
keyword[for] identifier[error_index] , identifier[error] keyword[in] identifier[enumerate] ( identifier[errors] ):
keyword[if] identifier[error] [ literal[int] ]. identifier[replacement] keyword[is] keyword[not] keyword[None] :
identifier[_apply_replacement] ( identifier[error] , identifier[found_file] , identifier[file_lines] )
identifier[errors] [ identifier[error_index] ]=( identifier[error] [ literal[int] ],
identifier[LinterFailure] ( identifier[error] [ literal[int] ]. identifier[description] +
literal[string] ,
identifier[error] [ literal[int] ]. identifier[line] ,
identifier[error] [ literal[int] ]. identifier[replacement] ))
keyword[break]
keyword[return] [ identifier[FileLinterFailure] ( identifier[file_path] , identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[errors] ] | def _run_lint_on_file(file_path, linter_functions, tool_options, fix_what_you_can):
"""Run each function in linter_functions on filename.
If fix_what_you_can is specified, then the first error that has a
possible replacement will be automatically fixed on this file.
"""
with io.open(file_path, 'r+', encoding='utf-8') as found_file:
file_contents = found_file.read()
file_lines = file_contents.splitlines(True)
try:
errors = lint(file_path[len(os.getcwd()) + 1:], file_contents, linter_functions, **tool_options) # depends on [control=['try'], data=[]]
except RuntimeError as err:
msg = 'RuntimeError in processing {0} - {1}'.format(file_path, str(err))
errors = [('polysquarelinter/failure', LinterFailure(msg, 0, None))] # depends on [control=['except'], data=['err']]
if fix_what_you_can:
for (error_index, error) in enumerate(errors):
if error[1].replacement is not None:
_apply_replacement(error, found_file, file_lines)
errors[error_index] = (error[0], LinterFailure(error[1].description + ' ... FIXED', error[1].line, error[1].replacement))
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return [FileLinterFailure(file_path, e) for e in errors] # depends on [control=['with'], data=['found_file']] |
def add_service(service, zone=None, permanent=True):
    '''
    Add a service for zone. If zone is omitted, default zone will be used.
    CLI Example:
    .. code-block:: bash
        salt '*' firewalld.add_service ssh
    To assign a service to a specific zone:
    .. code-block:: bash
        salt '*' firewalld.add_service ssh my_zone
    '''
    # Assemble the firewall-cmd argument string piece by piece.
    args = []
    if zone:
        args.append('--zone={0}'.format(zone))
    args.append('--add-service={0}'.format(service))
    if permanent:
        args.append('--permanent')
    return __firewall_cmd(' '.join(args))
constant[
Add a service for zone. If zone is omitted, default zone will be used.
CLI Example:
.. code-block:: bash
salt '*' firewalld.add_service ssh
To assign a service to a specific zone:
.. code-block:: bash
salt '*' firewalld.add_service ssh my_zone
]
if name[zone] begin[:]
variable[cmd] assign[=] call[constant[--zone={0} --add-service={1}].format, parameter[name[zone], name[service]]]
if name[permanent] begin[:]
<ast.AugAssign object at 0x7da18bcc9bd0>
return[call[name[__firewall_cmd], parameter[name[cmd]]]] | keyword[def] identifier[add_service] ( identifier[service] , identifier[zone] = keyword[None] , identifier[permanent] = keyword[True] ):
literal[string]
keyword[if] identifier[zone] :
identifier[cmd] = literal[string] . identifier[format] ( identifier[zone] , identifier[service] )
keyword[else] :
identifier[cmd] = literal[string] . identifier[format] ( identifier[service] )
keyword[if] identifier[permanent] :
identifier[cmd] += literal[string]
keyword[return] identifier[__firewall_cmd] ( identifier[cmd] ) | def add_service(service, zone=None, permanent=True):
"""
Add a service for zone. If zone is omitted, default zone will be used.
CLI Example:
.. code-block:: bash
salt '*' firewalld.add_service ssh
To assign a service to a specific zone:
.. code-block:: bash
salt '*' firewalld.add_service ssh my_zone
"""
if zone:
cmd = '--zone={0} --add-service={1}'.format(zone, service) # depends on [control=['if'], data=[]]
else:
cmd = '--add-service={0}'.format(service)
if permanent:
cmd += ' --permanent' # depends on [control=['if'], data=[]]
return __firewall_cmd(cmd) |
def convert_magicc6_to_magicc7_variables(variables, inverse=False):
    """
    Convert MAGICC6 variables to MAGICC7 variables
    Parameters
    ----------
    variables : list_like, str
        Variables to convert
    inverse : bool
        If True, convert the other way i.e. convert MAGICC7 variables to MAGICC6
        variables
    Raises
    ------
    ValueError
        If you try to convert HFC245ca, or some variant thereof, you will get a
        ValueError. The reason is that this variable was never meant to be included in
        MAGICC6, it was just an accident. See, for example, the text in the
        description section of ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``:
        "...HFC245fa, rather than HFC245ca, is the actually used isomer.".
    Returns
    -------
    ``type(variables)``
        Set of converted variables
    """
    # Scalar input: convert the single variable directly.
    if not isinstance(variables, (list, pd.Index)):
        return _apply_convert_magicc6_to_magicc7_variables(variables, inverse)
    # Sequence input: convert element by element, returning a list.
    converted = []
    for variable in variables:
        converted.append(
            _apply_convert_magicc6_to_magicc7_variables(variable, inverse)
        )
    return converted
constant[
Convert MAGICC6 variables to MAGICC7 variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert MAGICC7 variables to MAGICC6
variables
Raises
------
ValueError
If you try to convert HFC245ca, or some variant thereof, you will get a
ValueError. The reason is that this variable was never meant to be included in
MAGICC6, it was just an accident. See, for example, the text in the
description section of ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``:
"...HFC245fa, rather than HFC245ca, is the actually used isomer.".
Returns
-------
``type(variables)``
Set of converted variables
]
if call[name[isinstance], parameter[name[variables], tuple[[<ast.Name object at 0x7da2041d8610>, <ast.Attribute object at 0x7da2041da680>]]]] begin[:]
return[<ast.ListComp object at 0x7da2041d8460>] | keyword[def] identifier[convert_magicc6_to_magicc7_variables] ( identifier[variables] , identifier[inverse] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[variables] ,( identifier[list] , identifier[pd] . identifier[Index] )):
keyword[return] [
identifier[_apply_convert_magicc6_to_magicc7_variables] ( identifier[v] , identifier[inverse] ) keyword[for] identifier[v] keyword[in] identifier[variables]
]
keyword[else] :
keyword[return] identifier[_apply_convert_magicc6_to_magicc7_variables] ( identifier[variables] , identifier[inverse] ) | def convert_magicc6_to_magicc7_variables(variables, inverse=False):
"""
Convert MAGICC6 variables to MAGICC7 variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert MAGICC7 variables to MAGICC6
variables
Raises
------
ValueError
If you try to convert HFC245ca, or some variant thereof, you will get a
ValueError. The reason is that this variable was never meant to be included in
MAGICC6, it was just an accident. See, for example, the text in the
description section of ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``:
"...HFC245fa, rather than HFC245ca, is the actually used isomer.".
Returns
-------
``type(variables)``
Set of converted variables
"""
if isinstance(variables, (list, pd.Index)):
return [_apply_convert_magicc6_to_magicc7_variables(v, inverse) for v in variables] # depends on [control=['if'], data=[]]
else:
return _apply_convert_magicc6_to_magicc7_variables(variables, inverse) |
def get_response(self, **kwargs):
    """
    Build a Response whose body is the JSON object ``{"error": <message>}``
    and whose status is this object's status code. Keyword args are passed
    through to the Response constructor.
    """
    payload = {"error": self.message}  # pylint: disable=exception-message-attribute
    return Response(
        json.dumps(payload),
        status_code=self.status_code,
        content_type="application/json",
        charset="utf-8",
        **kwargs
    )
constant[
Returns a Response object containing this object's status code and a
JSON object containing the key "error" with the value of this object's
error message in the body. Keyword args are passed through to
the Response.
]
return[call[name[Response], parameter[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da18f720670>], [<ast.Attribute object at 0x7da18f722320>]]]]]]] | keyword[def] identifier[get_response] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[Response] (
identifier[json] . identifier[dumps] ({ literal[string] : identifier[self] . identifier[message] }),
identifier[status_code] = identifier[self] . identifier[status_code] ,
identifier[content_type] = literal[string] ,
identifier[charset] = literal[string] ,
** identifier[kwargs]
) | def get_response(self, **kwargs):
"""
Returns a Response object containing this object's status code and a
JSON object containing the key "error" with the value of this object's
error message in the body. Keyword args are passed through to
the Response.
""" # pylint: disable=exception-message-attribute
return Response(json.dumps({'error': self.message}), status_code=self.status_code, content_type='application/json', charset='utf-8', **kwargs) |
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
    """Returns a key derived using the scrypt key-derivation function
    N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
    r and p must be positive numbers such that r * p < 2 ** 30
    The default values are:
    N -- 2**14 (~16k)
    r -- 8
    p -- 1
    Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
    The last one differs from libscrypt defaults, but matches the 'interactive'
    work factor from the original paper. For long term storage where runtime of
    key derivation is not a problem, you could use 16 as in libscrypt or better
    yet increase N if memory is plentiful.
    Raises ValueError if the backend rejects the parameters.
    """
    check_args(password, salt, N, r, p, olen)
    # Memory limit for the backend: scrypt itself needs about 128*r*N bytes;
    # the extra (p + 2) blocks give headroom so valid parameters are not
    # rejected by the backend's maxmem check.
    m = 128 * r * (N + p + 2)
    try:
        return _scrypt(
            password=password, salt=salt, n=N, r=r, p=p, maxmem=m, dklen=olen)
    except Exception as exc:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt); keep the backend's reason instead of raising a
        # bare, message-less ValueError.
        raise ValueError('scrypt failed: %s' % exc)
constant[Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
]
call[name[check_args], parameter[name[password], name[salt], name[N], name[r], name[p], name[olen]]]
variable[m] assign[=] binary_operation[binary_operation[constant[128] * name[r]] * binary_operation[binary_operation[name[N] + name[p]] + constant[2]]]
<ast.Try object at 0x7da20c9905e0> | keyword[def] identifier[scrypt] ( identifier[password] , identifier[salt] , identifier[N] = identifier[SCRYPT_N] , identifier[r] = identifier[SCRYPT_r] , identifier[p] = identifier[SCRYPT_p] , identifier[olen] = literal[int] ):
literal[string]
identifier[check_args] ( identifier[password] , identifier[salt] , identifier[N] , identifier[r] , identifier[p] , identifier[olen] )
identifier[m] = literal[int] * identifier[r] *( identifier[N] + identifier[p] + literal[int] )
keyword[try] :
keyword[return] identifier[_scrypt] (
identifier[password] = identifier[password] , identifier[salt] = identifier[salt] , identifier[n] = identifier[N] , identifier[r] = identifier[r] , identifier[p] = identifier[p] , identifier[maxmem] = identifier[m] , identifier[dklen] = identifier[olen] )
keyword[except] :
keyword[raise] identifier[ValueError] | def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Set the memory required based on parameter values
m = 128 * r * (N + p + 2)
try:
return _scrypt(password=password, salt=salt, n=N, r=r, p=p, maxmem=m, dklen=olen) # depends on [control=['try'], data=[]]
except:
raise ValueError # depends on [control=['except'], data=[]] |
def htmlFromThing(thing, title):
    """Create a pretty formatted HTML report of a thing's attributes.

    Every property/method discovered by analyzeThing() becomes one table
    row showing its name, type, string value and (where possible) the
    result of evaluating it without arguments.

    Args:
        thing: any object to document (a shallow copy is inspected so the
            caller's object itself is not handed to analyzeThing)
        title: optional title string shown at the top of the page; falsy
            values suppress the title line

    Returns:
        str: a complete HTML document, or False if the thing could not be
        shallow-copied (and therefore cannot be documented safely).
    """
    try:
        thing2 = copy.copy(thing)
    except Exception:  # narrowed from bare except: keep Ctrl-C/SystemExit working
        print("crashed copying the thing! I can't document it.")
        return False
    stuff = analyzeThing(thing2)
    names2 = list(stuff.keys())
    # Prefix private names so sorted() pushes them to the bottom of the table.
    for i, name in enumerate(names2):
        if name.startswith("_"):
            names2[i] = "zzzzzzzzzz" + name
    html = """<html><head><style>
    body {font-family: courier, monospace;}
    .name {font-weight: bold;}
    .type {font-style: italic; font-family: serif; color: #AAA;}
    .desc {}
    .itemEval {background-color: #DDFFDD;}
    .itemEvalFail {}
    table {font-size: .8em;
    margin-top: 20px;
    border-collapse: collapse;}
    tr {border: 1px solid #CCC; vertical-align: text-top;}
    td {padding: 2px 10px 2px 10px;}
    .credits {text-align: center;
    opacity: 0.5;
    margin-top: 50px;
    font-size: .8em;
    font-family: sans-serif;}
    </style></head><body>"""
    if title:
        html += '<span style="color: #CCC;">title: </span>%s<br>' % title
    # Best effort: str()/type() can raise on exotic objects; fall back to "".
    textTitle = ""
    textType = ""
    try:
        textTitle = websafe(str(thing))
        textType = websafe(type(thing).__name__)
    except Exception:  # narrowed from bare except; deliberate best-effort
        pass
    html += '<span style="color: #CCC;">value: </span>%s<br>' % textTitle
    html += '<span style="color: #CCC;"> type: </span>%s<br>' % textType
    html += '<table cellpadding=3 align="center">'
    html += '<tr style="background-color: #000; color: #FFF; font-weight: bold;">'
    html += '<td>property</td><td>type</td><td>value</td>'
    html += '<td>evaluated (without arguments)</td></tr>'
    for name in sorted(names2):
        # Strip the sort-to-bottom prefix added above.
        if name.startswith("zzzzzzzzzz"):
            name = name[10:]
        itemName = str(name)
        itemType = websafe(stuff[name][0])
        itemStr = websafe(stuff[name][1])
        itemEval = websafe(stuff[name][2])
        color = "DDDDFF"   # default cell color: plain attribute
        color2 = ""
        if "method" in itemType:
            itemName += "()"
            color = "FFDDDD"  # methods get a red-ish name cell
        if itemName.startswith("_"):
            color = "EEEEEE"  # private members are greyed out
        # Repr-like values ("<...>" without ", ") are rendered dimmed;
        # everything else gets a green value cell.
        if itemStr.startswith("<") and not ", " in itemStr:
            itemStr = """<span style="color: #CCC; font-family: serif;
            font-style: italic;">%s</span>""" % itemStr
        else:
            color2 = "DDFFDD"
        if itemEval == "":
            itemEval = "FAILED TO EVALUATE"
        html += '<tr>'
        html += '<td class="name" style="background-color: #%s;">%s</td>' % (color, itemName)
        html += '<td class="type">%s</td>' % (itemType)
        html += '<td class="itemStr" style="background-color: #%s;">%s</td>' % (color2, itemStr)
        if itemEval == "FAILED TO EVALUATE":
            html += '<td class="itemEvalFail"></td>'
        else:
            html += '<td class="itemEval">%s</td>' % (itemEval)
        html += '</tr>'
    dt = datetime.datetime.now()
    html += """</table><p class="credits">
    page automatically generated by
    <a href="https://pypi.python.org/pypi/webinspect/">webinspect</a>
    (version %s) %s</p>
    </body></html>""" % (__version__, dt.strftime("at %I:%M %p on %B %d, %Y"))
    return html
constant[create pretty formatted HTML from a things dictionary.]
<ast.Try object at 0x7da204960460>
variable[stuff] assign[=] call[name[analyzeThing], parameter[name[thing2]]]
variable[names2] assign[=] call[name[list], parameter[call[name[stuff].keys, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da204962d70>, <ast.Name object at 0x7da204960100>]]] in starred[call[name[enumerate], parameter[name[names2]]]] begin[:]
if call[name[name].startswith, parameter[constant[_]]] begin[:]
call[name[names2]][name[i]] assign[=] binary_operation[constant[zzzzzzzzzz] + name[name]]
variable[html] assign[=] constant[<html><head><style>
body {font-family: courier, monospace;}
.name {font-weight: bold;}
.type {font-style: italic; font-family: serif; color: #AAA;}
.desc {}
.itemEval {background-color: #DDFFDD;}
.itemEvalFail {}
table {font-size: .8em;
margin-top: 20px;
border-collapse: collapse;}
tr {border: 1px solid #CCC; vertical-align: text-top;}
td {padding: 2px 10px 2px 10px;}
.credits {text-align: center;
opacity: 0.5;
margin-top: 50px;
font-size: .8em;
font-family: sans-serif;}
</style></head><body>]
if name[title] begin[:]
<ast.AugAssign object at 0x7da204961fc0>
variable[textTitle] assign[=] constant[]
variable[textType] assign[=] constant[]
<ast.Try object at 0x7da2049605b0>
<ast.AugAssign object at 0x7da204963100>
<ast.AugAssign object at 0x7da2049623b0>
<ast.AugAssign object at 0x7da204961e70>
<ast.AugAssign object at 0x7da2049631f0>
<ast.AugAssign object at 0x7da204963e80>
<ast.AugAssign object at 0x7da204962710>
for taget[name[name]] in starred[call[name[sorted], parameter[name[names2]]]] begin[:]
if call[name[name].startswith, parameter[constant[zzzzzzzzzz]]] begin[:]
variable[name] assign[=] call[name[name]][<ast.Slice object at 0x7da204962da0>]
variable[itemName] assign[=] call[name[str], parameter[name[name]]]
variable[itemType] assign[=] call[name[websafe], parameter[call[call[name[stuff]][name[name]]][constant[0]]]]
variable[itemStr] assign[=] call[name[websafe], parameter[call[call[name[stuff]][name[name]]][constant[1]]]]
variable[itemEval] assign[=] call[name[websafe], parameter[call[call[name[stuff]][name[name]]][constant[2]]]]
variable[color] assign[=] constant[DDDDFF]
variable[color2] assign[=] constant[]
if compare[constant[method] in name[itemType]] begin[:]
<ast.AugAssign object at 0x7da207f9bcd0>
variable[color] assign[=] constant[FFDDDD]
if call[name[itemName].startswith, parameter[constant[_]]] begin[:]
variable[color] assign[=] constant[EEEEEE]
if <ast.BoolOp object at 0x7da207f99de0> begin[:]
variable[itemStr] assign[=] binary_operation[constant[<span style="color: #CCC; font-family: serif;
font-style: italic;">%s</span>] <ast.Mod object at 0x7da2590d6920> name[itemStr]]
<ast.AugAssign object at 0x7da207f9b760>
<ast.AugAssign object at 0x7da207f9abc0>
<ast.AugAssign object at 0x7da207f9b460>
<ast.AugAssign object at 0x7da207f9a2c0>
if compare[name[itemEval] equal[==] constant[FAILED TO EVALUATE]] begin[:]
<ast.AugAssign object at 0x7da207f98d60>
<ast.AugAssign object at 0x7da207f994e0>
variable[dt] assign[=] call[name[datetime].datetime.now, parameter[]]
<ast.AugAssign object at 0x7da207f9af20>
return[name[html]] | keyword[def] identifier[htmlFromThing] ( identifier[thing] , identifier[title] ):
literal[string]
keyword[try] :
identifier[thing2] = identifier[copy] . identifier[copy] ( identifier[thing] )
keyword[except] :
identifier[print] ( literal[string] )
keyword[return] keyword[False]
identifier[stuff] = identifier[analyzeThing] ( identifier[thing2] )
identifier[names2] = identifier[list] ( identifier[stuff] . identifier[keys] ())
keyword[for] identifier[i] , identifier[name] keyword[in] identifier[enumerate] ( identifier[names2] ):
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ):
identifier[names2] [ identifier[i] ]= literal[string] + identifier[name]
identifier[html] = literal[string]
keyword[if] identifier[title] :
identifier[html] += literal[string] % identifier[title]
identifier[textTitle] = literal[string]
identifier[textType] = literal[string]
keyword[try] :
identifier[textTitle] = identifier[websafe] ( identifier[str] ( identifier[thing] ))
identifier[textType] = identifier[websafe] ( identifier[type] ( identifier[thing] ). identifier[__name__] )
keyword[except] :
keyword[pass]
identifier[html] += literal[string] % identifier[textTitle]
identifier[html] += literal[string] % identifier[textType]
identifier[html] += literal[string]
identifier[html] += literal[string]
identifier[html] += literal[string]
identifier[html] += literal[string]
keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[names2] ):
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ):
identifier[name] = identifier[name] [ literal[int] :]
identifier[itemName] = identifier[str] ( identifier[name] )
identifier[itemType] = identifier[websafe] ( identifier[stuff] [ identifier[name] ][ literal[int] ])
identifier[itemStr] = identifier[websafe] ( identifier[stuff] [ identifier[name] ][ literal[int] ])
identifier[itemEval] = identifier[websafe] ( identifier[stuff] [ identifier[name] ][ literal[int] ])
identifier[color] = literal[string]
identifier[color2] = literal[string]
keyword[if] literal[string] keyword[in] identifier[itemType] :
identifier[itemName] += literal[string]
identifier[color] = literal[string]
keyword[if] identifier[itemName] . identifier[startswith] ( literal[string] ):
identifier[color] = literal[string]
keyword[if] identifier[itemStr] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] literal[string] keyword[in] identifier[itemStr] :
identifier[itemStr] = literal[string] % identifier[itemStr]
keyword[else] :
identifier[color2] = literal[string]
keyword[if] identifier[itemEval] == literal[string] :
identifier[itemEval] = literal[string]
identifier[html] += literal[string]
identifier[html] += literal[string] %( identifier[color] , identifier[itemName] )
identifier[html] += literal[string] %( identifier[itemType] )
identifier[html] += literal[string] %( identifier[color2] , identifier[itemStr] )
keyword[if] identifier[itemEval] == literal[string] :
identifier[html] += literal[string]
keyword[else] :
identifier[html] += literal[string] %( identifier[itemEval] )
identifier[html] += literal[string]
identifier[dt] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[html] += literal[string] %( identifier[__version__] , identifier[dt] . identifier[strftime] ( literal[string] ))
keyword[return] identifier[html] | def htmlFromThing(thing, title):
"""create pretty formatted HTML from a things dictionary."""
try:
thing2 = copy.copy(thing) # depends on [control=['try'], data=[]]
except:
print("crashed copying the thing! I can't document it.")
return False # depends on [control=['except'], data=[]]
stuff = analyzeThing(thing2)
names2 = list(stuff.keys())
for (i, name) in enumerate(names2):
if name.startswith('_'):
names2[i] = 'zzzzzzzzzz' + name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
html = '<html><head><style>\n body {font-family: courier, monospace;}\n .name {font-weight: bold;}\n .type {font-style: italic; font-family: serif; color: #AAA;}\n .desc {}\n .itemEval {background-color: #DDFFDD;}\n .itemEvalFail {}\n table {font-size: .8em;\n margin-top: 20px;\n border-collapse: collapse;}\n tr {border: 1px solid #CCC; vertical-align: text-top;}\n td {padding: 2px 10px 2px 10px;}\n .credits {text-align: center;\n opacity: 0.5;\n margin-top: 50px;\n font-size: .8em;\n font-family: sans-serif;}\n </style></head><body>'
if title:
html += '<span style="color: #CCC;">title: </span>%s<br>' % title # depends on [control=['if'], data=[]]
textTitle = ''
textType = ''
try:
textTitle = websafe(str(thing))
textType = websafe(type(thing).__name__) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
html += '<span style="color: #CCC;">value: </span>%s<br>' % textTitle
html += '<span style="color: #CCC;"> type: </span>%s<br>' % textType
html += '<table cellpadding=3 align="center">'
html += '<tr style="background-color: #000; color: #FFF; font-weight: bold;">'
html += '<td>property</td><td>type</td><td>value</td>'
html += '<td>evaluated (without arguments)</td></tr>'
for name in sorted(names2):
if name.startswith('zzzzzzzzzz'):
name = name[10:] # depends on [control=['if'], data=[]]
itemName = str(name)
itemType = websafe(stuff[name][0])
itemStr = websafe(stuff[name][1])
itemEval = websafe(stuff[name][2])
color = 'DDDDFF'
color2 = ''
if 'method' in itemType:
itemName += '()'
color = 'FFDDDD' # depends on [control=['if'], data=[]]
if itemName.startswith('_'):
color = 'EEEEEE' # depends on [control=['if'], data=[]]
if itemStr.startswith('<') and (not ', ' in itemStr):
itemStr = '<span style="color: #CCC; font-family: serif;\n font-style: italic;">%s</span>' % itemStr # depends on [control=['if'], data=[]]
else:
color2 = 'DDFFDD'
if itemEval == '':
itemEval = 'FAILED TO EVALUATE' # depends on [control=['if'], data=['itemEval']]
html += '<tr>'
html += '<td class="name" style="background-color: #%s;">%s</td>' % (color, itemName)
html += '<td class="type">%s</td>' % itemType
html += '<td class="itemStr" style="background-color: #%s;">%s</td>' % (color2, itemStr)
if itemEval == 'FAILED TO EVALUATE':
html += '<td class="itemEvalFail"></td>' # depends on [control=['if'], data=[]]
else:
html += '<td class="itemEval">%s</td>' % itemEval
html += '</tr>' # depends on [control=['for'], data=['name']]
dt = datetime.datetime.now()
html += '</table><p class="credits">\n page automatically generated by\n <a href="https://pypi.python.org/pypi/webinspect/">webinspect</a>\n (version %s) %s</p>\n </body></html>' % (__version__, dt.strftime('at %I:%M %p on %B %d, %Y'))
return html |
def get(self, sid):
    """
    Constructs a SyncStreamContext

    :param sid: Stream SID or unique name.

    :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
    :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
    """
    service_sid = self._solution['service_sid']
    return SyncStreamContext(
        self._version,
        service_sid=service_sid,
        sid=sid,
    )
constant[
Constructs a SyncStreamContext
:param sid: Stream SID or unique name.
:returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
:rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
]
return[call[name[SyncStreamContext], parameter[name[self]._version]]] | keyword[def] identifier[get] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[return] identifier[SyncStreamContext] ( identifier[self] . identifier[_version] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[sid] = identifier[sid] ,) | def get(self, sid):
"""
Constructs a SyncStreamContext
:param sid: Stream SID or unique name.
:returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
:rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
"""
return SyncStreamContext(self._version, service_sid=self._solution['service_sid'], sid=sid) |
def remove_binding(self, binding):
    """ Remove a binding

    Remove an object from the MongoDB storage for caching

    Args:
        binding (AtlasServiceBinding.Binding): binding

    Raises:
        ErrStorageMongoConnection: Error during MongoDB communication.
        ErrStorageRemoveBinding: Failed to remove the binding
    """
    # Match the binding by its own id and the id of the instance it belongs to.
    query = {"binding_id": binding.binding_id,
             "instance_id": binding.instance.instance_id}

    # delete the binding
    try:
        result = self.broker.delete_one(query)
    except Exception as err:
        # Narrowed from a bare except (which also caught KeyboardInterrupt);
        # chain the original driver error so the root cause is not lost.
        raise ErrStorageMongoConnection("Remove Binding") from err

    # Exactly one document must have been deleted for the removal to count.
    if result is not None and result.deleted_count == 1:
        binding.provisioned = False
    else:
        raise ErrStorageRemoveBinding(binding.binding_id)
constant[ Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da1b27268f0>, <ast.Constant object at 0x7da1b2725f60>], [<ast.Attribute object at 0x7da1b2726e00>, <ast.Attribute object at 0x7da1b27260e0>]]
<ast.Try object at 0x7da1b28d88e0>
if <ast.BoolOp object at 0x7da1b2750ca0> begin[:]
name[binding].provisioned assign[=] constant[False] | keyword[def] identifier[remove_binding] ( identifier[self] , identifier[binding] ):
literal[string]
identifier[query] ={ literal[string] : identifier[binding] . identifier[binding_id] , literal[string] : identifier[binding] . identifier[instance] . identifier[instance_id] }
keyword[try] :
identifier[result] = identifier[self] . identifier[broker] . identifier[delete_one] ( identifier[query] )
keyword[except] :
keyword[raise] identifier[ErrStorageMongoConnection] ( literal[string] )
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] keyword[and] identifier[result] . identifier[deleted_count] == literal[int] :
identifier[binding] . identifier[provisioned] = keyword[False]
keyword[else] :
keyword[raise] identifier[ErrStorageRemoveBinding] ( identifier[binding] . identifier[binding_id] ) | def remove_binding(self, binding):
""" Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
"""
# query
query = {'binding_id': binding.binding_id, 'instance_id': binding.instance.instance_id}
# delete the binding
try:
result = self.broker.delete_one(query) # depends on [control=['try'], data=[]]
except:
raise ErrStorageMongoConnection('Remove Binding') # depends on [control=['except'], data=[]]
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False # depends on [control=['if'], data=[]]
else:
raise ErrStorageRemoveBinding(binding.binding_id) |
def reset(self, commit='HEAD', index=True, working_tree=False,
          paths=None, **kwargs):
    """Reset our HEAD to the given commit, optionally syncing the index
    and working tree. The reference we refer to is set to commit as well.

    :param commit:
        Commit object, Reference object or string identifying the revision
        HEAD should be reset to.
    :param index:
        If True, the index is made to match the given commit; otherwise it
        is left untouched.
    :param working_tree:
        If True, the working tree is forcefully adjusted to match the given
        commit, possibly discarding uncommitted changes without warning.
        Requires index=True.
    :param paths:
        Single path or list of paths (relative to the git root) to reset,
        allowing a partial reset of individual files.
    :param kwargs:
        Additional arguments passed through to git-reset.

    :return: self"""
    # Pick the git-reset mode from the flag combination. --hard implies a
    # reset of the index, so working_tree without index is rejected.
    if working_tree:
        if not index:
            raise ValueError("Cannot reset the working tree if the index is not reset as well")
        mode = "--hard"
    elif index:
        # Some git versions declare "--mixed" together with paths deprecated
        # (see http://github.com/Byron/GitPython/issues#issue/2), so omit
        # the mode flag entirely in that case.
        mode = None if paths else "--mixed"
    else:
        mode = "--soft"

    try:
        self.repo.git.reset(mode, commit, '--', paths, **kwargs)
    except GitCommandError as e:
        # Modern git may exit with status 1 merely to signal that unstaged
        # modifications remain after the reset — that is not an error here.
        if e.status != 1:
            raise

    return self
constant[Reset our HEAD to the given commit optionally synchronizing
the index and working tree. The reference we refer to will be set to
commit as well.
:param commit:
Commit object, Reference Object or string identifying a revision we
should reset HEAD to.
:param index:
If True, the index will be set to match the given commit. Otherwise
it will not be touched.
:param working_tree:
If True, the working tree will be forcefully adjusted to match the given
commit, possibly overwriting uncommitted changes without warning.
If working_tree is True, index must be true as well
:param paths:
Single path or list of paths relative to the git root directory
that are to be reset. This allows to partially reset individual files.
:param kwargs:
Additional arguments passed to git-reset.
:return: self]
variable[mode] assign[=] constant[--soft]
if name[index] begin[:]
variable[mode] assign[=] constant[--mixed]
if name[paths] begin[:]
variable[mode] assign[=] constant[None]
if name[working_tree] begin[:]
variable[mode] assign[=] constant[--hard]
if <ast.UnaryOp object at 0x7da1b1d45840> begin[:]
<ast.Raise object at 0x7da1b1d46e60>
<ast.Try object at 0x7da1b1d45000>
return[name[self]] | keyword[def] identifier[reset] ( identifier[self] , identifier[commit] = literal[string] , identifier[index] = keyword[True] , identifier[working_tree] = keyword[False] ,
identifier[paths] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[mode] = literal[string]
keyword[if] identifier[index] :
identifier[mode] = literal[string]
keyword[if] identifier[paths] :
identifier[mode] = keyword[None]
keyword[if] identifier[working_tree] :
identifier[mode] = literal[string]
keyword[if] keyword[not] identifier[index] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[try] :
identifier[self] . identifier[repo] . identifier[git] . identifier[reset] ( identifier[mode] , identifier[commit] , literal[string] , identifier[paths] ,** identifier[kwargs] )
keyword[except] identifier[GitCommandError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[status] != literal[int] :
keyword[raise]
keyword[return] identifier[self] | def reset(self, commit='HEAD', index=True, working_tree=False, paths=None, **kwargs):
"""Reset our HEAD to the given commit optionally synchronizing
the index and working tree. The reference we refer to will be set to
commit as well.
:param commit:
Commit object, Reference Object or string identifying a revision we
should reset HEAD to.
:param index:
If True, the index will be set to match the given commit. Otherwise
it will not be touched.
:param working_tree:
If True, the working tree will be forcefully adjusted to match the given
commit, possibly overwriting uncommitted changes without warning.
If working_tree is True, index must be true as well
:param paths:
Single path or list of paths relative to the git root directory
that are to be reset. This allows to partially reset individual files.
:param kwargs:
Additional arguments passed to git-reset.
:return: self"""
mode = '--soft'
if index:
mode = '--mixed'
# it appears, some git-versions declare mixed and paths deprecated
# see http://github.com/Byron/GitPython/issues#issue/2
if paths:
mode = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# END special case
# END handle index
if working_tree:
mode = '--hard'
if not index:
raise ValueError('Cannot reset the working tree if the index is not reset as well') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# END working tree handling
try:
self.repo.git.reset(mode, commit, '--', paths, **kwargs) # depends on [control=['try'], data=[]]
except GitCommandError as e:
# git nowadays may use 1 as status to indicate there are still unstaged
# modifications after the reset
if e.status != 1:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
# END handle exception
return self |
def copy_entities(self, from_namespace, from_workspace, etype, enames):
    """Copy entities from another workspace.

    Args:
        from_namespace (str): Source workspace namespace
        from_workspace (str): Source workspace name
        etype (str): Entity type
        enames (list(str)): List of entity names to copy
    """
    response = fapi.copy_entities(
        from_namespace,
        from_workspace,
        self.namespace,
        self.name,
        etype,
        enames,
        self.api_url,
    )
    # Raise if the API did not answer with 201 Created.
    fapi._check_response_code(response, 201)
constant[Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy
]
variable[r] assign[=] call[name[fapi].copy_entities, parameter[name[from_namespace], name[from_workspace], name[self].namespace, name[self].name, name[etype], name[enames], name[self].api_url]]
call[name[fapi]._check_response_code, parameter[name[r], constant[201]]] | keyword[def] identifier[copy_entities] ( identifier[self] , identifier[from_namespace] , identifier[from_workspace] , identifier[etype] , identifier[enames] ):
literal[string]
identifier[r] = identifier[fapi] . identifier[copy_entities] ( identifier[from_namespace] , identifier[from_workspace] ,
identifier[self] . identifier[namespace] , identifier[self] . identifier[name] , identifier[etype] , identifier[enames] ,
identifier[self] . identifier[api_url] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] ) | def copy_entities(self, from_namespace, from_workspace, etype, enames):
"""Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy
"""
r = fapi.copy_entities(from_namespace, from_workspace, self.namespace, self.name, etype, enames, self.api_url)
fapi._check_response_code(r, 201) |
def _generate_bucket_value(self, bucketing_id):
    """ Map a bucketing ID into the half-closed interval [0, MAX_TRAFFIC_VALUE).

    Args:
        bucketing_id: ID for bucketing.

    Returns:
        Bucket value corresponding to the provided bucketing ID.
    """
    hash_code = self._generate_unsigned_hash_code_32_bit(bucketing_id)
    # Normalize the 32-bit hash into [0, 1), then scale to traffic range.
    fraction = float(hash_code) / MAX_HASH_VALUE
    return math.floor(fraction * MAX_TRAFFIC_VALUE)
constant[ Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).
Args:
bucketing_id: ID for bucketing.
Returns:
Bucket value corresponding to the provided bucketing ID.
]
variable[ratio] assign[=] binary_operation[call[name[float], parameter[call[name[self]._generate_unsigned_hash_code_32_bit, parameter[name[bucketing_id]]]]] / name[MAX_HASH_VALUE]]
return[call[name[math].floor, parameter[binary_operation[name[ratio] * name[MAX_TRAFFIC_VALUE]]]]] | keyword[def] identifier[_generate_bucket_value] ( identifier[self] , identifier[bucketing_id] ):
literal[string]
identifier[ratio] = identifier[float] ( identifier[self] . identifier[_generate_unsigned_hash_code_32_bit] ( identifier[bucketing_id] ))/ identifier[MAX_HASH_VALUE]
keyword[return] identifier[math] . identifier[floor] ( identifier[ratio] * identifier[MAX_TRAFFIC_VALUE] ) | def _generate_bucket_value(self, bucketing_id):
""" Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).
Args:
bucketing_id: ID for bucketing.
Returns:
Bucket value corresponding to the provided bucketing ID.
"""
ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE
return math.floor(ratio * MAX_TRAFFIC_VALUE) |
def queue_email(to_addresses, from_address, subject, body, commit=True, html=True, session=None):
    """
    Add a mail to the queue to be sent.

    WARNING: Commits by default!

    :param to_addresses: The names and addresses to send the email to, i.e. "Steve<steve@fig14.com>, info@fig14.com"
    :param from_address: Who the email is from i.e. "Stephen Brown <s@fig14.com>"
    :param subject: The email subject
    :param body: The html / text body of the email
    :param commit: Whether to commit to the database
    :param html: Is this a html email?
    :param session: The sqlalchemy session or None to use db.session
    """
    from models import QueuedEmail
    if session is None:
        session = _db.session
    log.info('Queuing mail to %s: %s' % (to_addresses, subject))
    queued_email = QueuedEmail(html, to_addresses, from_address, subject, body, STATUS_QUEUED)
    session.add(queued_email)
    # Bug fix: the commit flag was previously ignored and the session was
    # always committed; now honor it so callers can batch their own commits.
    if commit:
        session.commit()
    return queued_email
constant[
Add a mail to the queue to be sent.
WARNING: Commits by default!
:param to_addresses: The names and addresses to send the email to, i.e. "Steve<steve@fig14.com>, info@fig14.com"
:param from_address: Who the email is from i.e. "Stephen Brown <s@fig14.com>"
:param subject: The email subject
:param body: The html / text body of the email
:param commit: Whether to commit to the database
:param html: Is this a html email?
:param session: The sqlalchemy session or None to use db.session
]
from relative_module[models] import module[QueuedEmail]
if compare[name[session] is constant[None]] begin[:]
variable[session] assign[=] name[_db].session
call[name[log].info, parameter[binary_operation[constant[Queuing mail to %s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26aec80>, <ast.Name object at 0x7da1b26ad600>]]]]]
variable[queued_email] assign[=] call[name[QueuedEmail], parameter[name[html], name[to_addresses], name[from_address], name[subject], name[body], name[STATUS_QUEUED]]]
call[name[session].add, parameter[name[queued_email]]]
call[name[session].commit, parameter[]]
return[name[queued_email]] | keyword[def] identifier[queue_email] ( identifier[to_addresses] , identifier[from_address] , identifier[subject] , identifier[body] , identifier[commit] = keyword[True] , identifier[html] = keyword[True] , identifier[session] = keyword[None] ):
literal[string]
keyword[from] identifier[models] keyword[import] identifier[QueuedEmail]
keyword[if] identifier[session] keyword[is] keyword[None] :
identifier[session] = identifier[_db] . identifier[session]
identifier[log] . identifier[info] ( literal[string] %( identifier[to_addresses] , identifier[subject] ))
identifier[queued_email] = identifier[QueuedEmail] ( identifier[html] , identifier[to_addresses] , identifier[from_address] , identifier[subject] , identifier[body] , identifier[STATUS_QUEUED] )
identifier[session] . identifier[add] ( identifier[queued_email] )
identifier[session] . identifier[commit] ()
keyword[return] identifier[queued_email] | def queue_email(to_addresses, from_address, subject, body, commit=True, html=True, session=None):
"""
Add a mail to the queue to be sent.
WARNING: Commits by default!
:param to_addresses: The names and addresses to send the email to, i.e. "Steve<steve@fig14.com>, info@fig14.com"
:param from_address: Who the email is from i.e. "Stephen Brown <s@fig14.com>"
:param subject: The email subject
:param body: The html / text body of the email
:param commit: Whether to commit to the database
:param html: Is this a html email?
:param session: The sqlalchemy session or None to use db.session
"""
from models import QueuedEmail
if session is None:
session = _db.session # depends on [control=['if'], data=['session']]
log.info('Queuing mail to %s: %s' % (to_addresses, subject))
queued_email = QueuedEmail(html, to_addresses, from_address, subject, body, STATUS_QUEUED)
session.add(queued_email)
session.commit()
return queued_email |
def rgb2hex(rgb):
    """
    Convert an RGB(A) tuple of floats in [0, 1] to a "#rrggbb" hex string.
    An alpha channel, if present, is ignored.
    """
    red, green, blue = rgb[:3]
    return "#%02x%02x%02x" % (int(red * 255), int(green * 255), int(blue * 255))
constant[
Convert RGB(A) tuple to hex.
]
if compare[call[name[len], parameter[name[rgb]]] greater[>] constant[3]] begin[:]
variable[rgb] assign[=] call[name[rgb]][<ast.Slice object at 0x7da20c7cb610>]
return[call[constant[#{0:02x}{1:02x}{2:02x}].format, parameter[<ast.Starred object at 0x7da20c7cbfd0>]]] | keyword[def] identifier[rgb2hex] ( identifier[rgb] ):
literal[string]
keyword[if] identifier[len] ( identifier[rgb] )> literal[int] :
identifier[rgb] = identifier[rgb] [:- literal[int] ]
keyword[return] literal[string] . identifier[format] (*( identifier[int] ( identifier[v] * literal[int] ) keyword[for] identifier[v] keyword[in] identifier[rgb] )) | def rgb2hex(rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1] # depends on [control=['if'], data=[]]
return '#{0:02x}{1:02x}{2:02x}'.format(*(int(v * 255) for v in rgb)) |
def solve(self, graph, savings_solution, timeout, debug=False, anim=None):
    """Improve initial savings solution using local search

    Parameters
    ----------
    graph: :networkx:`NetworkX Graph Obj< >`
        Graph instance
    savings_solution: SavingsSolution
        initial solution of CVRP problem (instance of `SavingsSolution` class)
    timeout: int
        max processing time in seconds
    debug: bool, defaults to False
        If True, information is printed while routing
    anim: AnimationDing0
        AnimationDing0 object

    Returns
    -------
    LocalSearchSolution
        A solution (LocalSearchSolution class)
    """
    # TODO: If necessary, use timeout to set max processing time of local search

    # Threshold used by the operators (see exchange/relocate operator
    # descriptions for details).
    op_diff_round_digits = int(cfg_ding0.get('mv_routing', 'operator_diff_round_digits'))

    solution = LocalSearchSolution(graph, savings_solution)

    # Apply the three operators in fixed order, ten passes over all of them,
    # timing each application for the optional debug log.
    operators = (('exchange', self.operator_exchange),
                 ('relocate', self.operator_relocate),
                 ('oropt', self.operator_oropt))

    for run in range(10):
        t_last = time.time()
        for op_name, op_func in operators:
            solution = op_func(graph, solution, op_diff_round_digits, anim)
            t_now = time.time()
            if debug:
                logger.debug('Elapsed time ({0}, run {1}): {2}, '
                             'Solution\'s length: {3}'.format(
                                 op_name, str(run), t_now - t_last, solution.length()))
            t_last = t_now

    return solution
constant[Improve initial savings solution using local search
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
Graph instance
savings_solution: SavingsSolution
initial solution of CVRP problem (instance of `SavingsSolution` class)
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
]
variable[op_diff_round_digits] assign[=] call[name[int], parameter[call[name[cfg_ding0].get, parameter[constant[mv_routing], constant[operator_diff_round_digits]]]]]
variable[solution] assign[=] call[name[LocalSearchSolution], parameter[name[graph], name[savings_solution]]]
for taget[name[run]] in starred[call[name[range], parameter[constant[10]]]] begin[:]
variable[start] assign[=] call[name[time].time, parameter[]]
variable[solution] assign[=] call[name[self].operator_exchange, parameter[name[graph], name[solution], name[op_diff_round_digits], name[anim]]]
variable[time1] assign[=] call[name[time].time, parameter[]]
if name[debug] begin[:]
call[name[logger].debug, parameter[call[constant[Elapsed time (exchange, run {1}): {0}, Solution's length: {2}].format, parameter[binary_operation[name[time1] - name[start]], call[name[str], parameter[name[run]]], call[name[solution].length, parameter[]]]]]]
variable[solution] assign[=] call[name[self].operator_relocate, parameter[name[graph], name[solution], name[op_diff_round_digits], name[anim]]]
variable[time2] assign[=] call[name[time].time, parameter[]]
if name[debug] begin[:]
call[name[logger].debug, parameter[call[constant[Elapsed time (relocate, run {1}): {0}, Solution's length: {2}].format, parameter[binary_operation[name[time2] - name[time1]], call[name[str], parameter[name[run]]], call[name[solution].length, parameter[]]]]]]
variable[solution] assign[=] call[name[self].operator_oropt, parameter[name[graph], name[solution], name[op_diff_round_digits], name[anim]]]
variable[time3] assign[=] call[name[time].time, parameter[]]
if name[debug] begin[:]
call[name[logger].debug, parameter[call[constant[Elapsed time (oropt, run {1}): {0}, Solution's length: {2}].format, parameter[binary_operation[name[time3] - name[time2]], call[name[str], parameter[name[run]]], call[name[solution].length, parameter[]]]]]]
return[name[solution]] | keyword[def] identifier[solve] ( identifier[self] , identifier[graph] , identifier[savings_solution] , identifier[timeout] , identifier[debug] = keyword[False] , identifier[anim] = keyword[None] ):
literal[string]
identifier[op_diff_round_digits] = identifier[int] ( identifier[cfg_ding0] . identifier[get] ( literal[string] , literal[string] ))
identifier[solution] = identifier[LocalSearchSolution] ( identifier[graph] , identifier[savings_solution] )
keyword[for] identifier[run] keyword[in] identifier[range] ( literal[int] ):
identifier[start] = identifier[time] . identifier[time] ()
identifier[solution] = identifier[self] . identifier[operator_exchange] ( identifier[graph] , identifier[solution] , identifier[op_diff_round_digits] , identifier[anim] )
identifier[time1] = identifier[time] . identifier[time] ()
keyword[if] identifier[debug] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] (
identifier[time1] - identifier[start] , identifier[str] ( identifier[run] ), identifier[solution] . identifier[length] ()))
identifier[solution] = identifier[self] . identifier[operator_relocate] ( identifier[graph] , identifier[solution] , identifier[op_diff_round_digits] , identifier[anim] )
identifier[time2] = identifier[time] . identifier[time] ()
keyword[if] identifier[debug] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] (
identifier[time2] - identifier[time1] , identifier[str] ( identifier[run] ), identifier[solution] . identifier[length] ()))
identifier[solution] = identifier[self] . identifier[operator_oropt] ( identifier[graph] , identifier[solution] , identifier[op_diff_round_digits] , identifier[anim] )
identifier[time3] = identifier[time] . identifier[time] ()
keyword[if] identifier[debug] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] (
identifier[time3] - identifier[time2] , identifier[str] ( identifier[run] ), identifier[solution] . identifier[length] ()))
keyword[return] identifier[solution] | def solve(self, graph, savings_solution, timeout, debug=False, anim=None):
"""Improve initial savings solution using local search
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
Graph instance
savings_solution: SavingsSolution
initial solution of CVRP problem (instance of `SavingsSolution` class)
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
"""
# TODO: If necessary, use timeout to set max processing time of local search
# load threshold for operator (see exchange or relocate operator's description for more information)
op_diff_round_digits = int(cfg_ding0.get('mv_routing', 'operator_diff_round_digits'))
solution = LocalSearchSolution(graph, savings_solution)
# FOR BENCHMARKING OF OPERATOR'S ORDER:
#self.benchmark_operator_order(graph, savings_solution, op_diff_round_digits)
for run in range(10):
start = time.time()
solution = self.operator_exchange(graph, solution, op_diff_round_digits, anim)
time1 = time.time()
if debug:
logger.debug("Elapsed time (exchange, run {1}): {0}, Solution's length: {2}".format(time1 - start, str(run), solution.length())) # depends on [control=['if'], data=[]]
solution = self.operator_relocate(graph, solution, op_diff_round_digits, anim)
time2 = time.time()
if debug:
logger.debug("Elapsed time (relocate, run {1}): {0}, Solution's length: {2}".format(time2 - time1, str(run), solution.length())) # depends on [control=['if'], data=[]]
solution = self.operator_oropt(graph, solution, op_diff_round_digits, anim)
time3 = time.time()
if debug:
logger.debug("Elapsed time (oropt, run {1}): {0}, Solution's length: {2}".format(time3 - time2, str(run), solution.length())) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['run']]
return solution |
def run_script_with_context(script_path, cwd, context):
    """Execute a script after rendering it with Jinja.

    The script at ``script_path`` is read, rendered as a Jinja template
    against ``context``, written to a temporary file (keeping the original
    extension so the correct interpreter is picked), and then executed.

    :param script_path: Absolute path to the script to run.
    :param cwd: The directory to run the script from.
    :param context: Cookiecutter project template context.
    """
    _, extension = os.path.splitext(script_path)

    # Read via a context manager so the handle is always closed; the
    # previous implementation left the file object open until GC.
    with io.open(script_path, 'r', encoding='utf-8') as script_file:
        contents = script_file.read()

    with tempfile.NamedTemporaryFile(
        delete=False,
        mode='wb',
        suffix=extension
    ) as temp:
        env = StrictEnvironment(
            context=context,
            keep_trailing_newline=True,
        )
        template = env.from_string(contents)
        output = template.render(**context)
        temp.write(output.encode('utf-8'))

    # ``delete=False`` above: the rendered file must outlive the ``with``
    # block so it can be executed here.
    run_script(temp.name, cwd)
constant[Execute a script after rendering it with Jinja.
:param script_path: Absolute path to the script to run.
:param cwd: The directory to run the script from.
:param context: Cookiecutter project template context.
]
<ast.Tuple object at 0x7da1b212ffd0> assign[=] call[name[os].path.splitext, parameter[name[script_path]]]
variable[contents] assign[=] call[call[name[io].open, parameter[name[script_path], constant[r]]].read, parameter[]]
with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:]
variable[env] assign[=] call[name[StrictEnvironment], parameter[]]
variable[template] assign[=] call[name[env].from_string, parameter[name[contents]]]
variable[output] assign[=] call[name[template].render, parameter[]]
call[name[temp].write, parameter[call[name[output].encode, parameter[constant[utf-8]]]]]
call[name[run_script], parameter[name[temp].name, name[cwd]]] | keyword[def] identifier[run_script_with_context] ( identifier[script_path] , identifier[cwd] , identifier[context] ):
literal[string]
identifier[_] , identifier[extension] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[script_path] )
identifier[contents] = identifier[io] . identifier[open] ( identifier[script_path] , literal[string] , identifier[encoding] = literal[string] ). identifier[read] ()
keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] (
identifier[delete] = keyword[False] ,
identifier[mode] = literal[string] ,
identifier[suffix] = identifier[extension]
) keyword[as] identifier[temp] :
identifier[env] = identifier[StrictEnvironment] (
identifier[context] = identifier[context] ,
identifier[keep_trailing_newline] = keyword[True] ,
)
identifier[template] = identifier[env] . identifier[from_string] ( identifier[contents] )
identifier[output] = identifier[template] . identifier[render] (** identifier[context] )
identifier[temp] . identifier[write] ( identifier[output] . identifier[encode] ( literal[string] ))
identifier[run_script] ( identifier[temp] . identifier[name] , identifier[cwd] ) | def run_script_with_context(script_path, cwd, context):
"""Execute a script after rendering it with Jinja.
:param script_path: Absolute path to the script to run.
:param cwd: The directory to run the script from.
:param context: Cookiecutter project template context.
"""
(_, extension) = os.path.splitext(script_path)
contents = io.open(script_path, 'r', encoding='utf-8').read()
with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix=extension) as temp:
env = StrictEnvironment(context=context, keep_trailing_newline=True)
template = env.from_string(contents)
output = template.render(**context)
temp.write(output.encode('utf-8')) # depends on [control=['with'], data=['temp']]
run_script(temp.name, cwd) |
def _binary_sample(image, label, n_samples_per_label, label_count):
""" Sample `nsamples_per_label` points from the binary mask corresponding to `label`
Randomly sample `nsamples_per_label` point form the binary mask corresponding to `label`. Sampling with
replacement is used if the required `nsamples_per_label` is larger than the available `label_count`
:param image: Input 2D raster label image
:type image: uint8 numpy array
:param label: Scalar value of label to consider
:type label: uint8
:param n_samples_per_label: Number of points to sample form the binary mask
:type n_samples_per_label: uint32
:param label_count: Number of points available for `label`
:type label_count: uint32
:return: Sampled label value, row index of samples, col index of samples
"""
h_idx, w_idx = np.where(image == label)
rand_idx = np.random.choice(h_idx.size, size=n_samples_per_label, replace=label_count < n_samples_per_label)
return h_idx[rand_idx], w_idx[rand_idx] | def function[_binary_sample, parameter[image, label, n_samples_per_label, label_count]]:
constant[ Sample `nsamples_per_label` points from the binary mask corresponding to `label`
Randomly sample `nsamples_per_label` point form the binary mask corresponding to `label`. Sampling with
replacement is used if the required `nsamples_per_label` is larger than the available `label_count`
:param image: Input 2D raster label image
:type image: uint8 numpy array
:param label: Scalar value of label to consider
:type label: uint8
:param n_samples_per_label: Number of points to sample form the binary mask
:type n_samples_per_label: uint32
:param label_count: Number of points available for `label`
:type label_count: uint32
:return: Sampled label value, row index of samples, col index of samples
]
<ast.Tuple object at 0x7da18f00cbb0> assign[=] call[name[np].where, parameter[compare[name[image] equal[==] name[label]]]]
variable[rand_idx] assign[=] call[name[np].random.choice, parameter[name[h_idx].size]]
return[tuple[[<ast.Subscript object at 0x7da1b23441c0>, <ast.Subscript object at 0x7da1b2344520>]]] | keyword[def] identifier[_binary_sample] ( identifier[image] , identifier[label] , identifier[n_samples_per_label] , identifier[label_count] ):
literal[string]
identifier[h_idx] , identifier[w_idx] = identifier[np] . identifier[where] ( identifier[image] == identifier[label] )
identifier[rand_idx] = identifier[np] . identifier[random] . identifier[choice] ( identifier[h_idx] . identifier[size] , identifier[size] = identifier[n_samples_per_label] , identifier[replace] = identifier[label_count] < identifier[n_samples_per_label] )
keyword[return] identifier[h_idx] [ identifier[rand_idx] ], identifier[w_idx] [ identifier[rand_idx] ] | def _binary_sample(image, label, n_samples_per_label, label_count):
""" Sample `nsamples_per_label` points from the binary mask corresponding to `label`
Randomly sample `nsamples_per_label` point form the binary mask corresponding to `label`. Sampling with
replacement is used if the required `nsamples_per_label` is larger than the available `label_count`
:param image: Input 2D raster label image
:type image: uint8 numpy array
:param label: Scalar value of label to consider
:type label: uint8
:param n_samples_per_label: Number of points to sample form the binary mask
:type n_samples_per_label: uint32
:param label_count: Number of points available for `label`
:type label_count: uint32
:return: Sampled label value, row index of samples, col index of samples
"""
(h_idx, w_idx) = np.where(image == label)
rand_idx = np.random.choice(h_idx.size, size=n_samples_per_label, replace=label_count < n_samples_per_label)
return (h_idx[rand_idx], w_idx[rand_idx]) |
def verify_refresh_token(self, expired_token) -> bool:
    """ Use request information to validate refresh JWT """
    try:
        # Decode the (possibly expired) outer token; expiry is deliberately
        # not verified here because the token is expected to be expired.
        payload = jwt.decode(
            Security.decrypt(expired_token),
            self.app_secret,
            options={'verify_exp': False})
        if payload.get('refresh_token') is not None:
            try:
                # The embedded refresh token must itself be a valid JWT.
                jwt.decode(payload['refresh_token'], self.app_secret)
            except (Exception, BaseException) as error:
                self.errors.append(error)
                return False
            self.data = payload
            return True
    except (Exception, BaseException) as error:
        self.errors.append(error)
        return False
    # Outer token decoded fine but carried no refresh token.
    return False
constant[ Use request information to validate refresh JWT ]
<ast.Try object at 0x7da18f720040>
return[constant[False]] | keyword[def] identifier[verify_refresh_token] ( identifier[self] , identifier[expired_token] )-> identifier[bool] :
literal[string]
keyword[try] :
identifier[decoded_token] = identifier[jwt] . identifier[decode] (
identifier[Security] . identifier[decrypt] ( identifier[expired_token] ),
identifier[self] . identifier[app_secret] ,
identifier[options] ={ literal[string] : keyword[False] })
keyword[if] literal[string] keyword[in] identifier[decoded_token] keyword[and] identifier[decoded_token] [ literal[string] ] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[jwt] . identifier[decode] ( identifier[decoded_token] [ literal[string] ], identifier[self] . identifier[app_secret] )
identifier[self] . identifier[data] = identifier[decoded_token]
keyword[return] keyword[True]
keyword[except] ( identifier[Exception] , identifier[BaseException] ) keyword[as] identifier[error] :
identifier[self] . identifier[errors] . identifier[append] ( identifier[error] )
keyword[return] keyword[False]
keyword[except] ( identifier[Exception] , identifier[BaseException] ) keyword[as] identifier[error] :
identifier[self] . identifier[errors] . identifier[append] ( identifier[error] )
keyword[return] keyword[False]
keyword[return] keyword[False] | def verify_refresh_token(self, expired_token) -> bool:
""" Use request information to validate refresh JWT """
try:
decoded_token = jwt.decode(Security.decrypt(expired_token), self.app_secret, options={'verify_exp': False})
if 'refresh_token' in decoded_token and decoded_token['refresh_token'] is not None:
try:
jwt.decode(decoded_token['refresh_token'], self.app_secret)
self.data = decoded_token
return True # depends on [control=['try'], data=[]]
except (Exception, BaseException) as error:
self.errors.append(error)
return False # depends on [control=['except'], data=['error']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (Exception, BaseException) as error:
self.errors.append(error)
return False # depends on [control=['except'], data=['error']]
return False |
def draw_heading(self, writer):
    """
    Conditionally redraw the heading when ``dirty`` is ``STATE_REFRESH``.

    Moves the cursor home, clears the screen, then writes the intro
    message and header, each followed by a newline.

    :param writer: callable writes to output stream, receiving unicode.
    :returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
    """
    if self.dirty == self.STATE_REFRESH:
        heading = (self.term.home, self.term.clear,
                   self.screen.msg_intro, '\n',
                   self.screen.header, '\n')
        writer(u''.join(heading))
        return True
constant[
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
]
if compare[name[self].dirty equal[==] name[self].STATE_REFRESH] begin[:]
call[name[writer], parameter[call[constant[].join, parameter[tuple[[<ast.Attribute object at 0x7da207f9be50>, <ast.Attribute object at 0x7da207f98730>, <ast.Attribute object at 0x7da207f9bc10>, <ast.Constant object at 0x7da207f99a50>, <ast.Attribute object at 0x7da207f98250>, <ast.Constant object at 0x7da207f9be80>]]]]]]
return[constant[True]] | keyword[def] identifier[draw_heading] ( identifier[self] , identifier[writer] ):
literal[string]
keyword[if] identifier[self] . identifier[dirty] == identifier[self] . identifier[STATE_REFRESH] :
identifier[writer] ( literal[string] . identifier[join] (
( identifier[self] . identifier[term] . identifier[home] , identifier[self] . identifier[term] . identifier[clear] ,
identifier[self] . identifier[screen] . identifier[msg_intro] , literal[string] ,
identifier[self] . identifier[screen] . identifier[header] , literal[string] ,)))
keyword[return] keyword[True] | def draw_heading(self, writer):
"""
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
"""
if self.dirty == self.STATE_REFRESH:
writer(u''.join((self.term.home, self.term.clear, self.screen.msg_intro, '\n', self.screen.header, '\n')))
return True # depends on [control=['if'], data=[]] |
def update(self, *args, **kw):
    """
    Update/save this interface information back to SMC. When interface
    changes are made, especially to sub interfaces, call `update` on
    the top level interface.

    Example of changing the IP address of an interface::

        >>> engine = Engine('sg_vm')
        >>> interface = engine.physical_interface.get(1)
        >>> interface.zone_ref = zone_helper('mynewzone')
        >>> interface.update()

    :raises UpdateElementFailed: failure to save changes
    :return: Interface
    """
    super(Interface, self).update(*args, **kw)
    # Invalidate the parent engine's cached data so subsequent reads
    # reflect the interface change just saved.
    self._engine._del_cache()
    # Return self so update calls can be chained.
    return self
constant[
Update/save this interface information back to SMC. When interface
changes are made, especially to sub interfaces, call `update` on
the top level interface.
Example of changing the IP address of an interface::
>>> engine = Engine('sg_vm')
>>> interface = engine.physical_interface.get(1)
>>> interface.zone_ref = zone_helper('mynewzone')
>>> interface.update()
:raises UpdateElementFailed: failure to save changes
:return: Interface
]
call[call[name[super], parameter[name[Interface], name[self]]].update, parameter[<ast.Starred object at 0x7da1b1b039d0>]]
call[name[self]._engine._del_cache, parameter[]]
return[name[self]] | keyword[def] identifier[update] ( identifier[self] ,* identifier[args] ,** identifier[kw] ):
literal[string]
identifier[super] ( identifier[Interface] , identifier[self] ). identifier[update] (* identifier[args] ,** identifier[kw] )
identifier[self] . identifier[_engine] . identifier[_del_cache] ()
keyword[return] identifier[self] | def update(self, *args, **kw):
"""
Update/save this interface information back to SMC. When interface
changes are made, especially to sub interfaces, call `update` on
the top level interface.
Example of changing the IP address of an interface::
>>> engine = Engine('sg_vm')
>>> interface = engine.physical_interface.get(1)
>>> interface.zone_ref = zone_helper('mynewzone')
>>> interface.update()
:raises UpdateElementFailed: failure to save changes
:return: Interface
"""
super(Interface, self).update(*args, **kw)
self._engine._del_cache()
return self |
def delete_file(self, target, path):
    """Delete a file from a device

    :param target: The device(s) to be targeted with this request
    :type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
    :param path: The path on the target to the file to delete.
    :return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo`
        if the operation failed on that device
    :raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
    """
    commands = FileSystemServiceCommandBlock()
    commands.add_command(DeleteCommand(path))

    # Fire the SCI request and parse the XML response envelope.
    response = self._sci_api.send_sci("file_system", target,
                                      commands.get_command_string())
    root = _parse_command_response(response)

    results = {}
    for device in root.findall('./file_system/device'):
        error = device.find('./error')
        if error is None:
            outcome = DeleteCommand.parse_response(device.find('./commands/rm'))
        else:
            outcome = _parse_error_tree(error)
        results[device.get('id')] = outcome
    return results
constant[Delete a file from a device
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to the file to delete.
:return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo`
if the operation failed on that device
:raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
]
variable[command_block] assign[=] call[name[FileSystemServiceCommandBlock], parameter[]]
call[name[command_block].add_command, parameter[call[name[DeleteCommand], parameter[name[path]]]]]
variable[root] assign[=] call[name[_parse_command_response], parameter[call[name[self]._sci_api.send_sci, parameter[constant[file_system], name[target], call[name[command_block].get_command_string, parameter[]]]]]]
variable[out_dict] assign[=] dictionary[[], []]
for taget[name[device]] in starred[call[name[root].findall, parameter[constant[./file_system/device]]]] begin[:]
variable[device_id] assign[=] call[name[device].get, parameter[constant[id]]]
variable[error] assign[=] call[name[device].find, parameter[constant[./error]]]
if compare[name[error] is_not constant[None]] begin[:]
call[name[out_dict]][name[device_id]] assign[=] call[name[_parse_error_tree], parameter[name[error]]]
return[name[out_dict]] | keyword[def] identifier[delete_file] ( identifier[self] , identifier[target] , identifier[path] ):
literal[string]
identifier[command_block] = identifier[FileSystemServiceCommandBlock] ()
identifier[command_block] . identifier[add_command] ( identifier[DeleteCommand] ( identifier[path] ))
identifier[root] = identifier[_parse_command_response] ( identifier[self] . identifier[_sci_api] . identifier[send_sci] ( literal[string] , identifier[target] , identifier[command_block] . identifier[get_command_string] ()))
identifier[out_dict] ={}
keyword[for] identifier[device] keyword[in] identifier[root] . identifier[findall] ( literal[string] ):
identifier[device_id] = identifier[device] . identifier[get] ( literal[string] )
identifier[error] = identifier[device] . identifier[find] ( literal[string] )
keyword[if] identifier[error] keyword[is] keyword[not] keyword[None] :
identifier[out_dict] [ identifier[device_id] ]= identifier[_parse_error_tree] ( identifier[error] )
keyword[else] :
identifier[out_dict] [ identifier[device_id] ]= identifier[DeleteCommand] . identifier[parse_response] ( identifier[device] . identifier[find] ( literal[string] ))
keyword[return] identifier[out_dict] | def delete_file(self, target, path):
"""Delete a file from a device
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to the file to delete.
:return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo`
if the operation failed on that device
:raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
"""
command_block = FileSystemServiceCommandBlock()
command_block.add_command(DeleteCommand(path))
root = _parse_command_response(self._sci_api.send_sci('file_system', target, command_block.get_command_string()))
out_dict = {}
for device in root.findall('./file_system/device'):
device_id = device.get('id')
error = device.find('./error')
if error is not None:
out_dict[device_id] = _parse_error_tree(error) # depends on [control=['if'], data=['error']]
else:
out_dict[device_id] = DeleteCommand.parse_response(device.find('./commands/rm')) # depends on [control=['for'], data=['device']]
return out_dict |
def loopless_fva_iter(model, reaction, solution=False, zero_cutoff=None):
    """Plugin to get a loopless FVA solution from single FVA iteration.

    Assumes the following about `model` and `reaction`:
    1. the model objective is set to be `reaction`
    2. the model has been optimized and contains the minimum/maximum flux for
       `reaction`
    3. the model contains an auxiliary variable called "fva_old_objective"
       denoting the previous objective

    Parameters
    ----------
    model : cobra.Model
        The model to be used.
    reaction : cobra.Reaction
        The reaction currently minimized/maximized.
    solution : boolean, optional
        Whether to return the entire solution or only the minimum/maximum for
        `reaction`.
    zero_cutoff : positive float, optional
        Cutoff used for loop removal. Fluxes with an absolute value smaller
        than `zero_cutoff` are considered to be zero (default model.tolerance).

    Returns
    -------
    single float or dict
        Returns the minimized/maximized flux through `reaction` if
        `solution` is False (default). Otherwise returns a loopless flux
        solution containing the minimum/maximum flux for `reaction`.
    """
    zero_cutoff = normalize_cutoff(model, zero_cutoff)
    # Remember the (possibly loopy) FVA optimum and its solution so they can
    # be returned unchanged if they turn out to be loopless already.
    current = model.objective.value
    sol = get_solution(model)
    objective_dir = model.objective.direction
    # boundary reactions can not be part of cycles
    if reaction.boundary:
        if solution:
            return sol
        else:
            return current
    with model:
        _add_cycle_free(model, sol.fluxes)
        model.slim_optimize()
        # If the previous optimum is maintained in the loopless solution it was
        # loopless and we are done
        if abs(reaction.flux - current) < zero_cutoff:
            if solution:
                return sol
            return current
        # If previous optimum was not in the loopless solution create a new
        # almost loopless solution containing only loops including the current
        # reaction. Then remove all of those loops.
        ll_sol = get_solution(model).fluxes
        # Pin the reaction to the previous optimum so the next solve differs
        # from `ll_sol` only through loops that involve this reaction.
        reaction.bounds = (current, current)
        model.slim_optimize()
        almost_ll_sol = get_solution(model).fluxes
    with model:
        # find the reactions with loops using the current reaction and remove
        # the loops
        for rxn in model.reactions:
            rid = rxn.id
            # Zero flux in the loopless solution but non-zero in the pinned
            # one means the flux only appears as part of a loop through
            # `reaction`; shrink the bounds so that loop cannot carry flux.
            if ((abs(ll_sol[rid]) < zero_cutoff) and
                    (abs(almost_ll_sol[rid]) > zero_cutoff)):
                rxn.bounds = max(0, rxn.lower_bound), min(0, rxn.upper_bound)
        if solution:
            best = model.optimize()
        else:
            model.slim_optimize()
            best = reaction.flux
    # NOTE(review): the objective direction is restored defensively here;
    # confirm whether the loopless machinery above actually mutates it.
    model.objective.direction = objective_dir
    return best
constant[Plugin to get a loopless FVA solution from single FVA iteration.
Assumes the following about `model` and `reaction`:
1. the model objective is set to be `reaction`
2. the model has been optimized and contains the minimum/maximum flux for
`reaction`
3. the model contains an auxiliary variable called "fva_old_objective"
denoting the previous objective
Parameters
----------
model : cobra.Model
The model to be used.
reaction : cobra.Reaction
The reaction currently minimized/maximized.
solution : boolean, optional
Whether to return the entire solution or only the minimum/maximum for
`reaction`.
zero_cutoff : positive float, optional
Cutoff used for loop removal. Fluxes with an absolute value smaller
than `zero_cutoff` are considered to be zero (default model.tolerance).
Returns
-------
single float or dict
Returns the minimized/maximized flux through `reaction` if
all_fluxes == False (default). Otherwise returns a loopless flux
solution containing the minimum/maximum flux for `reaction`.
]
variable[zero_cutoff] assign[=] call[name[normalize_cutoff], parameter[name[model], name[zero_cutoff]]]
variable[current] assign[=] name[model].objective.value
variable[sol] assign[=] call[name[get_solution], parameter[name[model]]]
variable[objective_dir] assign[=] name[model].objective.direction
if name[reaction].boundary begin[:]
if name[solution] begin[:]
return[name[sol]]
with name[model] begin[:]
call[name[_add_cycle_free], parameter[name[model], name[sol].fluxes]]
call[name[model].slim_optimize, parameter[]]
if compare[call[name[abs], parameter[binary_operation[name[reaction].flux - name[current]]]] less[<] name[zero_cutoff]] begin[:]
if name[solution] begin[:]
return[name[sol]]
return[name[current]]
variable[ll_sol] assign[=] call[name[get_solution], parameter[name[model]]].fluxes
name[reaction].bounds assign[=] tuple[[<ast.Name object at 0x7da1b021ca00>, <ast.Name object at 0x7da1b021e0e0>]]
call[name[model].slim_optimize, parameter[]]
variable[almost_ll_sol] assign[=] call[name[get_solution], parameter[name[model]]].fluxes
with name[model] begin[:]
for taget[name[rxn]] in starred[name[model].reactions] begin[:]
variable[rid] assign[=] name[rxn].id
if <ast.BoolOp object at 0x7da1b021d270> begin[:]
name[rxn].bounds assign[=] tuple[[<ast.Call object at 0x7da1b021d090>, <ast.Call object at 0x7da1b021e140>]]
if name[solution] begin[:]
variable[best] assign[=] call[name[model].optimize, parameter[]]
name[model].objective.direction assign[=] name[objective_dir]
return[name[best]] | keyword[def] identifier[loopless_fva_iter] ( identifier[model] , identifier[reaction] , identifier[solution] = keyword[False] , identifier[zero_cutoff] = keyword[None] ):
literal[string]
identifier[zero_cutoff] = identifier[normalize_cutoff] ( identifier[model] , identifier[zero_cutoff] )
identifier[current] = identifier[model] . identifier[objective] . identifier[value]
identifier[sol] = identifier[get_solution] ( identifier[model] )
identifier[objective_dir] = identifier[model] . identifier[objective] . identifier[direction]
keyword[if] identifier[reaction] . identifier[boundary] :
keyword[if] identifier[solution] :
keyword[return] identifier[sol]
keyword[else] :
keyword[return] identifier[current]
keyword[with] identifier[model] :
identifier[_add_cycle_free] ( identifier[model] , identifier[sol] . identifier[fluxes] )
identifier[model] . identifier[slim_optimize] ()
keyword[if] identifier[abs] ( identifier[reaction] . identifier[flux] - identifier[current] )< identifier[zero_cutoff] :
keyword[if] identifier[solution] :
keyword[return] identifier[sol]
keyword[return] identifier[current]
identifier[ll_sol] = identifier[get_solution] ( identifier[model] ). identifier[fluxes]
identifier[reaction] . identifier[bounds] =( identifier[current] , identifier[current] )
identifier[model] . identifier[slim_optimize] ()
identifier[almost_ll_sol] = identifier[get_solution] ( identifier[model] ). identifier[fluxes]
keyword[with] identifier[model] :
keyword[for] identifier[rxn] keyword[in] identifier[model] . identifier[reactions] :
identifier[rid] = identifier[rxn] . identifier[id]
keyword[if] (( identifier[abs] ( identifier[ll_sol] [ identifier[rid] ])< identifier[zero_cutoff] ) keyword[and]
( identifier[abs] ( identifier[almost_ll_sol] [ identifier[rid] ])> identifier[zero_cutoff] )):
identifier[rxn] . identifier[bounds] = identifier[max] ( literal[int] , identifier[rxn] . identifier[lower_bound] ), identifier[min] ( literal[int] , identifier[rxn] . identifier[upper_bound] )
keyword[if] identifier[solution] :
identifier[best] = identifier[model] . identifier[optimize] ()
keyword[else] :
identifier[model] . identifier[slim_optimize] ()
identifier[best] = identifier[reaction] . identifier[flux]
identifier[model] . identifier[objective] . identifier[direction] = identifier[objective_dir]
keyword[return] identifier[best] | def loopless_fva_iter(model, reaction, solution=False, zero_cutoff=None):
"""Plugin to get a loopless FVA solution from single FVA iteration.
Assumes the following about `model` and `reaction`:
1. the model objective is set to be `reaction`
2. the model has been optimized and contains the minimum/maximum flux for
`reaction`
3. the model contains an auxiliary variable called "fva_old_objective"
denoting the previous objective
Parameters
----------
model : cobra.Model
The model to be used.
reaction : cobra.Reaction
The reaction currently minimized/maximized.
solution : boolean, optional
Whether to return the entire solution or only the minimum/maximum for
`reaction`.
zero_cutoff : positive float, optional
Cutoff used for loop removal. Fluxes with an absolute value smaller
than `zero_cutoff` are considered to be zero (default model.tolerance).
Returns
-------
single float or dict
Returns the minimized/maximized flux through `reaction` if
all_fluxes == False (default). Otherwise returns a loopless flux
solution containing the minimum/maximum flux for `reaction`.
"""
zero_cutoff = normalize_cutoff(model, zero_cutoff)
current = model.objective.value
sol = get_solution(model)
objective_dir = model.objective.direction
# boundary reactions can not be part of cycles
if reaction.boundary:
if solution:
return sol # depends on [control=['if'], data=[]]
else:
return current # depends on [control=['if'], data=[]]
with model:
_add_cycle_free(model, sol.fluxes)
model.slim_optimize()
# If the previous optimum is maintained in the loopless solution it was
# loopless and we are done
if abs(reaction.flux - current) < zero_cutoff:
if solution:
return sol # depends on [control=['if'], data=[]]
return current # depends on [control=['if'], data=[]]
# If previous optimum was not in the loopless solution create a new
# almost loopless solution containing only loops including the current
# reaction. Than remove all of those loops.
ll_sol = get_solution(model).fluxes
reaction.bounds = (current, current)
model.slim_optimize()
almost_ll_sol = get_solution(model).fluxes # depends on [control=['with'], data=[]]
with model:
# find the reactions with loops using the current reaction and remove
# the loops
for rxn in model.reactions:
rid = rxn.id
if abs(ll_sol[rid]) < zero_cutoff and abs(almost_ll_sol[rid]) > zero_cutoff:
rxn.bounds = (max(0, rxn.lower_bound), min(0, rxn.upper_bound)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rxn']]
if solution:
best = model.optimize() # depends on [control=['if'], data=[]]
else:
model.slim_optimize()
best = reaction.flux # depends on [control=['with'], data=[]]
model.objective.direction = objective_dir
return best |
def check_captcha(self,
                  captcha_id,
                  solution,
                  author_name=None,
                  author_url=None,
                  author_mail=None,
                  author_ip=None,
                  author_id=None,
                  author_open_id=None,
                  honeypot=None
                  ):
    """Checks a CAPTCHA that was solved by the end-user.

    Keyword arguments:
    captcha_id -- Unique identifier of the CAPTCHA solved.
    solution -- Solution provided by the end-user for the CAPTCHA.
    author_name -- The name of the content author.
    author_url -- The homepage/website URL of the content author.
    author_mail -- The e-mail address of the content author.
    author_ip -- The IP address of the content author.
    author_id -- The local user ID on the client site of the content author.
    author_open_id -- List of Open IDs of the content author.
    honeypot -- The value of a client-side honeypot form element, if non-empty.

    Returns:
    solved -- Boolean whether or not the CAPTCHA was solved correctly.
    If the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content.
    """
    check_captcha_endpoint = Template("${rest_root}/captcha/${captcha_id}")
    url = check_captcha_endpoint.substitute(rest_root=self._rest_root,
                                            captcha_id=captcha_id)
    data = {"solution": solution}
    # BUG FIX: the author_* and honeypot arguments were accepted and
    # documented but never forwarded to the service.  Send them using the
    # Mollom REST parameter names, omitting any that were not supplied.
    optional_params = {
        "authorName": author_name,
        "authorUrl": author_url,
        "authorMail": author_mail,
        "authorIp": author_ip,
        "authorId": author_id,
        "honeypot": honeypot,
    }
    for param, value in optional_params.items():
        if value is not None:
            data[param] = value
    if author_open_id is not None:
        # NOTE(review): assumes the API takes a space-separated list of
        # Open IDs when several are given -- confirm against the Mollom
        # REST documentation.
        if isinstance(author_open_id, (list, tuple)):
            data["authorOpenid"] = " ".join(author_open_id)
        else:
            data["authorOpenid"] = author_open_id
    response = self.__post_request(url, data)
    # Mollom returns "1" for success and "0" for failure.
    return response["captcha"]["solved"] == "1"
constant[Checks a CAPTCHA that was solved by the end-user.
Keyword arguments:
captcha_id -- Unique identifier of the CAPTCHA solved.
solution -- Solution provided by the end-user for the CAPTCHA.
author_name -- The name of the content author.
author_url -- The homepage/website URL of the content author.
author_mail -- The e-mail address of the content author.
author_ip -- The IP address of the content author.
author_id -- The local user ID on the client site of the content author.
author_open_id -- List of Open IDs of the content author.
honeypot -- The value of a client-side honeypot form element, if non-empty.
Returns:
solved -- Boolean whether or not the CAPTCHA was solved correctly.
If the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content.
]
variable[check_catpcha_endpoint] assign[=] call[name[Template], parameter[constant[${rest_root}/captcha/${captcha_id}]]]
variable[url] assign[=] call[name[check_catpcha_endpoint].substitute, parameter[]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18ede48e0>], [<ast.Name object at 0x7da18ede6ad0>]]
variable[response] assign[=] call[name[self].__post_request, parameter[name[url], name[data]]]
return[compare[call[call[name[response]][constant[captcha]]][constant[solved]] equal[==] constant[1]]] | keyword[def] identifier[check_captcha] ( identifier[self] ,
identifier[captcha_id] ,
identifier[solution] ,
identifier[author_name] = keyword[None] ,
identifier[author_url] = keyword[None] ,
identifier[author_mail] = keyword[None] ,
identifier[author_ip] = keyword[None] ,
identifier[author_id] = keyword[None] ,
identifier[author_open_id] = keyword[None] ,
identifier[honeypot] = keyword[None]
):
literal[string]
identifier[check_catpcha_endpoint] = identifier[Template] ( literal[string] )
identifier[url] = identifier[check_catpcha_endpoint] . identifier[substitute] ( identifier[rest_root] = identifier[self] . identifier[_rest_root] , identifier[captcha_id] = identifier[captcha_id] )
identifier[data] ={ literal[string] : identifier[solution] }
identifier[response] = identifier[self] . identifier[__post_request] ( identifier[url] , identifier[data] )
keyword[return] identifier[response] [ literal[string] ][ literal[string] ]== literal[string] | def check_captcha(self, captcha_id, solution, author_name=None, author_url=None, author_mail=None, author_ip=None, author_id=None, author_open_id=None, honeypot=None):
"""Checks a CAPTCHA that was solved by the end-user.
Keyword arguments:
captcha_id -- Unique identifier of the CAPTCHA solved.
solution -- Solution provided by the end-user for the CAPTCHA.
author_name -- The name of the content author.
author_url -- The homepage/website URL of the content author.
author_mail -- The e-mail address of the content author.
author_ip -- The IP address of the content author.
author_id -- The local user ID on the client site of the content author.
author_open_id -- List of Open IDs of the content author.
honeypot -- The value of a client-side honeypot form element, if non-empty.
Returns:
solved -- Boolean whether or not the CAPTCHA was solved correctly.
If the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content.
"""
check_catpcha_endpoint = Template('${rest_root}/captcha/${captcha_id}')
url = check_catpcha_endpoint.substitute(rest_root=self._rest_root, captcha_id=captcha_id)
data = {'solution': solution}
response = self.__post_request(url, data)
# Mollom returns "1" for success and "0" for failure
return response['captcha']['solved'] == '1' |
def simulate_source(self, src_dict=None):
    """
    Inject simulated source counts into the data.

    Parameters
    ----------
    src_dict : dict
        Dictionary defining the spatial and spectral properties of
        the source that will be injected.
    """
    self._fitcache = None

    # Never mutate the caller's dictionary.
    src_dict = {} if src_dict is None else copy.deepcopy(src_dict)

    skydir = wcs_utils.get_target_skydir(src_dict, self.roi.skydir)

    # Fill in defaults for any properties the caller left unspecified.
    defaults = {
        'ra': skydir.ra.deg,
        'dec': skydir.dec.deg,
        'SpatialModel': 'PointSource',
        'SpatialWidth': 0.3,
        'Index': 2.0,
        'Prefactor': 1E-13,
    }
    for prop, default in defaults.items():
        src_dict.setdefault(prop, default)

    # Temporarily add the source, inject its counts into each component,
    # then remove it again.
    self.add_source('mcsource', src_dict, free=True,
                    init_source=False)
    for comp in self.components:
        comp.simulate_roi('mcsource', clear=False)
    self.delete_source('mcsource')

    if hasattr(self.like.components[0].logLike, 'setCountsMap'):
        self._init_roi_model()
    else:
        # Older likelihood interface: rebuild the summed likelihood from a
        # temporary XML round-trip.
        self.write_xml('tmp')
        self._like = SummedLikelihood()
        for comp in self._components:
            comp._create_binned_analysis('tmp.xml')
            self._like.addComponent(comp.like)
        self._init_roi_model()
        self.load_xml('tmp')
constant[
Inject simulated source counts into the data.
Parameters
----------
src_dict : dict
Dictionary defining the spatial and spectral properties of
the source that will be injected.
]
name[self]._fitcache assign[=] constant[None]
if compare[name[src_dict] is constant[None]] begin[:]
variable[src_dict] assign[=] dictionary[[], []]
variable[skydir] assign[=] call[name[wcs_utils].get_target_skydir, parameter[name[src_dict], name[self].roi.skydir]]
call[name[src_dict].setdefault, parameter[constant[ra], name[skydir].ra.deg]]
call[name[src_dict].setdefault, parameter[constant[dec], name[skydir].dec.deg]]
call[name[src_dict].setdefault, parameter[constant[SpatialModel], constant[PointSource]]]
call[name[src_dict].setdefault, parameter[constant[SpatialWidth], constant[0.3]]]
call[name[src_dict].setdefault, parameter[constant[Index], constant[2.0]]]
call[name[src_dict].setdefault, parameter[constant[Prefactor], constant[1e-13]]]
call[name[self].add_source, parameter[constant[mcsource], name[src_dict]]]
for taget[name[c]] in starred[name[self].components] begin[:]
call[name[c].simulate_roi, parameter[constant[mcsource]]]
call[name[self].delete_source, parameter[constant[mcsource]]]
if call[name[hasattr], parameter[call[name[self].like.components][constant[0]].logLike, constant[setCountsMap]]] begin[:]
call[name[self]._init_roi_model, parameter[]] | keyword[def] identifier[simulate_source] ( identifier[self] , identifier[src_dict] = keyword[None] ):
literal[string]
identifier[self] . identifier[_fitcache] = keyword[None]
keyword[if] identifier[src_dict] keyword[is] keyword[None] :
identifier[src_dict] ={}
keyword[else] :
identifier[src_dict] = identifier[copy] . identifier[deepcopy] ( identifier[src_dict] )
identifier[skydir] = identifier[wcs_utils] . identifier[get_target_skydir] ( identifier[src_dict] , identifier[self] . identifier[roi] . identifier[skydir] )
identifier[src_dict] . identifier[setdefault] ( literal[string] , identifier[skydir] . identifier[ra] . identifier[deg] )
identifier[src_dict] . identifier[setdefault] ( literal[string] , identifier[skydir] . identifier[dec] . identifier[deg] )
identifier[src_dict] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[src_dict] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[src_dict] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[src_dict] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[self] . identifier[add_source] ( literal[string] , identifier[src_dict] , identifier[free] = keyword[True] ,
identifier[init_source] = keyword[False] )
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[components] :
identifier[c] . identifier[simulate_roi] ( literal[string] , identifier[clear] = keyword[False] )
identifier[self] . identifier[delete_source] ( literal[string] )
keyword[if] identifier[hasattr] ( identifier[self] . identifier[like] . identifier[components] [ literal[int] ]. identifier[logLike] , literal[string] ):
identifier[self] . identifier[_init_roi_model] ()
keyword[else] :
identifier[self] . identifier[write_xml] ( literal[string] )
identifier[self] . identifier[_like] = identifier[SummedLikelihood] ()
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_components] ):
identifier[c] . identifier[_create_binned_analysis] ( literal[string] )
identifier[self] . identifier[_like] . identifier[addComponent] ( identifier[c] . identifier[like] )
identifier[self] . identifier[_init_roi_model] ()
identifier[self] . identifier[load_xml] ( literal[string] ) | def simulate_source(self, src_dict=None):
"""
Inject simulated source counts into the data.
Parameters
----------
src_dict : dict
Dictionary defining the spatial and spectral properties of
the source that will be injected.
"""
self._fitcache = None
if src_dict is None:
src_dict = {} # depends on [control=['if'], data=['src_dict']]
else:
src_dict = copy.deepcopy(src_dict)
skydir = wcs_utils.get_target_skydir(src_dict, self.roi.skydir)
src_dict.setdefault('ra', skydir.ra.deg)
src_dict.setdefault('dec', skydir.dec.deg)
src_dict.setdefault('SpatialModel', 'PointSource')
src_dict.setdefault('SpatialWidth', 0.3)
src_dict.setdefault('Index', 2.0)
src_dict.setdefault('Prefactor', 1e-13)
self.add_source('mcsource', src_dict, free=True, init_source=False)
for c in self.components:
c.simulate_roi('mcsource', clear=False) # depends on [control=['for'], data=['c']]
self.delete_source('mcsource')
if hasattr(self.like.components[0].logLike, 'setCountsMap'):
self._init_roi_model() # depends on [control=['if'], data=[]]
else:
self.write_xml('tmp')
self._like = SummedLikelihood()
for (i, c) in enumerate(self._components):
c._create_binned_analysis('tmp.xml')
self._like.addComponent(c.like) # depends on [control=['for'], data=[]]
self._init_roi_model()
self.load_xml('tmp') |
def _setPrivate(self, private):
"""This is here to make testing easier"""
self.private = private
self.public = pow(self.generator, self.private, self.modulus) | def function[_setPrivate, parameter[self, private]]:
constant[This is here to make testing easier]
name[self].private assign[=] name[private]
name[self].public assign[=] call[name[pow], parameter[name[self].generator, name[self].private, name[self].modulus]] | keyword[def] identifier[_setPrivate] ( identifier[self] , identifier[private] ):
literal[string]
identifier[self] . identifier[private] = identifier[private]
identifier[self] . identifier[public] = identifier[pow] ( identifier[self] . identifier[generator] , identifier[self] . identifier[private] , identifier[self] . identifier[modulus] ) | def _setPrivate(self, private):
"""This is here to make testing easier"""
self.private = private
self.public = pow(self.generator, self.private, self.modulus) |
def read_string(self, content):
    """Parse a string containing C/C++ source code.

    :param content: C/C++ source code.
    :type content: str

    :rtype: Declarations
    """
    src_reader = source_reader.source_reader_t(self.__config,
                                               None,
                                               self.__decl_factory)
    declarations = src_reader.read_string(content)
    # Remember which XML generator produced the intermediate file.
    self.__xml_generator_from_xml_file = src_reader.xml_generator_from_xml_file
    return declarations
constant[Parse a string containing C/C++ source code.
:param content: C/C++ source code.
:type content: str
:rtype: Declarations
]
variable[reader] assign[=] call[name[source_reader].source_reader_t, parameter[name[self].__config, constant[None], name[self].__decl_factory]]
variable[decls] assign[=] call[name[reader].read_string, parameter[name[content]]]
name[self].__xml_generator_from_xml_file assign[=] name[reader].xml_generator_from_xml_file
return[name[decls]] | keyword[def] identifier[read_string] ( identifier[self] , identifier[content] ):
literal[string]
identifier[reader] = identifier[source_reader] . identifier[source_reader_t] (
identifier[self] . identifier[__config] ,
keyword[None] ,
identifier[self] . identifier[__decl_factory] )
identifier[decls] = identifier[reader] . identifier[read_string] ( identifier[content] )
identifier[self] . identifier[__xml_generator_from_xml_file] = identifier[reader] . identifier[xml_generator_from_xml_file]
keyword[return] identifier[decls] | def read_string(self, content):
"""Parse a string containing C/C++ source code.
:param content: C/C++ source code.
:type content: str
:rtype: Declarations
"""
reader = source_reader.source_reader_t(self.__config, None, self.__decl_factory)
decls = reader.read_string(content)
self.__xml_generator_from_xml_file = reader.xml_generator_from_xml_file
return decls |
def _context_callbacks(app, key, original_context=_CONTEXT_MISSING):
    """Register the callbacks we need to properly pop and push the
    app-local context for a component.

    Args:
        app (flask.Flask): The app who this context belongs to. This is the
            only sender our Blinker signal will listen to.
        key (str): The key on ``_CONTEXT_LOCALS`` that this app's context
            listens to.

    Kwargs:
        original_context (dict): The original context present whenever
            these callbacks were registered. We will restore the context to
            this value whenever the app context gets popped.

    Returns:
        (function, function): A two-element tuple of the dynamic functions
            we generated as appcontext callbacks. The first element is the
            callback for ``appcontext_pushed`` (i.e., get and store the
            current context) and the second element is the callback for
            ``appcontext_popped`` (i.e., restore the current context to
            to it's original value).
    """
    def _push_handler(dummy_app):
        """Point the context proxy at this app's specific context."""
        _CONTEXT_LOCALS.context = _CONTEXT_LOCALS(key)  # pylint: disable=assigning-non-slot

    def _pop_handler(dummy_app):
        """Drop the context proxy and restore the original context, if
        there was one.
        """
        try:
            del _CONTEXT_LOCALS.context
        except AttributeError:
            pass

        if original_context is not _CONTEXT_MISSING:
            setattr(_CONTEXT_LOCALS, key, original_context)

    callbacks = (_push_handler, _pop_handler)

    # Keep strong references so Blinker doesn't drop these listeners and
    # so we can tell later that this app is already wired up.
    _CONTEXT_CALLBACK_MAP[app] = callbacks

    # Listen for any app context changes on this app only.
    appcontext_pushed.connect(_push_handler, app)
    appcontext_popped.connect(_pop_handler, app)

    return callbacks
constant[Register the callbacks we need to properly pop and push the
app-local context for a component.
Args:
app (flask.Flask): The app who this context belongs to. This is the
only sender our Blinker signal will listen to.
key (str): The key on ``_CONTEXT_LOCALS`` that this app's context
listens to.
Kwargs:
original_context (dict): The original context present whenever
these callbacks were registered. We will restore the context to
this value whenever the app context gets popped.
Returns:
(function, function): A two-element tuple of the dynamic functions
we generated as appcontext callbacks. The first element is the
callback for ``appcontext_pushed`` (i.e., get and store the
current context) and the second element is the callback for
``appcontext_popped`` (i.e., restore the current context to
to it's original value).
]
def function[_get_context, parameter[dummy_app]]:
constant[Set the context proxy so that it points to a specific context.
]
name[_CONTEXT_LOCALS].context assign[=] call[name[_CONTEXT_LOCALS], parameter[name[key]]]
def function[_clear_context, parameter[dummy_app]]:
constant[Remove the context proxy that points to a specific context and
restore the original context, if there was one.
]
<ast.Try object at 0x7da1b061bfa0>
if compare[name[original_context] is_not name[_CONTEXT_MISSING]] begin[:]
call[name[setattr], parameter[name[_CONTEXT_LOCALS], name[key], name[original_context]]]
call[name[_CONTEXT_CALLBACK_MAP]][name[app]] assign[=] tuple[[<ast.Name object at 0x7da1b0619240>, <ast.Name object at 0x7da1b06191e0>]]
call[name[appcontext_pushed].connect, parameter[name[_get_context], name[app]]]
call[name[appcontext_popped].connect, parameter[name[_clear_context], name[app]]]
return[tuple[[<ast.Name object at 0x7da1b061a200>, <ast.Name object at 0x7da1b0618b80>]]] | keyword[def] identifier[_context_callbacks] ( identifier[app] , identifier[key] , identifier[original_context] = identifier[_CONTEXT_MISSING] ):
literal[string]
keyword[def] identifier[_get_context] ( identifier[dummy_app] ):
literal[string]
identifier[_CONTEXT_LOCALS] . identifier[context] = identifier[_CONTEXT_LOCALS] ( identifier[key] )
keyword[def] identifier[_clear_context] ( identifier[dummy_app] ):
literal[string]
keyword[try] :
keyword[del] identifier[_CONTEXT_LOCALS] . identifier[context]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] identifier[original_context] keyword[is] keyword[not] identifier[_CONTEXT_MISSING] :
identifier[setattr] ( identifier[_CONTEXT_LOCALS] , identifier[key] , identifier[original_context] )
identifier[_CONTEXT_CALLBACK_MAP] [ identifier[app] ]=( identifier[_get_context] , identifier[_clear_context] )
identifier[appcontext_pushed] . identifier[connect] ( identifier[_get_context] , identifier[app] )
identifier[appcontext_popped] . identifier[connect] ( identifier[_clear_context] , identifier[app] )
keyword[return] ( identifier[_get_context] , identifier[_clear_context] ) | def _context_callbacks(app, key, original_context=_CONTEXT_MISSING):
"""Register the callbacks we need to properly pop and push the
app-local context for a component.
Args:
app (flask.Flask): The app who this context belongs to. This is the
only sender our Blinker signal will listen to.
key (str): The key on ``_CONTEXT_LOCALS`` that this app's context
listens to.
Kwargs:
original_context (dict): The original context present whenever
these callbacks were registered. We will restore the context to
this value whenever the app context gets popped.
Returns:
(function, function): A two-element tuple of the dynamic functions
we generated as appcontext callbacks. The first element is the
callback for ``appcontext_pushed`` (i.e., get and store the
current context) and the second element is the callback for
``appcontext_popped`` (i.e., restore the current context to
to it's original value).
"""
def _get_context(dummy_app):
"""Set the context proxy so that it points to a specific context.
"""
_CONTEXT_LOCALS.context = _CONTEXT_LOCALS(key) # pylint: disable=assigning-non-slot
def _clear_context(dummy_app):
"""Remove the context proxy that points to a specific context and
restore the original context, if there was one.
"""
try:
del _CONTEXT_LOCALS.context # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
if original_context is not _CONTEXT_MISSING:
setattr(_CONTEXT_LOCALS, key, original_context) # depends on [control=['if'], data=['original_context']]
# store for later so Blinker doesn't remove these listeners and so we
# don't add them twice
_CONTEXT_CALLBACK_MAP[app] = (_get_context, _clear_context)
# and listen for any app context changes
appcontext_pushed.connect(_get_context, app)
appcontext_popped.connect(_clear_context, app)
return (_get_context, _clear_context) |
def div_img(img1, div2):
    """ Pixelwise division or divide by a number """
    if is_img(div2):
        divisor = div2.get_data()
    elif isinstance(div2, (float, int)):
        divisor = div2
    else:
        raise NotImplementedError('Cannot divide {}({}) by '
                                  '{}({})'.format(type(img1),
                                                  img1,
                                                  type(div2),
                                                  div2))
    return img1.get_data() / divisor
constant[ Pixelwise division or divide by a number ]
if call[name[is_img], parameter[name[div2]]] begin[:]
return[binary_operation[call[name[img1].get_data, parameter[]] / call[name[div2].get_data, parameter[]]]] | keyword[def] identifier[div_img] ( identifier[img1] , identifier[div2] ):
literal[string]
keyword[if] identifier[is_img] ( identifier[div2] ):
keyword[return] identifier[img1] . identifier[get_data] ()/ identifier[div2] . identifier[get_data] ()
keyword[elif] identifier[isinstance] ( identifier[div2] ,( identifier[float] , identifier[int] )):
keyword[return] identifier[img1] . identifier[get_data] ()/ identifier[div2]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] . identifier[format] ( identifier[type] ( identifier[img1] ),
identifier[img1] ,
identifier[type] ( identifier[div2] ),
identifier[div2] )) | def div_img(img1, div2):
""" Pixelwise division or divide by a number """
if is_img(div2):
return img1.get_data() / div2.get_data() # depends on [control=['if'], data=[]]
elif isinstance(div2, (float, int)):
return img1.get_data() / div2 # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Cannot divide {}({}) by {}({})'.format(type(img1), img1, type(div2), div2)) |
def create_for(line, search_result):
    '''Create a new "for loop" line as a replacement for the original code.
    '''
    # The first four named groups are always present in a match.
    fields = [search_result.group(name)
              for name in ("indented_for", "var", "start", "stop")]
    try:
        # An optional fifth group carries the loop condition/step.
        fields.append(search_result.group("cond"))
    except IndexError:
        # No "cond" group in this pattern -- format with four fields only.
        pass
    return line.format(*fields)
constant[Create a new "for loop" line as a replacement for the original code.
]
<ast.Try object at 0x7da18f09dd50> | keyword[def] identifier[create_for] ( identifier[line] , identifier[search_result] ):
literal[string]
keyword[try] :
keyword[return] identifier[line] . identifier[format] ( identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] ))
keyword[except] identifier[IndexError] :
keyword[return] identifier[line] . identifier[format] ( identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] ),
identifier[search_result] . identifier[group] ( literal[string] )) | def create_for(line, search_result):
"""Create a new "for loop" line as a replacement for the original code.
"""
try:
return line.format(search_result.group('indented_for'), search_result.group('var'), search_result.group('start'), search_result.group('stop'), search_result.group('cond')) # depends on [control=['try'], data=[]]
except IndexError:
return line.format(search_result.group('indented_for'), search_result.group('var'), search_result.group('start'), search_result.group('stop')) # depends on [control=['except'], data=[]] |
def is_file(self, path, use_sudo=False):
    """
    Check if a path exists, and is a file.
    """
    if self.is_local and not use_sudo:
        return os.path.isfile(path)

    # Remote (or sudo) case: probe with a shell test on the target host.
    runner = _sudo if use_sudo else _run
    with self.settings(hide('running', 'warnings'), warn_only=True):
        return runner('[ -f "%(path)s" ]' % locals()).succeeded
constant[
Check if a path exists, and is a file.
]
if <ast.BoolOp object at 0x7da1b003e0b0> begin[:]
return[call[name[os].path.isfile, parameter[name[path]]]] | keyword[def] identifier[is_file] ( identifier[self] , identifier[path] , identifier[use_sudo] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[is_local] keyword[and] keyword[not] identifier[use_sudo] :
keyword[return] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] )
keyword[else] :
identifier[func] = identifier[use_sudo] keyword[and] identifier[_sudo] keyword[or] identifier[_run]
keyword[with] identifier[self] . identifier[settings] ( identifier[hide] ( literal[string] , literal[string] ), identifier[warn_only] = keyword[True] ):
keyword[return] identifier[func] ( literal[string] % identifier[locals] ()). identifier[succeeded] | def is_file(self, path, use_sudo=False):
"""
Check if a path exists, and is a file.
"""
if self.is_local and (not use_sudo):
return os.path.isfile(path) # depends on [control=['if'], data=[]]
else:
func = use_sudo and _sudo or _run
with self.settings(hide('running', 'warnings'), warn_only=True):
return func('[ -f "%(path)s" ]' % locals()).succeeded # depends on [control=['with'], data=[]] |
def lu_solve(lower_upper, perm, rhs,
             validate_args=False,
             name=None):
  """Solves systems of linear eqns `A X = RHS`, given LU factorizations.

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if
      `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
    rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
      `A X = RHS`. To handle vector cases, use:
      `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., "lu_solve").

  Returns:
    x: The `X` in `A @ X = RHS`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[1., 2],
        [3, 4]],
       [[7, 8],
        [3, 4]]]
  inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```

  """
  with tf.compat.v1.name_scope(name, 'lu_solve', [lower_upper, perm, rhs]):
    lower_upper = tf.convert_to_tensor(
        value=lower_upper, dtype_hint=tf.float32, name='lower_upper')
    perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm')
    rhs = tf.convert_to_tensor(
        value=rhs, dtype_hint=lower_upper.dtype, name='rhs')

    # Optional runtime shape/consistency checks; threaded in as control deps so
    # they execute before any value below is consumed.
    assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
    if assertions:
      with tf.control_dependencies(assertions):
        lower_upper = tf.identity(lower_upper)
        perm = tf.identity(perm)
        rhs = tf.identity(rhs)

    # First apply the permutation: P^T @ rhs, i.e. reorder the rows of rhs by
    # `perm` along the row axis.
    if rhs.shape.ndims == 2 and perm.shape.ndims == 1:
      # Both rhs and perm have scalar batch_shape.
      permuted_rhs = tf.gather(rhs, perm, axis=-2)
    else:
      # Either rhs or perm have non-scalar batch_shape or we can't determine
      # this information statically.
      rhs_shape = tf.shape(input=rhs)
      broadcast_batch_shape = tf.broadcast_dynamic_shape(
          rhs_shape[:-2],
          tf.shape(input=perm)[:-1])
      # d = number of rows being permuted, m = number of right-hand sides.
      d, m = rhs_shape[-2], rhs_shape[-1]
      rhs_broadcast_shape = tf.concat([broadcast_batch_shape, [d, m]], axis=0)

      # Tile out rhs.
      broadcast_rhs = tf.broadcast_to(rhs, rhs_broadcast_shape)
      broadcast_rhs = tf.reshape(broadcast_rhs, [-1, d, m])

      # Tile out perm and add batch indices.
      broadcast_perm = tf.broadcast_to(perm, rhs_broadcast_shape[:-1])
      broadcast_perm = tf.reshape(broadcast_perm, [-1, d])
      broadcast_batch_size = tf.reduce_prod(input_tensor=broadcast_batch_shape)
      broadcast_batch_indices = tf.broadcast_to(
          tf.range(broadcast_batch_size)[:, tf.newaxis],
          [broadcast_batch_size, d])
      # Pair each permutation index with its flattened batch index so a single
      # gather_nd can permute rows across the whole (flattened) batch at once.
      broadcast_perm = tf.stack([broadcast_batch_indices, broadcast_perm],
                                axis=-1)

      permuted_rhs = tf.gather_nd(broadcast_rhs, broadcast_perm)
      permuted_rhs = tf.reshape(permuted_rhs, rhs_broadcast_shape)

    # Recover the unit-diagonal L from the packed `L + U - eye` representation:
    # keep the strictly-lower band and overwrite the diagonal with ones.
    lower = tf.linalg.set_diag(
        tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0),
        tf.ones(tf.shape(input=lower_upper)[:-1], dtype=lower_upper.dtype))
    # Solve A X = RHS as U \ (L \ (P^T RHS)): forward substitution with L,
    # then back substitution with U (the upper part of `lower_upper`).
    return linear_operator_util.matrix_triangular_solve_with_broadcast(
        lower_upper,  # Only upper is accessed.
        linear_operator_util.matrix_triangular_solve_with_broadcast(
            lower, permuted_rhs),
        lower=False)
constant[Solves systems of linear eqns `A X = RHS`, given LU factorizations.
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
`matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linag.lu`, i.e., if
`matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
`A X = RHS`. To handle vector cases, use:
`lu_solve(..., rhs[..., tf.newaxis])[..., 0]`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., "lu_solve").
Returns:
x: The `X` in `A @ X = RHS`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[1., 2],
[3, 4]],
[[7, 8],
[3, 4]]]
inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
tf.assert_near(tf.matrix_inverse(x), inv_x)
# ==> True
```
]
with call[name[tf].compat.v1.name_scope, parameter[name[name], constant[lu_solve], list[[<ast.Name object at 0x7da1b03b9810>, <ast.Name object at 0x7da1b03b95a0>, <ast.Name object at 0x7da1b03babf0>]]]] begin[:]
variable[lower_upper] assign[=] call[name[tf].convert_to_tensor, parameter[]]
variable[perm] assign[=] call[name[tf].convert_to_tensor, parameter[]]
variable[rhs] assign[=] call[name[tf].convert_to_tensor, parameter[]]
variable[assertions] assign[=] call[name[_lu_solve_assertions], parameter[name[lower_upper], name[perm], name[rhs], name[validate_args]]]
if name[assertions] begin[:]
with call[name[tf].control_dependencies, parameter[name[assertions]]] begin[:]
variable[lower_upper] assign[=] call[name[tf].identity, parameter[name[lower_upper]]]
variable[perm] assign[=] call[name[tf].identity, parameter[name[perm]]]
variable[rhs] assign[=] call[name[tf].identity, parameter[name[rhs]]]
if <ast.BoolOp object at 0x7da20c6e5d20> begin[:]
variable[permuted_rhs] assign[=] call[name[tf].gather, parameter[name[rhs], name[perm]]]
variable[lower] assign[=] call[name[tf].linalg.set_diag, parameter[call[name[tf].linalg.band_part, parameter[name[lower_upper]]], call[name[tf].ones, parameter[call[call[name[tf].shape, parameter[]]][<ast.Slice object at 0x7da1b0354190>]]]]]
return[call[name[linear_operator_util].matrix_triangular_solve_with_broadcast, parameter[name[lower_upper], call[name[linear_operator_util].matrix_triangular_solve_with_broadcast, parameter[name[lower], name[permuted_rhs]]]]]] | keyword[def] identifier[lu_solve] ( identifier[lower_upper] , identifier[perm] , identifier[rhs] ,
identifier[validate_args] = keyword[False] ,
identifier[name] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[compat] . identifier[v1] . identifier[name_scope] ( identifier[name] , literal[string] ,[ identifier[lower_upper] , identifier[perm] , identifier[rhs] ]):
identifier[lower_upper] = identifier[tf] . identifier[convert_to_tensor] (
identifier[value] = identifier[lower_upper] , identifier[dtype_hint] = identifier[tf] . identifier[float32] , identifier[name] = literal[string] )
identifier[perm] = identifier[tf] . identifier[convert_to_tensor] ( identifier[value] = identifier[perm] , identifier[dtype_hint] = identifier[tf] . identifier[int32] , identifier[name] = literal[string] )
identifier[rhs] = identifier[tf] . identifier[convert_to_tensor] (
identifier[value] = identifier[rhs] , identifier[dtype_hint] = identifier[lower_upper] . identifier[dtype] , identifier[name] = literal[string] )
identifier[assertions] = identifier[_lu_solve_assertions] ( identifier[lower_upper] , identifier[perm] , identifier[rhs] , identifier[validate_args] )
keyword[if] identifier[assertions] :
keyword[with] identifier[tf] . identifier[control_dependencies] ( identifier[assertions] ):
identifier[lower_upper] = identifier[tf] . identifier[identity] ( identifier[lower_upper] )
identifier[perm] = identifier[tf] . identifier[identity] ( identifier[perm] )
identifier[rhs] = identifier[tf] . identifier[identity] ( identifier[rhs] )
keyword[if] identifier[rhs] . identifier[shape] . identifier[ndims] == literal[int] keyword[and] identifier[perm] . identifier[shape] . identifier[ndims] == literal[int] :
identifier[permuted_rhs] = identifier[tf] . identifier[gather] ( identifier[rhs] , identifier[perm] , identifier[axis] =- literal[int] )
keyword[else] :
identifier[rhs_shape] = identifier[tf] . identifier[shape] ( identifier[input] = identifier[rhs] )
identifier[broadcast_batch_shape] = identifier[tf] . identifier[broadcast_dynamic_shape] (
identifier[rhs_shape] [:- literal[int] ],
identifier[tf] . identifier[shape] ( identifier[input] = identifier[perm] )[:- literal[int] ])
identifier[d] , identifier[m] = identifier[rhs_shape] [- literal[int] ], identifier[rhs_shape] [- literal[int] ]
identifier[rhs_broadcast_shape] = identifier[tf] . identifier[concat] ([ identifier[broadcast_batch_shape] ,[ identifier[d] , identifier[m] ]], identifier[axis] = literal[int] )
identifier[broadcast_rhs] = identifier[tf] . identifier[broadcast_to] ( identifier[rhs] , identifier[rhs_broadcast_shape] )
identifier[broadcast_rhs] = identifier[tf] . identifier[reshape] ( identifier[broadcast_rhs] ,[- literal[int] , identifier[d] , identifier[m] ])
identifier[broadcast_perm] = identifier[tf] . identifier[broadcast_to] ( identifier[perm] , identifier[rhs_broadcast_shape] [:- literal[int] ])
identifier[broadcast_perm] = identifier[tf] . identifier[reshape] ( identifier[broadcast_perm] ,[- literal[int] , identifier[d] ])
identifier[broadcast_batch_size] = identifier[tf] . identifier[reduce_prod] ( identifier[input_tensor] = identifier[broadcast_batch_shape] )
identifier[broadcast_batch_indices] = identifier[tf] . identifier[broadcast_to] (
identifier[tf] . identifier[range] ( identifier[broadcast_batch_size] )[:, identifier[tf] . identifier[newaxis] ],
[ identifier[broadcast_batch_size] , identifier[d] ])
identifier[broadcast_perm] = identifier[tf] . identifier[stack] ([ identifier[broadcast_batch_indices] , identifier[broadcast_perm] ],
identifier[axis] =- literal[int] )
identifier[permuted_rhs] = identifier[tf] . identifier[gather_nd] ( identifier[broadcast_rhs] , identifier[broadcast_perm] )
identifier[permuted_rhs] = identifier[tf] . identifier[reshape] ( identifier[permuted_rhs] , identifier[rhs_broadcast_shape] )
identifier[lower] = identifier[tf] . identifier[linalg] . identifier[set_diag] (
identifier[tf] . identifier[linalg] . identifier[band_part] ( identifier[lower_upper] , identifier[num_lower] =- literal[int] , identifier[num_upper] = literal[int] ),
identifier[tf] . identifier[ones] ( identifier[tf] . identifier[shape] ( identifier[input] = identifier[lower_upper] )[:- literal[int] ], identifier[dtype] = identifier[lower_upper] . identifier[dtype] ))
keyword[return] identifier[linear_operator_util] . identifier[matrix_triangular_solve_with_broadcast] (
identifier[lower_upper] ,
identifier[linear_operator_util] . identifier[matrix_triangular_solve_with_broadcast] (
identifier[lower] , identifier[permuted_rhs] ),
identifier[lower] = keyword[False] ) | def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
"""Solves systems of linear eqns `A X = RHS`, given LU factorizations.
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if
`matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linag.lu`, i.e., if
`matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`.
rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
`A X = RHS`. To handle vector cases, use:
`lu_solve(..., rhs[..., tf.newaxis])[..., 0]`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., "lu_solve").
Returns:
x: The `X` in `A @ X = RHS`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[1., 2],
[3, 4]],
[[7, 8],
[3, 4]]]
inv_x = tfp.math.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
tf.assert_near(tf.matrix_inverse(x), inv_x)
# ==> True
```
"""
with tf.compat.v1.name_scope(name, 'lu_solve', [lower_upper, perm, rhs]):
lower_upper = tf.convert_to_tensor(value=lower_upper, dtype_hint=tf.float32, name='lower_upper')
perm = tf.convert_to_tensor(value=perm, dtype_hint=tf.int32, name='perm')
rhs = tf.convert_to_tensor(value=rhs, dtype_hint=lower_upper.dtype, name='rhs')
assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
if assertions:
with tf.control_dependencies(assertions):
lower_upper = tf.identity(lower_upper)
perm = tf.identity(perm)
rhs = tf.identity(rhs) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
if rhs.shape.ndims == 2 and perm.shape.ndims == 1:
# Both rhs and perm have scalar batch_shape.
permuted_rhs = tf.gather(rhs, perm, axis=-2) # depends on [control=['if'], data=[]]
else:
# Either rhs or perm have non-scalar batch_shape or we can't determine
# this information statically.
rhs_shape = tf.shape(input=rhs)
broadcast_batch_shape = tf.broadcast_dynamic_shape(rhs_shape[:-2], tf.shape(input=perm)[:-1])
(d, m) = (rhs_shape[-2], rhs_shape[-1])
rhs_broadcast_shape = tf.concat([broadcast_batch_shape, [d, m]], axis=0)
# Tile out rhs.
broadcast_rhs = tf.broadcast_to(rhs, rhs_broadcast_shape)
broadcast_rhs = tf.reshape(broadcast_rhs, [-1, d, m])
# Tile out perm and add batch indices.
broadcast_perm = tf.broadcast_to(perm, rhs_broadcast_shape[:-1])
broadcast_perm = tf.reshape(broadcast_perm, [-1, d])
broadcast_batch_size = tf.reduce_prod(input_tensor=broadcast_batch_shape)
broadcast_batch_indices = tf.broadcast_to(tf.range(broadcast_batch_size)[:, tf.newaxis], [broadcast_batch_size, d])
broadcast_perm = tf.stack([broadcast_batch_indices, broadcast_perm], axis=-1)
permuted_rhs = tf.gather_nd(broadcast_rhs, broadcast_perm)
permuted_rhs = tf.reshape(permuted_rhs, rhs_broadcast_shape)
lower = tf.linalg.set_diag(tf.linalg.band_part(lower_upper, num_lower=-1, num_upper=0), tf.ones(tf.shape(input=lower_upper)[:-1], dtype=lower_upper.dtype)) # Only upper is accessed.
return linear_operator_util.matrix_triangular_solve_with_broadcast(lower_upper, linear_operator_util.matrix_triangular_solve_with_broadcast(lower, permuted_rhs), lower=False) # depends on [control=['with'], data=[]] |
def start(self, host, nornir):
    """
    Run the task for the given host.

    Arguments:
        host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right
            before calling the ``task``
        nornir(:obj:`nornir.core.Nornir`): Populated right before calling
            the ``task``

    Returns:
        host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
    """
    self.host = host
    self.nornir = nornir

    try:
        logger.debug("Host %r: running task %r", self.host.name, self.name)
        r = self.task(self, **self.params)
        if not isinstance(r, Result):
            # Wrap plain return values so callers always receive a Result.
            r = Result(host=host, result=r)
    except Exception as e:
        # Single handler for both sub-task failures (NornirSubTaskError) and
        # unexpected errors: the traceback is logged identically either way,
        # only the reported `result` payload differs.
        tb = traceback.format_exc()
        logger.error(
            "Host %r: task %r failed with traceback:\n%s",
            self.host.name,
            self.name,
            tb,
        )
        # Sub-task failures already carry their own per-subtask results, so
        # report just the exception message; for unexpected errors expose the
        # full traceback to aid debugging.
        failed_result = str(e) if isinstance(e, NornirSubTaskError) else tb
        r = Result(host, exception=e, result=failed_result, failed=True)

    r.name = self.name
    # Failed results are always reported at ERROR, regardless of the task's
    # configured severity.
    r.severity_level = logging.ERROR if r.failed else self.severity_level
    self.results.insert(0, r)
    return self.results
constant[
Run the task for the given host.
Arguments:
host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right
before calling the ``task``
nornir(:obj:`nornir.core.Nornir`): Populated right before calling
the ``task``
Returns:
host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
]
name[self].host assign[=] name[host]
name[self].nornir assign[=] name[nornir]
<ast.Try object at 0x7da204622230>
name[r].name assign[=] name[self].name
name[r].severity_level assign[=] <ast.IfExp object at 0x7da1b1ce5b40>
call[name[self].results.insert, parameter[constant[0], name[r]]]
return[name[self].results] | keyword[def] identifier[start] ( identifier[self] , identifier[host] , identifier[nornir] ):
literal[string]
identifier[self] . identifier[host] = identifier[host]
identifier[self] . identifier[nornir] = identifier[nornir]
keyword[try] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[host] . identifier[name] , identifier[self] . identifier[name] )
identifier[r] = identifier[self] . identifier[task] ( identifier[self] ,** identifier[self] . identifier[params] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[r] , identifier[Result] ):
identifier[r] = identifier[Result] ( identifier[host] = identifier[host] , identifier[result] = identifier[r] )
keyword[except] identifier[NornirSubTaskError] keyword[as] identifier[e] :
identifier[tb] = identifier[traceback] . identifier[format_exc] ()
identifier[logger] . identifier[error] (
literal[string] ,
identifier[self] . identifier[host] . identifier[name] ,
identifier[self] . identifier[name] ,
identifier[tb] ,
)
identifier[r] = identifier[Result] ( identifier[host] , identifier[exception] = identifier[e] , identifier[result] = identifier[str] ( identifier[e] ), identifier[failed] = keyword[True] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[tb] = identifier[traceback] . identifier[format_exc] ()
identifier[logger] . identifier[error] (
literal[string] ,
identifier[self] . identifier[host] . identifier[name] ,
identifier[self] . identifier[name] ,
identifier[tb] ,
)
identifier[r] = identifier[Result] ( identifier[host] , identifier[exception] = identifier[e] , identifier[result] = identifier[tb] , identifier[failed] = keyword[True] )
identifier[r] . identifier[name] = identifier[self] . identifier[name]
identifier[r] . identifier[severity_level] = identifier[logging] . identifier[ERROR] keyword[if] identifier[r] . identifier[failed] keyword[else] identifier[self] . identifier[severity_level]
identifier[self] . identifier[results] . identifier[insert] ( literal[int] , identifier[r] )
keyword[return] identifier[self] . identifier[results] | def start(self, host, nornir):
"""
Run the task for the given host.
Arguments:
host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right
before calling the ``task``
nornir(:obj:`nornir.core.Nornir`): Populated right before calling
the ``task``
Returns:
host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
"""
self.host = host
self.nornir = nornir
try:
logger.debug('Host %r: running task %r', self.host.name, self.name)
r = self.task(self, **self.params)
if not isinstance(r, Result):
r = Result(host=host, result=r) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except NornirSubTaskError as e:
tb = traceback.format_exc()
logger.error('Host %r: task %r failed with traceback:\n%s', self.host.name, self.name, tb)
r = Result(host, exception=e, result=str(e), failed=True) # depends on [control=['except'], data=['e']]
except Exception as e:
tb = traceback.format_exc()
logger.error('Host %r: task %r failed with traceback:\n%s', self.host.name, self.name, tb)
r = Result(host, exception=e, result=tb, failed=True) # depends on [control=['except'], data=['e']]
r.name = self.name
r.severity_level = logging.ERROR if r.failed else self.severity_level
self.results.insert(0, r)
return self.results |
def _parse_commit_response(commit_response_pb):
    """Extract response data from a commit response.

    :type commit_response_pb: :class:`.datastore_pb2.CommitResponse`
    :param commit_response_pb: The protobuf response from a commit request.

    :rtype: tuple
    :returns: The pair of the number of index updates and a list of
              :class:`.entity_pb2.Key` for each incomplete key
              that was completed in the commit.
    """
    completed_keys = []
    # Collect the server-assigned key of every mutation whose `key` message
    # field was populated, i.e. every incomplete key completed by the commit.
    for mutation_result in commit_response_pb.mutation_results:
        if mutation_result.HasField("key"):  # Message field (Key)
            completed_keys.append(mutation_result.key)
    return commit_response_pb.index_updates, completed_keys
constant[Extract response data from a commit response.
:type commit_response_pb: :class:`.datastore_pb2.CommitResponse`
:param commit_response_pb: The protobuf response from a commit request.
:rtype: tuple
:returns: The pair of the number of index updates and a list of
:class:`.entity_pb2.Key` for each incomplete key
that was completed in the commit.
]
variable[mut_results] assign[=] name[commit_response_pb].mutation_results
variable[index_updates] assign[=] name[commit_response_pb].index_updates
variable[completed_keys] assign[=] <ast.ListComp object at 0x7da207f00eb0>
return[tuple[[<ast.Name object at 0x7da207f02a10>, <ast.Name object at 0x7da207f03310>]]] | keyword[def] identifier[_parse_commit_response] ( identifier[commit_response_pb] ):
literal[string]
identifier[mut_results] = identifier[commit_response_pb] . identifier[mutation_results]
identifier[index_updates] = identifier[commit_response_pb] . identifier[index_updates]
identifier[completed_keys] =[
identifier[mut_result] . identifier[key] keyword[for] identifier[mut_result] keyword[in] identifier[mut_results] keyword[if] identifier[mut_result] . identifier[HasField] ( literal[string] )
]
keyword[return] identifier[index_updates] , identifier[completed_keys] | def _parse_commit_response(commit_response_pb):
"""Extract response data from a commit response.
:type commit_response_pb: :class:`.datastore_pb2.CommitResponse`
:param commit_response_pb: The protobuf response from a commit request.
:rtype: tuple
:returns: The pair of the number of index updates and a list of
:class:`.entity_pb2.Key` for each incomplete key
that was completed in the commit.
"""
mut_results = commit_response_pb.mutation_results
index_updates = commit_response_pb.index_updates
completed_keys = [mut_result.key for mut_result in mut_results if mut_result.HasField('key')] # Message field (Key)
return (index_updates, completed_keys) |
def load(self, ladderName):
    """retrieve the ladder settings from saved disk file"""
    # Order matters: self.filename is derived from self.name, so the name
    # must be assigned before the file path is resolved.
    self.name = ladderName
    with open(self.filename, "rb") as ladderFile:
        savedSettings = json.loads(ladderFile.read())
    self.__dict__.update(savedSettings)
constant[retrieve the ladder settings from saved disk file]
name[self].name assign[=] name[ladderName]
with call[name[open], parameter[name[self].filename, constant[rb]]] begin[:]
variable[data] assign[=] call[name[f].read, parameter[]]
call[name[self].__dict__.update, parameter[call[name[json].loads, parameter[name[data]]]]] | keyword[def] identifier[load] ( identifier[self] , identifier[ladderName] ):
literal[string]
identifier[self] . identifier[name] = identifier[ladderName]
keyword[with] identifier[open] ( identifier[self] . identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[f] . identifier[read] ()
identifier[self] . identifier[__dict__] . identifier[update] ( identifier[json] . identifier[loads] ( identifier[data] )) | def load(self, ladderName):
"""retrieve the ladder settings from saved disk file"""
self.name = ladderName # preset value to load self.filename
with open(self.filename, 'rb') as f:
data = f.read()
self.__dict__.update(json.loads(data)) # depends on [control=['with'], data=['f']] |
def _qqplot_bar(M=1000000, alphaLevel = 0.05, distr = 'log10'):
"""calculate theoretical expectations for qqplot"""
mRange=10**(sp.arange(sp.log10(0.5),sp.log10(M-0.5)+0.1,0.1));#should be exp or 10**?
numPts=len(mRange);
betaalphaLevel=sp.zeros(numPts);#down in the plot
betaOneMinusalphaLevel=sp.zeros(numPts);#up in the plot
betaInvHalf=sp.zeros(numPts);
for n in range(numPts):
m=mRange[n]; #numPLessThanThresh=m;
betaInvHalf[n]=st.beta.ppf(0.5,m,M-m);
betaalphaLevel[n]=st.beta.ppf(alphaLevel,m,M-m);
betaOneMinusalphaLevel[n]=st.beta.ppf(1-alphaLevel,m,M-m);
betaDown=betaInvHalf-betaalphaLevel;
betaUp=betaOneMinusalphaLevel-betaInvHalf;
theoreticalPvals=mRange/M;
return betaUp, betaDown, theoreticalPvals | def function[_qqplot_bar, parameter[M, alphaLevel, distr]]:
constant[calculate theoretical expectations for qqplot]
variable[mRange] assign[=] binary_operation[constant[10] ** call[name[sp].arange, parameter[call[name[sp].log10, parameter[constant[0.5]]], binary_operation[call[name[sp].log10, parameter[binary_operation[name[M] - constant[0.5]]]] + constant[0.1]], constant[0.1]]]]
variable[numPts] assign[=] call[name[len], parameter[name[mRange]]]
variable[betaalphaLevel] assign[=] call[name[sp].zeros, parameter[name[numPts]]]
variable[betaOneMinusalphaLevel] assign[=] call[name[sp].zeros, parameter[name[numPts]]]
variable[betaInvHalf] assign[=] call[name[sp].zeros, parameter[name[numPts]]]
for taget[name[n]] in starred[call[name[range], parameter[name[numPts]]]] begin[:]
variable[m] assign[=] call[name[mRange]][name[n]]
call[name[betaInvHalf]][name[n]] assign[=] call[name[st].beta.ppf, parameter[constant[0.5], name[m], binary_operation[name[M] - name[m]]]]
call[name[betaalphaLevel]][name[n]] assign[=] call[name[st].beta.ppf, parameter[name[alphaLevel], name[m], binary_operation[name[M] - name[m]]]]
call[name[betaOneMinusalphaLevel]][name[n]] assign[=] call[name[st].beta.ppf, parameter[binary_operation[constant[1] - name[alphaLevel]], name[m], binary_operation[name[M] - name[m]]]]
variable[betaDown] assign[=] binary_operation[name[betaInvHalf] - name[betaalphaLevel]]
variable[betaUp] assign[=] binary_operation[name[betaOneMinusalphaLevel] - name[betaInvHalf]]
variable[theoreticalPvals] assign[=] binary_operation[name[mRange] / name[M]]
return[tuple[[<ast.Name object at 0x7da18bc71420>, <ast.Name object at 0x7da18bc72bf0>, <ast.Name object at 0x7da18bc70eb0>]]] | keyword[def] identifier[_qqplot_bar] ( identifier[M] = literal[int] , identifier[alphaLevel] = literal[int] , identifier[distr] = literal[string] ):
literal[string]
identifier[mRange] = literal[int] **( identifier[sp] . identifier[arange] ( identifier[sp] . identifier[log10] ( literal[int] ), identifier[sp] . identifier[log10] ( identifier[M] - literal[int] )+ literal[int] , literal[int] ));
identifier[numPts] = identifier[len] ( identifier[mRange] );
identifier[betaalphaLevel] = identifier[sp] . identifier[zeros] ( identifier[numPts] );
identifier[betaOneMinusalphaLevel] = identifier[sp] . identifier[zeros] ( identifier[numPts] );
identifier[betaInvHalf] = identifier[sp] . identifier[zeros] ( identifier[numPts] );
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[numPts] ):
identifier[m] = identifier[mRange] [ identifier[n] ];
identifier[betaInvHalf] [ identifier[n] ]= identifier[st] . identifier[beta] . identifier[ppf] ( literal[int] , identifier[m] , identifier[M] - identifier[m] );
identifier[betaalphaLevel] [ identifier[n] ]= identifier[st] . identifier[beta] . identifier[ppf] ( identifier[alphaLevel] , identifier[m] , identifier[M] - identifier[m] );
identifier[betaOneMinusalphaLevel] [ identifier[n] ]= identifier[st] . identifier[beta] . identifier[ppf] ( literal[int] - identifier[alphaLevel] , identifier[m] , identifier[M] - identifier[m] );
identifier[betaDown] = identifier[betaInvHalf] - identifier[betaalphaLevel] ;
identifier[betaUp] = identifier[betaOneMinusalphaLevel] - identifier[betaInvHalf] ;
identifier[theoreticalPvals] = identifier[mRange] / identifier[M] ;
keyword[return] identifier[betaUp] , identifier[betaDown] , identifier[theoreticalPvals] | def _qqplot_bar(M=1000000, alphaLevel=0.05, distr='log10'):
"""calculate theoretical expectations for qqplot"""
mRange = 10 ** sp.arange(sp.log10(0.5), sp.log10(M - 0.5) + 0.1, 0.1) #should be exp or 10**?
numPts = len(mRange)
betaalphaLevel = sp.zeros(numPts) #down in the plot
betaOneMinusalphaLevel = sp.zeros(numPts) #up in the plot
betaInvHalf = sp.zeros(numPts)
for n in range(numPts):
m = mRange[n] #numPLessThanThresh=m;
betaInvHalf[n] = st.beta.ppf(0.5, m, M - m)
betaalphaLevel[n] = st.beta.ppf(alphaLevel, m, M - m)
betaOneMinusalphaLevel[n] = st.beta.ppf(1 - alphaLevel, m, M - m) # depends on [control=['for'], data=['n']]
betaDown = betaInvHalf - betaalphaLevel
betaUp = betaOneMinusalphaLevel - betaInvHalf
theoreticalPvals = mRange / M
return (betaUp, betaDown, theoreticalPvals) |
def parse_signature(self, statement, element, module=None):
    """Parses the specified line as a new version of the signature for 'element'.

    :arg statement: the string that has the new signature.
    :arg element: the code element whose signature will be changed.
    """
    # If the signature changes, the user might not have had a chance to add
    # the detailed member information for it yet; we just refresh the
    # modifiers and attributes (so replace needs no special handling).
    smatch = self.RE_SIG.match(statement)
    if smatch is None:
        return (None, None, None)

    name = smatch.group("name").strip()
    modifiers = smatch.group("modifiers") or []
    codetype = smatch.group("codetype")
    eresult = None

    if codetype.lower() == "function":
        # Functions may additionally carry a return type and kind.
        dtype = smatch.group("type")
        kind = smatch.group("kind")
        if module is None:
            element.update(name, modifiers, dtype, kind)
        else:
            eresult = Function(name, modifiers, dtype, kind, module)
    elif module is None:
        element.update(name, modifiers)
    else:
        eresult = Subroutine(name, modifiers, module)

    # The parameter sets are driven by the executable's body rather than the
    # call signature, but the declarations would be interpreted as members if
    # the names weren't recorded in the ordered parameter list — so overwrite
    # that list with the freshly parsed names.
    params = re.split(r"[\s,]+", smatch.group("parameters").lower())
    target = element if eresult is None else eresult
    target.paramorder = params

    return (eresult, smatch.start(), smatch.end())
constant[Parses the specified line as a new version of the signature for 'element'.
:arg statement: the string that has the new signature.
:arg element: the code element whose signature will be changed.
]
variable[smatch] assign[=] call[name[self].RE_SIG.match, parameter[name[statement]]]
variable[result] assign[=] tuple[[<ast.Constant object at 0x7da1b26f4580>, <ast.Constant object at 0x7da1b26f78e0>, <ast.Constant object at 0x7da1b26f6320>]]
variable[eresult] assign[=] constant[None]
if compare[name[smatch] is_not constant[None]] begin[:]
variable[name] assign[=] call[call[name[smatch].group, parameter[constant[name]]].strip, parameter[]]
variable[modifiers] assign[=] <ast.BoolOp object at 0x7da1b26f4550>
variable[codetype] assign[=] call[name[smatch].group, parameter[constant[codetype]]]
if compare[call[name[codetype].lower, parameter[]] equal[==] constant[function]] begin[:]
variable[dtype] assign[=] call[name[smatch].group, parameter[constant[type]]]
variable[kind] assign[=] call[name[smatch].group, parameter[constant[kind]]]
if compare[name[module] is constant[None]] begin[:]
call[name[element].update, parameter[name[name], name[modifiers], name[dtype], name[kind]]]
variable[params] assign[=] call[name[re].split, parameter[constant[[\s,]+], call[call[name[smatch].group, parameter[constant[parameters]]].lower, parameter[]]]]
if compare[name[eresult] is constant[None]] begin[:]
name[element].paramorder assign[=] name[params]
variable[result] assign[=] tuple[[<ast.Name object at 0x7da1b28f6170>, <ast.Call object at 0x7da1b28f6140>, <ast.Call object at 0x7da1b28f60b0>]]
return[name[result]] | keyword[def] identifier[parse_signature] ( identifier[self] , identifier[statement] , identifier[element] , identifier[module] = keyword[None] ):
literal[string]
identifier[smatch] = identifier[self] . identifier[RE_SIG] . identifier[match] ( identifier[statement] )
identifier[result] =( keyword[None] , keyword[None] , keyword[None] )
identifier[eresult] = keyword[None]
keyword[if] identifier[smatch] keyword[is] keyword[not] keyword[None] :
identifier[name] = identifier[smatch] . identifier[group] ( literal[string] ). identifier[strip] ()
identifier[modifiers] = identifier[smatch] . identifier[group] ( literal[string] ) keyword[or] []
identifier[codetype] = identifier[smatch] . identifier[group] ( literal[string] )
keyword[if] identifier[codetype] . identifier[lower] ()== literal[string] :
identifier[dtype] = identifier[smatch] . identifier[group] ( literal[string] )
identifier[kind] = identifier[smatch] . identifier[group] ( literal[string] )
keyword[if] identifier[module] keyword[is] keyword[None] :
identifier[element] . identifier[update] ( identifier[name] , identifier[modifiers] , identifier[dtype] , identifier[kind] )
keyword[else] :
identifier[eresult] = identifier[Function] ( identifier[name] , identifier[modifiers] , identifier[dtype] , identifier[kind] , identifier[module] )
keyword[else] :
keyword[if] identifier[module] keyword[is] keyword[None] :
identifier[element] . identifier[update] ( identifier[name] , identifier[modifiers] )
keyword[else] :
identifier[eresult] = identifier[Subroutine] ( identifier[name] , identifier[modifiers] , identifier[module] )
identifier[params] = identifier[re] . identifier[split] ( literal[string] , identifier[smatch] . identifier[group] ( literal[string] ). identifier[lower] ())
keyword[if] identifier[eresult] keyword[is] keyword[None] :
identifier[element] . identifier[paramorder] = identifier[params]
keyword[else] :
identifier[eresult] . identifier[paramorder] = identifier[params]
identifier[result] =( identifier[eresult] , identifier[smatch] . identifier[start] (), identifier[smatch] . identifier[end] ())
keyword[return] identifier[result] | def parse_signature(self, statement, element, module=None):
"""Parses the specified line as a new version of the signature for 'element'.
:arg statement: the string that has the new signature.
:arg element: the code element whose signature will be changed.
"""
#If the signature changes, the user might not have had a chance to add the
#detailed member information for it yet. Here
#we will just update the modifiers and attributes. Also, since all the mods
#etc. will be overwritten, we don't need to handle replace separately.
smatch = self.RE_SIG.match(statement)
result = (None, None, None)
eresult = None
if smatch is not None:
name = smatch.group('name').strip()
modifiers = smatch.group('modifiers') or []
codetype = smatch.group('codetype')
#If the exec is a function, we also may have a type and kind specified.
if codetype.lower() == 'function':
dtype = smatch.group('type')
kind = smatch.group('kind')
if module is None:
element.update(name, modifiers, dtype, kind) # depends on [control=['if'], data=[]]
else:
eresult = Function(name, modifiers, dtype, kind, module) # depends on [control=['if'], data=[]]
elif module is None:
element.update(name, modifiers) # depends on [control=['if'], data=[]]
else:
eresult = Subroutine(name, modifiers, module) #The parameter sets are actually driven by the body of the executable
#rather than the call signature. However, the declarations will be
#interpreted as members if we don't add the parameters to the ordered
#list of parameter names. Overwrite that list with the new names.
params = re.split('[\\s,]+', smatch.group('parameters').lower())
if eresult is None:
element.paramorder = params # depends on [control=['if'], data=[]]
else:
eresult.paramorder = params
result = (eresult, smatch.start(), smatch.end()) # depends on [control=['if'], data=['smatch']]
return result |
def _validate(self, val):
"""
Checks that the list is of the right length and has the right contents.
Otherwise, an exception is raised.
"""
if self.allow_None and val is None:
return
if not isinstance(val, list):
raise ValueError("List '%s' must be a list."%(self.name))
if self.bounds is not None:
min_length,max_length = self.bounds
l=len(val)
if min_length is not None and max_length is not None:
if not (min_length <= l <= max_length):
raise ValueError("%s: list length must be between %s and %s (inclusive)"%(self.name,min_length,max_length))
elif min_length is not None:
if not min_length <= l:
raise ValueError("%s: list length must be at least %s."%(self.name,min_length))
elif max_length is not None:
if not l <= max_length:
raise ValueError("%s: list length must be at most %s."%(self.name,max_length))
self._check_type(val) | def function[_validate, parameter[self, val]]:
constant[
Checks that the list is of the right length and has the right contents.
Otherwise, an exception is raised.
]
if <ast.BoolOp object at 0x7da18fe91d50> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da18fe936a0> begin[:]
<ast.Raise object at 0x7da18fe92bf0>
if compare[name[self].bounds is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da18fe907c0> assign[=] name[self].bounds
variable[l] assign[=] call[name[len], parameter[name[val]]]
if <ast.BoolOp object at 0x7da18fe90e50> begin[:]
if <ast.UnaryOp object at 0x7da18fe93250> begin[:]
<ast.Raise object at 0x7da18fe90a00>
call[name[self]._check_type, parameter[name[val]]] | keyword[def] identifier[_validate] ( identifier[self] , identifier[val] ):
literal[string]
keyword[if] identifier[self] . identifier[allow_None] keyword[and] identifier[val] keyword[is] keyword[None] :
keyword[return]
keyword[if] keyword[not] identifier[isinstance] ( identifier[val] , identifier[list] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[name] ))
keyword[if] identifier[self] . identifier[bounds] keyword[is] keyword[not] keyword[None] :
identifier[min_length] , identifier[max_length] = identifier[self] . identifier[bounds]
identifier[l] = identifier[len] ( identifier[val] )
keyword[if] identifier[min_length] keyword[is] keyword[not] keyword[None] keyword[and] identifier[max_length] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] ( identifier[min_length] <= identifier[l] <= identifier[max_length] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[name] , identifier[min_length] , identifier[max_length] ))
keyword[elif] identifier[min_length] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[min_length] <= identifier[l] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[name] , identifier[min_length] ))
keyword[elif] identifier[max_length] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[l] <= identifier[max_length] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[name] , identifier[max_length] ))
identifier[self] . identifier[_check_type] ( identifier[val] ) | def _validate(self, val):
"""
Checks that the list is of the right length and has the right contents.
Otherwise, an exception is raised.
"""
if self.allow_None and val is None:
return # depends on [control=['if'], data=[]]
if not isinstance(val, list):
raise ValueError("List '%s' must be a list." % self.name) # depends on [control=['if'], data=[]]
if self.bounds is not None:
(min_length, max_length) = self.bounds
l = len(val)
if min_length is not None and max_length is not None:
if not min_length <= l <= max_length:
raise ValueError('%s: list length must be between %s and %s (inclusive)' % (self.name, min_length, max_length)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif min_length is not None:
if not min_length <= l:
raise ValueError('%s: list length must be at least %s.' % (self.name, min_length)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['min_length']]
elif max_length is not None:
if not l <= max_length:
raise ValueError('%s: list length must be at most %s.' % (self.name, max_length)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['max_length']] # depends on [control=['if'], data=[]]
self._check_type(val) |
def _determine_monetary_account_id(cls, monetary_account_id=None):
"""
:type monetary_account_id: int
:rtype: int
"""
if monetary_account_id is None:
return context.BunqContext.user_context().primary_monetary_account.id_
return monetary_account_id | def function[_determine_monetary_account_id, parameter[cls, monetary_account_id]]:
constant[
:type monetary_account_id: int
:rtype: int
]
if compare[name[monetary_account_id] is constant[None]] begin[:]
return[call[name[context].BunqContext.user_context, parameter[]].primary_monetary_account.id_]
return[name[monetary_account_id]] | keyword[def] identifier[_determine_monetary_account_id] ( identifier[cls] , identifier[monetary_account_id] = keyword[None] ):
literal[string]
keyword[if] identifier[monetary_account_id] keyword[is] keyword[None] :
keyword[return] identifier[context] . identifier[BunqContext] . identifier[user_context] (). identifier[primary_monetary_account] . identifier[id_]
keyword[return] identifier[monetary_account_id] | def _determine_monetary_account_id(cls, monetary_account_id=None):
"""
:type monetary_account_id: int
:rtype: int
"""
if monetary_account_id is None:
return context.BunqContext.user_context().primary_monetary_account.id_ # depends on [control=['if'], data=[]]
return monetary_account_id |
def container_execute(name, cmd, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Execute a command list on a container.
name :
Name of the container
cmd :
Command to be executed (as a list)
Example :
'["ls", "-l"]'
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
CLI Example:
.. code-block:: bash
salt '*' lxd.container_execute <container name> '["ls", "-l"]'
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
try:
result = container.execute(cmd)
saltresult = {}
if not hasattr(result, 'exit_code'):
saltresult = dict(
exit_code=0,
stdout=result[0],
stderr=result[1],
)
else:
saltresult = dict(
exit_code=result.exit_code,
stdout=result.stdout,
stderr=result.stderr,
)
except pylxd.exceptions.NotFound as e:
# TODO: Using exit_code 0 here is not always right,
# in the most cases the command worked ok though.
# See: https://github.com/lxc/pylxd/issues/280
saltresult = dict(exit_code=0, stdout="", stderr=six.text_type(e))
if int(saltresult['exit_code']) > 0:
saltresult['result'] = False
else:
saltresult['result'] = True
return saltresult | def function[container_execute, parameter[name, cmd, remote_addr, cert, key, verify_cert]]:
constant[
Execute a command list on a container.
name :
Name of the container
cmd :
Command to be executed (as a list)
Example :
'["ls", "-l"]'
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
CLI Example:
.. code-block:: bash
salt '*' lxd.container_execute <container name> '["ls", "-l"]'
]
variable[container] assign[=] call[name[container_get], parameter[name[name], name[remote_addr], name[cert], name[key], name[verify_cert]]]
<ast.Try object at 0x7da20c6a8e80>
if compare[call[name[int], parameter[call[name[saltresult]][constant[exit_code]]]] greater[>] constant[0]] begin[:]
call[name[saltresult]][constant[result]] assign[=] constant[False]
return[name[saltresult]] | keyword[def] identifier[container_execute] ( identifier[name] , identifier[cmd] , identifier[remote_addr] = keyword[None] ,
identifier[cert] = keyword[None] , identifier[key] = keyword[None] , identifier[verify_cert] = keyword[True] ):
literal[string]
identifier[container] = identifier[container_get] (
identifier[name] , identifier[remote_addr] , identifier[cert] , identifier[key] , identifier[verify_cert] , identifier[_raw] = keyword[True]
)
keyword[try] :
identifier[result] = identifier[container] . identifier[execute] ( identifier[cmd] )
identifier[saltresult] ={}
keyword[if] keyword[not] identifier[hasattr] ( identifier[result] , literal[string] ):
identifier[saltresult] = identifier[dict] (
identifier[exit_code] = literal[int] ,
identifier[stdout] = identifier[result] [ literal[int] ],
identifier[stderr] = identifier[result] [ literal[int] ],
)
keyword[else] :
identifier[saltresult] = identifier[dict] (
identifier[exit_code] = identifier[result] . identifier[exit_code] ,
identifier[stdout] = identifier[result] . identifier[stdout] ,
identifier[stderr] = identifier[result] . identifier[stderr] ,
)
keyword[except] identifier[pylxd] . identifier[exceptions] . identifier[NotFound] keyword[as] identifier[e] :
identifier[saltresult] = identifier[dict] ( identifier[exit_code] = literal[int] , identifier[stdout] = literal[string] , identifier[stderr] = identifier[six] . identifier[text_type] ( identifier[e] ))
keyword[if] identifier[int] ( identifier[saltresult] [ literal[string] ])> literal[int] :
identifier[saltresult] [ literal[string] ]= keyword[False]
keyword[else] :
identifier[saltresult] [ literal[string] ]= keyword[True]
keyword[return] identifier[saltresult] | def container_execute(name, cmd, remote_addr=None, cert=None, key=None, verify_cert=True):
"""
Execute a command list on a container.
name :
Name of the container
cmd :
Command to be executed (as a list)
Example :
'["ls", "-l"]'
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
CLI Example:
.. code-block:: bash
salt '*' lxd.container_execute <container name> '["ls", "-l"]'
"""
container = container_get(name, remote_addr, cert, key, verify_cert, _raw=True)
try:
result = container.execute(cmd)
saltresult = {}
if not hasattr(result, 'exit_code'):
saltresult = dict(exit_code=0, stdout=result[0], stderr=result[1]) # depends on [control=['if'], data=[]]
else:
saltresult = dict(exit_code=result.exit_code, stdout=result.stdout, stderr=result.stderr) # depends on [control=['try'], data=[]]
except pylxd.exceptions.NotFound as e:
# TODO: Using exit_code 0 here is not always right,
# in the most cases the command worked ok though.
# See: https://github.com/lxc/pylxd/issues/280
saltresult = dict(exit_code=0, stdout='', stderr=six.text_type(e)) # depends on [control=['except'], data=['e']]
if int(saltresult['exit_code']) > 0:
saltresult['result'] = False # depends on [control=['if'], data=[]]
else:
saltresult['result'] = True
return saltresult |
def pending(self, start='-', stop='+', count=1000, consumer=None):
"""
List pending messages within the consumer group for this stream.
:param start: start id (or '-' for oldest pending)
:param stop: stop id (or '+' for newest pending)
:param count: limit number of messages returned
:param consumer: restrict message list to the given consumer
:returns: A list containing status for each pending message. Each
pending message returns [id, consumer, idle time, deliveries].
"""
return self.database.xpending_range(self.key, self.group, start, stop,
count, consumer) | def function[pending, parameter[self, start, stop, count, consumer]]:
constant[
List pending messages within the consumer group for this stream.
:param start: start id (or '-' for oldest pending)
:param stop: stop id (or '+' for newest pending)
:param count: limit number of messages returned
:param consumer: restrict message list to the given consumer
:returns: A list containing status for each pending message. Each
pending message returns [id, consumer, idle time, deliveries].
]
return[call[name[self].database.xpending_range, parameter[name[self].key, name[self].group, name[start], name[stop], name[count], name[consumer]]]] | keyword[def] identifier[pending] ( identifier[self] , identifier[start] = literal[string] , identifier[stop] = literal[string] , identifier[count] = literal[int] , identifier[consumer] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[database] . identifier[xpending_range] ( identifier[self] . identifier[key] , identifier[self] . identifier[group] , identifier[start] , identifier[stop] ,
identifier[count] , identifier[consumer] ) | def pending(self, start='-', stop='+', count=1000, consumer=None):
"""
List pending messages within the consumer group for this stream.
:param start: start id (or '-' for oldest pending)
:param stop: stop id (or '+' for newest pending)
:param count: limit number of messages returned
:param consumer: restrict message list to the given consumer
:returns: A list containing status for each pending message. Each
pending message returns [id, consumer, idle time, deliveries].
"""
return self.database.xpending_range(self.key, self.group, start, stop, count, consumer) |
def generate_header(self, newer_tag_name, newer_tag_link,
newer_tag_time,
older_tag_link, project_url):
"""
Generate a header for a tag section with specific parameters.
:param str newer_tag_name: Name (title) of newer tag.
:param str newer_tag_link: Tag name of newer tag, used for links.
Could be same as **newer_tag_name** or some
specific value, like `HEAD`.
:param datetime newer_tag_time: Date and time when
newer tag was created.
:param str older_tag_link: Tag name of older tag, used for links.
:param str project_url: URL for current project.
:rtype: str
:return: Generated ready-to-add tag section.
"""
log = ""
# Generate date string:
# noinspection PyUnresolvedReferences
time_string = newer_tag_time.strftime(self.options.date_format)
# Generate tag name and link
if self.options.release_url:
release_url = self.options.release_url.format(newer_tag_link)
else:
release_url = u"{project_url}/tree/{newer_tag_link}".format(
project_url=project_url, newer_tag_link=newer_tag_link)
if not self.options.unreleased_with_date and \
newer_tag_name == self.options.unreleased_label:
log += u"## [{newer_tag_name}]({release_url})\n\n".format(
newer_tag_name=newer_tag_name, release_url=release_url)
else:
log += u"## [{newer_tag_name}]({release_url}) " \
u"({time_string})\n".format(
newer_tag_name=newer_tag_name,
release_url=release_url,
time_string=time_string
)
if self.options.compare_link \
and older_tag_link != REPO_CREATED_TAG_NAME:
# Generate compare link
log += u"[Full Changelog]"
log += u"({project_url}/compare/{older_tag_link}".format(
project_url=project_url,
older_tag_link=older_tag_link,
)
log += u"...{newer_tag_link})\n\n".format(
newer_tag_link=newer_tag_link
)
return log | def function[generate_header, parameter[self, newer_tag_name, newer_tag_link, newer_tag_time, older_tag_link, project_url]]:
constant[
Generate a header for a tag section with specific parameters.
:param str newer_tag_name: Name (title) of newer tag.
:param str newer_tag_link: Tag name of newer tag, used for links.
Could be same as **newer_tag_name** or some
specific value, like `HEAD`.
:param datetime newer_tag_time: Date and time when
newer tag was created.
:param str older_tag_link: Tag name of older tag, used for links.
:param str project_url: URL for current project.
:rtype: str
:return: Generated ready-to-add tag section.
]
variable[log] assign[=] constant[]
variable[time_string] assign[=] call[name[newer_tag_time].strftime, parameter[name[self].options.date_format]]
if name[self].options.release_url begin[:]
variable[release_url] assign[=] call[name[self].options.release_url.format, parameter[name[newer_tag_link]]]
if <ast.BoolOp object at 0x7da1b00b7580> begin[:]
<ast.AugAssign object at 0x7da1b00b4670>
if <ast.BoolOp object at 0x7da1b00b5b70> begin[:]
<ast.AugAssign object at 0x7da1b00d9960>
<ast.AugAssign object at 0x7da1b00dbb50>
<ast.AugAssign object at 0x7da1b00d9000>
return[name[log]] | keyword[def] identifier[generate_header] ( identifier[self] , identifier[newer_tag_name] , identifier[newer_tag_link] ,
identifier[newer_tag_time] ,
identifier[older_tag_link] , identifier[project_url] ):
literal[string]
identifier[log] = literal[string]
identifier[time_string] = identifier[newer_tag_time] . identifier[strftime] ( identifier[self] . identifier[options] . identifier[date_format] )
keyword[if] identifier[self] . identifier[options] . identifier[release_url] :
identifier[release_url] = identifier[self] . identifier[options] . identifier[release_url] . identifier[format] ( identifier[newer_tag_link] )
keyword[else] :
identifier[release_url] = literal[string] . identifier[format] (
identifier[project_url] = identifier[project_url] , identifier[newer_tag_link] = identifier[newer_tag_link] )
keyword[if] keyword[not] identifier[self] . identifier[options] . identifier[unreleased_with_date] keyword[and] identifier[newer_tag_name] == identifier[self] . identifier[options] . identifier[unreleased_label] :
identifier[log] += literal[string] . identifier[format] (
identifier[newer_tag_name] = identifier[newer_tag_name] , identifier[release_url] = identifier[release_url] )
keyword[else] :
identifier[log] += literal[string] literal[string] . identifier[format] (
identifier[newer_tag_name] = identifier[newer_tag_name] ,
identifier[release_url] = identifier[release_url] ,
identifier[time_string] = identifier[time_string]
)
keyword[if] identifier[self] . identifier[options] . identifier[compare_link] keyword[and] identifier[older_tag_link] != identifier[REPO_CREATED_TAG_NAME] :
identifier[log] += literal[string]
identifier[log] += literal[string] . identifier[format] (
identifier[project_url] = identifier[project_url] ,
identifier[older_tag_link] = identifier[older_tag_link] ,
)
identifier[log] += literal[string] . identifier[format] (
identifier[newer_tag_link] = identifier[newer_tag_link]
)
keyword[return] identifier[log] | def generate_header(self, newer_tag_name, newer_tag_link, newer_tag_time, older_tag_link, project_url):
"""
Generate a header for a tag section with specific parameters.
:param str newer_tag_name: Name (title) of newer tag.
:param str newer_tag_link: Tag name of newer tag, used for links.
Could be same as **newer_tag_name** or some
specific value, like `HEAD`.
:param datetime newer_tag_time: Date and time when
newer tag was created.
:param str older_tag_link: Tag name of older tag, used for links.
:param str project_url: URL for current project.
:rtype: str
:return: Generated ready-to-add tag section.
"""
log = ''
# Generate date string:
# noinspection PyUnresolvedReferences
time_string = newer_tag_time.strftime(self.options.date_format)
# Generate tag name and link
if self.options.release_url:
release_url = self.options.release_url.format(newer_tag_link) # depends on [control=['if'], data=[]]
else:
release_url = u'{project_url}/tree/{newer_tag_link}'.format(project_url=project_url, newer_tag_link=newer_tag_link)
if not self.options.unreleased_with_date and newer_tag_name == self.options.unreleased_label:
log += u'## [{newer_tag_name}]({release_url})\n\n'.format(newer_tag_name=newer_tag_name, release_url=release_url) # depends on [control=['if'], data=[]]
else:
log += u'## [{newer_tag_name}]({release_url}) ({time_string})\n'.format(newer_tag_name=newer_tag_name, release_url=release_url, time_string=time_string)
if self.options.compare_link and older_tag_link != REPO_CREATED_TAG_NAME:
# Generate compare link
log += u'[Full Changelog]'
log += u'({project_url}/compare/{older_tag_link}'.format(project_url=project_url, older_tag_link=older_tag_link)
log += u'...{newer_tag_link})\n\n'.format(newer_tag_link=newer_tag_link) # depends on [control=['if'], data=[]]
return log |
def _get_variant_silent(parser, variant):
"""Gets a variant from the parser while disabling logging."""
prev_log = config.LOG_NOT_FOUND
config.LOG_NOT_FOUND = False
results = parser.get_variant_genotypes(variant)
config.LOG_NOT_FOUND = prev_log
return results | def function[_get_variant_silent, parameter[parser, variant]]:
constant[Gets a variant from the parser while disabling logging.]
variable[prev_log] assign[=] name[config].LOG_NOT_FOUND
name[config].LOG_NOT_FOUND assign[=] constant[False]
variable[results] assign[=] call[name[parser].get_variant_genotypes, parameter[name[variant]]]
name[config].LOG_NOT_FOUND assign[=] name[prev_log]
return[name[results]] | keyword[def] identifier[_get_variant_silent] ( identifier[parser] , identifier[variant] ):
literal[string]
identifier[prev_log] = identifier[config] . identifier[LOG_NOT_FOUND]
identifier[config] . identifier[LOG_NOT_FOUND] = keyword[False]
identifier[results] = identifier[parser] . identifier[get_variant_genotypes] ( identifier[variant] )
identifier[config] . identifier[LOG_NOT_FOUND] = identifier[prev_log]
keyword[return] identifier[results] | def _get_variant_silent(parser, variant):
"""Gets a variant from the parser while disabling logging."""
prev_log = config.LOG_NOT_FOUND
config.LOG_NOT_FOUND = False
results = parser.get_variant_genotypes(variant)
config.LOG_NOT_FOUND = prev_log
return results |
def safe_rt(resource_type, lower=False):
"""Format the Resource Type.
Takes Custom Indicator types with a space character and return a *safe* string.
(e.g. *User Agent* is converted to User_Agent or user_agent.)
Args:
resource_type (string): The resource type to format.
lower (boolean): Return type in all lower case
Returns:
(string): The formatted resource type.
"""
if resource_type is not None:
resource_type = resource_type.replace(' ', '_')
if lower:
resource_type = resource_type.lower()
return resource_type | def function[safe_rt, parameter[resource_type, lower]]:
constant[Format the Resource Type.
Takes Custom Indicator types with a space character and return a *safe* string.
(e.g. *User Agent* is converted to User_Agent or user_agent.)
Args:
resource_type (string): The resource type to format.
lower (boolean): Return type in all lower case
Returns:
(string): The formatted resource type.
]
if compare[name[resource_type] is_not constant[None]] begin[:]
variable[resource_type] assign[=] call[name[resource_type].replace, parameter[constant[ ], constant[_]]]
if name[lower] begin[:]
variable[resource_type] assign[=] call[name[resource_type].lower, parameter[]]
return[name[resource_type]] | keyword[def] identifier[safe_rt] ( identifier[resource_type] , identifier[lower] = keyword[False] ):
literal[string]
keyword[if] identifier[resource_type] keyword[is] keyword[not] keyword[None] :
identifier[resource_type] = identifier[resource_type] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[lower] :
identifier[resource_type] = identifier[resource_type] . identifier[lower] ()
keyword[return] identifier[resource_type] | def safe_rt(resource_type, lower=False):
"""Format the Resource Type.
Takes Custom Indicator types with a space character and return a *safe* string.
(e.g. *User Agent* is converted to User_Agent or user_agent.)
Args:
resource_type (string): The resource type to format.
lower (boolean): Return type in all lower case
Returns:
(string): The formatted resource type.
"""
if resource_type is not None:
resource_type = resource_type.replace(' ', '_')
if lower:
resource_type = resource_type.lower() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['resource_type']]
return resource_type |
def get_model_label(model):
"""
Take a model class or model label and return its model label.
>>> get_model_label(MyModel)
"myapp.MyModel"
>>> get_model_label("myapp.MyModel")
"myapp.MyModel"
"""
if isinstance(model, six.string_types):
return model
else:
return "%s.%s" % (
model._meta.app_label,
model.__name__
) | def function[get_model_label, parameter[model]]:
constant[
Take a model class or model label and return its model label.
>>> get_model_label(MyModel)
"myapp.MyModel"
>>> get_model_label("myapp.MyModel")
"myapp.MyModel"
]
if call[name[isinstance], parameter[name[model], name[six].string_types]] begin[:]
return[name[model]] | keyword[def] identifier[get_model_label] ( identifier[model] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[model] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[model]
keyword[else] :
keyword[return] literal[string] %(
identifier[model] . identifier[_meta] . identifier[app_label] ,
identifier[model] . identifier[__name__]
) | def get_model_label(model):
"""
Take a model class or model label and return its model label.
>>> get_model_label(MyModel)
"myapp.MyModel"
>>> get_model_label("myapp.MyModel")
"myapp.MyModel"
"""
if isinstance(model, six.string_types):
return model # depends on [control=['if'], data=[]]
else:
return '%s.%s' % (model._meta.app_label, model.__name__) |
def get_class(name, config_key, module):
"""Get the class by its name as a string."""
clsmembers = inspect.getmembers(module, inspect.isclass)
for string_name, act_class in clsmembers:
if string_name == name:
return act_class
# Check if the user has specified a plugin and if the class is in there
cfg = get_project_configuration()
if config_key in cfg:
modname = os.path.splitext(os.path.basename(cfg[config_key]))[0]
if os.path.isfile(cfg[config_key]):
usermodule = imp.load_source(modname, cfg[config_key])
clsmembers = inspect.getmembers(usermodule, inspect.isclass)
for string_name, act_class in clsmembers:
if string_name == name:
return act_class
else:
logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.",
cfg['data_analyzation_plugins'])
logging.debug("Unknown class '%s'.", name)
return None | def function[get_class, parameter[name, config_key, module]]:
constant[Get the class by its name as a string.]
variable[clsmembers] assign[=] call[name[inspect].getmembers, parameter[name[module], name[inspect].isclass]]
for taget[tuple[[<ast.Name object at 0x7da1b28645e0>, <ast.Name object at 0x7da1b2864c10>]]] in starred[name[clsmembers]] begin[:]
if compare[name[string_name] equal[==] name[name]] begin[:]
return[name[act_class]]
variable[cfg] assign[=] call[name[get_project_configuration], parameter[]]
if compare[name[config_key] in name[cfg]] begin[:]
variable[modname] assign[=] call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[call[name[cfg]][name[config_key]]]]]]][constant[0]]
if call[name[os].path.isfile, parameter[call[name[cfg]][name[config_key]]]] begin[:]
variable[usermodule] assign[=] call[name[imp].load_source, parameter[name[modname], call[name[cfg]][name[config_key]]]]
variable[clsmembers] assign[=] call[name[inspect].getmembers, parameter[name[usermodule], name[inspect].isclass]]
for taget[tuple[[<ast.Name object at 0x7da1b2819840>, <ast.Name object at 0x7da1b281a1a0>]]] in starred[name[clsmembers]] begin[:]
if compare[name[string_name] equal[==] name[name]] begin[:]
return[name[act_class]]
call[name[logging].debug, parameter[constant[Unknown class '%s'.], name[name]]]
return[constant[None]] | keyword[def] identifier[get_class] ( identifier[name] , identifier[config_key] , identifier[module] ):
literal[string]
identifier[clsmembers] = identifier[inspect] . identifier[getmembers] ( identifier[module] , identifier[inspect] . identifier[isclass] )
keyword[for] identifier[string_name] , identifier[act_class] keyword[in] identifier[clsmembers] :
keyword[if] identifier[string_name] == identifier[name] :
keyword[return] identifier[act_class]
identifier[cfg] = identifier[get_project_configuration] ()
keyword[if] identifier[config_key] keyword[in] identifier[cfg] :
identifier[modname] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[cfg] [ identifier[config_key] ]))[ literal[int] ]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[cfg] [ identifier[config_key] ]):
identifier[usermodule] = identifier[imp] . identifier[load_source] ( identifier[modname] , identifier[cfg] [ identifier[config_key] ])
identifier[clsmembers] = identifier[inspect] . identifier[getmembers] ( identifier[usermodule] , identifier[inspect] . identifier[isclass] )
keyword[for] identifier[string_name] , identifier[act_class] keyword[in] identifier[clsmembers] :
keyword[if] identifier[string_name] == identifier[name] :
keyword[return] identifier[act_class]
keyword[else] :
identifier[logging] . identifier[warning] ( literal[string] ,
identifier[cfg] [ literal[string] ])
identifier[logging] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[None] | def get_class(name, config_key, module):
"""Get the class by its name as a string."""
clsmembers = inspect.getmembers(module, inspect.isclass)
for (string_name, act_class) in clsmembers:
if string_name == name:
return act_class # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Check if the user has specified a plugin and if the class is in there
cfg = get_project_configuration()
if config_key in cfg:
modname = os.path.splitext(os.path.basename(cfg[config_key]))[0]
if os.path.isfile(cfg[config_key]):
usermodule = imp.load_source(modname, cfg[config_key])
clsmembers = inspect.getmembers(usermodule, inspect.isclass)
for (string_name, act_class) in clsmembers:
if string_name == name:
return act_class # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.", cfg['data_analyzation_plugins']) # depends on [control=['if'], data=['config_key', 'cfg']]
logging.debug("Unknown class '%s'.", name)
return None |
def mkdir(path,
owner=None,
grant_perms=None,
deny_perms=None,
inheritance=True,
reset=False):
'''
Ensure that the directory is available and permissions are set.
Args:
path (str):
The full path to the directory.
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter,
ie:
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool):
If True the object will inherit permissions from the parent, if
``False``, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created.
reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
.. versionadded:: 2018.3.0
Returns:
bool: True if successful
Raises:
CommandExecutionError: If unsuccessful
CLI Example:
.. code-block:: bash
# To grant the 'Users' group 'read & execute' permissions.
salt '*' file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute'}}"
# Locally using salt call
salt-call file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"
# Specify advanced attributes with a list
salt '*' file.mkdir C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder_only'}}"
'''
# Make sure the drive is valid
drive = os.path.splitdrive(path)[0]
if not os.path.isdir(drive):
raise CommandExecutionError('Drive {0} is not mapped'.format(drive))
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if not os.path.isdir(path):
try:
# Make the directory
os.mkdir(path)
# Set owner
if owner:
salt.utils.win_dacl.set_owner(obj_name=path, principal=owner)
# Set permissions
set_perms(
path=path,
grant_perms=grant_perms,
deny_perms=deny_perms,
inheritance=inheritance,
reset=reset)
except WindowsError as exc:
raise CommandExecutionError(exc)
return True | def function[mkdir, parameter[path, owner, grant_perms, deny_perms, inheritance, reset]]:
constant[
Ensure that the directory is available and permissions are set.
Args:
path (str):
The full path to the directory.
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter,
ie:
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool):
If True the object will inherit permissions from the parent, if
``False``, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created.
reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
.. versionadded:: 2018.3.0
Returns:
bool: True if successful
Raises:
CommandExecutionError: If unsuccessful
CLI Example:
.. code-block:: bash
# To grant the 'Users' group 'read & execute' permissions.
salt '*' file.mkdir C:\Temp\ Administrators "{'Users': {'perms': 'read_execute'}}"
# Locally using salt call
salt-call file.mkdir C:\Temp\ Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"
# Specify advanced attributes with a list
salt '*' file.mkdir C:\Temp\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder_only'}}"
]
variable[drive] assign[=] call[call[name[os].path.splitdrive, parameter[name[path]]]][constant[0]]
if <ast.UnaryOp object at 0x7da1b216b8b0> begin[:]
<ast.Raise object at 0x7da1b2169630>
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
variable[path] assign[=] call[name[os].path.expandvars, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da1b2169d20> begin[:]
<ast.Try object at 0x7da1b216ba00>
return[constant[True]] | keyword[def] identifier[mkdir] ( identifier[path] ,
identifier[owner] = keyword[None] ,
identifier[grant_perms] = keyword[None] ,
identifier[deny_perms] = keyword[None] ,
identifier[inheritance] = keyword[True] ,
identifier[reset] = keyword[False] ):
literal[string]
identifier[drive] = identifier[os] . identifier[path] . identifier[splitdrive] ( identifier[path] )[ literal[int] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[drive] ):
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[drive] ))
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
identifier[path] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[path] )
keyword[if] identifier[owner] :
identifier[salt] . identifier[utils] . identifier[win_dacl] . identifier[set_owner] ( identifier[obj_name] = identifier[path] , identifier[principal] = identifier[owner] )
identifier[set_perms] (
identifier[path] = identifier[path] ,
identifier[grant_perms] = identifier[grant_perms] ,
identifier[deny_perms] = identifier[deny_perms] ,
identifier[inheritance] = identifier[inheritance] ,
identifier[reset] = identifier[reset] )
keyword[except] identifier[WindowsError] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( identifier[exc] )
keyword[return] keyword[True] | def mkdir(path, owner=None, grant_perms=None, deny_perms=None, inheritance=True, reset=False):
"""
Ensure that the directory is available and permissions are set.
Args:
path (str):
The full path to the directory.
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter,
ie:
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool):
If True the object will inherit permissions from the parent, if
``False``, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created.
reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
.. versionadded:: 2018.3.0
Returns:
bool: True if successful
Raises:
CommandExecutionError: If unsuccessful
CLI Example:
.. code-block:: bash
# To grant the 'Users' group 'read & execute' permissions.
salt '*' file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute'}}"
# Locally using salt call
salt-call file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"
# Specify advanced attributes with a list
salt '*' file.mkdir C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder_only'}}"
"""
# Make sure the drive is valid
drive = os.path.splitdrive(path)[0]
if not os.path.isdir(drive):
raise CommandExecutionError('Drive {0} is not mapped'.format(drive)) # depends on [control=['if'], data=[]]
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if not os.path.isdir(path):
try:
# Make the directory
os.mkdir(path)
# Set owner
if owner:
salt.utils.win_dacl.set_owner(obj_name=path, principal=owner) # depends on [control=['if'], data=[]]
# Set permissions
set_perms(path=path, grant_perms=grant_perms, deny_perms=deny_perms, inheritance=inheritance, reset=reset) # depends on [control=['try'], data=[]]
except WindowsError as exc:
raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
return True |
def load(cls, filename, format=None):
""" Return an instance of the class that is saved in the file with the
given filename in the specified format.
"""
if format is None:
# try to derive protocol from file extension
format = format_from_extension(filename)
with file(filename, 'rbU') as fp:
obj = cls.load_from_file_object(fp, format)
obj.filename = filename
return obj | def function[load, parameter[cls, filename, format]]:
constant[ Return an instance of the class that is saved in the file with the
given filename in the specified format.
]
if compare[name[format] is constant[None]] begin[:]
variable[format] assign[=] call[name[format_from_extension], parameter[name[filename]]]
with call[name[file], parameter[name[filename], constant[rbU]]] begin[:]
variable[obj] assign[=] call[name[cls].load_from_file_object, parameter[name[fp], name[format]]]
name[obj].filename assign[=] name[filename]
return[name[obj]] | keyword[def] identifier[load] ( identifier[cls] , identifier[filename] , identifier[format] = keyword[None] ):
literal[string]
keyword[if] identifier[format] keyword[is] keyword[None] :
identifier[format] = identifier[format_from_extension] ( identifier[filename] )
keyword[with] identifier[file] ( identifier[filename] , literal[string] ) keyword[as] identifier[fp] :
identifier[obj] = identifier[cls] . identifier[load_from_file_object] ( identifier[fp] , identifier[format] )
identifier[obj] . identifier[filename] = identifier[filename]
keyword[return] identifier[obj] | def load(cls, filename, format=None):
""" Return an instance of the class that is saved in the file with the
given filename in the specified format.
"""
if format is None:
# try to derive protocol from file extension
format = format_from_extension(filename) # depends on [control=['if'], data=['format']]
with file(filename, 'rbU') as fp:
obj = cls.load_from_file_object(fp, format)
obj.filename = filename
return obj # depends on [control=['with'], data=['fp']] |
def handle(self, argv=None):
"""
Main function.
Parses command, load settings and dispatches accordingly.
"""
desc = ('Zappa - Deploy Python applications to AWS Lambda'
' and API Gateway.\n')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'-v', '--version', action='version',
version=pkg_resources.get_distribution("zappa").version,
help='Print the zappa version'
)
parser.add_argument(
'--color', default='auto', choices=['auto','never','always']
)
env_parser = argparse.ArgumentParser(add_help=False)
me_group = env_parser.add_mutually_exclusive_group()
all_help = ('Execute this command for all of our defined '
'Zappa stages.')
me_group.add_argument('--all', action='store_true', help=all_help)
me_group.add_argument('stage_env', nargs='?')
group = env_parser.add_argument_group()
group.add_argument(
'-a', '--app_function', help='The WSGI application function.'
)
group.add_argument(
'-s', '--settings_file', help='The path to a Zappa settings file.'
)
group.add_argument(
'-q', '--quiet', action='store_true', help='Silence all output.'
)
# https://github.com/Miserlou/Zappa/issues/407
# Moved when 'template' command added.
# Fuck Terraform.
group.add_argument(
'-j', '--json', action='store_true', help='Make the output of this command be machine readable.'
)
# https://github.com/Miserlou/Zappa/issues/891
group.add_argument(
'--disable_progress', action='store_true', help='Disable progress bars.'
)
##
# Certify
##
subparsers = parser.add_subparsers(title='subcommands', dest='command')
cert_parser = subparsers.add_parser(
'certify', parents=[env_parser],
help='Create and install SSL certificate'
)
cert_parser.add_argument(
'--manual', action='store_true',
help=("Gets new Let's Encrypt certificates, but prints them to console."
"Does not update API Gateway domains.")
)
cert_parser.add_argument(
'-y', '--yes', action='store_true', help='Auto confirm yes.'
)
##
# Deploy
##
deploy_parser = subparsers.add_parser(
'deploy', parents=[env_parser], help='Deploy application.'
)
deploy_parser.add_argument(
'-z', '--zip', help='Deploy Lambda with specific local or S3 hosted zip package'
)
##
# Init
##
init_parser = subparsers.add_parser('init', help='Initialize Zappa app.')
##
# Package
##
package_parser = subparsers.add_parser(
'package', parents=[env_parser], help='Build the application zip package locally.'
)
package_parser.add_argument(
'-o', '--output', help='Name of file to output the package to.'
)
##
# Template
##
template_parser = subparsers.add_parser(
'template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.'
)
template_parser.add_argument(
'-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.'
)
template_parser.add_argument(
'-r', '--role-arn', required=True, help='ARN of the Role to template with.'
)
template_parser.add_argument(
'-o', '--output', help='Name of file to output the template to.'
)
##
# Invocation
##
invoke_parser = subparsers.add_parser(
'invoke', parents=[env_parser],
help='Invoke remote function.'
)
invoke_parser.add_argument(
'--raw', action='store_true',
help=('When invoking remotely, invoke this python as a string,'
' not as a modular path.')
)
invoke_parser.add_argument(
'--no-color', action='store_true',
help=("Don't color the output")
)
invoke_parser.add_argument('command_rest')
##
# Manage
##
manage_parser = subparsers.add_parser(
'manage',
help='Invoke remote Django manage.py commands.'
)
rest_help = ("Command in the form of <env> <command>. <env> is not "
"required if --all is specified")
manage_parser.add_argument('--all', action='store_true', help=all_help)
manage_parser.add_argument('command_rest', nargs='+', help=rest_help)
manage_parser.add_argument(
'--no-color', action='store_true',
help=("Don't color the output")
)
# This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
# https://github.com/Miserlou/Zappa/issues/1002
manage_parser.add_argument(
'-s', '--settings_file', help='The path to a Zappa settings file.'
)
##
# Rollback
##
def positive_int(s):
""" Ensure an arg is positive """
i = int(s)
if i < 0:
msg = "This argument must be positive (got {})".format(s)
raise argparse.ArgumentTypeError(msg)
return i
rollback_parser = subparsers.add_parser(
'rollback', parents=[env_parser],
help='Rollback deployed code to a previous version.'
)
rollback_parser.add_argument(
'-n', '--num-rollback', type=positive_int, default=1,
help='The number of versions to rollback.'
)
##
# Scheduling
##
subparsers.add_parser(
'schedule', parents=[env_parser],
help='Schedule functions to occur at regular intervals.'
)
##
# Status
##
status_parser = subparsers.add_parser(
'status', parents=[env_parser],
help='Show deployment status and event schedules.'
)
##
# Log Tailing
##
tail_parser = subparsers.add_parser(
'tail', parents=[env_parser], help='Tail deployment logs.'
)
tail_parser.add_argument(
'--no-color', action='store_true',
help="Don't color log tail output."
)
tail_parser.add_argument(
'--http', action='store_true',
help='Only show HTTP requests in tail output.'
)
tail_parser.add_argument(
'--non-http', action='store_true',
help='Only show non-HTTP requests in tail output.'
)
tail_parser.add_argument(
'--since', type=str, default="100000s",
help="Only show lines since a certain timeframe."
)
tail_parser.add_argument(
'--filter', type=str, default="",
help="Apply a filter pattern to the logs."
)
tail_parser.add_argument(
'--force-color', action='store_true',
help='Force coloring log tail output even if coloring support is not auto-detected. (example: piping)'
)
tail_parser.add_argument(
'--disable-keep-open', action='store_true',
help="Exit after printing the last available log, rather than keeping the log open."
)
##
# Undeploy
##
undeploy_parser = subparsers.add_parser(
'undeploy', parents=[env_parser], help='Undeploy application.'
)
undeploy_parser.add_argument(
'--remove-logs', action='store_true',
help=('Removes log groups of api gateway and lambda task'
' during the undeployment.'),
)
undeploy_parser.add_argument(
'-y', '--yes', action='store_true', help='Auto confirm yes.'
)
##
# Unschedule
##
subparsers.add_parser('unschedule', parents=[env_parser],
help='Unschedule functions.')
##
# Updating
##
update_parser = subparsers.add_parser(
'update', parents=[env_parser], help='Update deployed application.'
)
update_parser.add_argument(
'-z', '--zip', help='Update Lambda with specific local or S3 hosted zip package'
)
update_parser.add_argument(
'-n', '--no-upload', help="Update configuration where appropriate, but don't upload new code"
)
##
# Debug
##
subparsers.add_parser(
'shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.'
)
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
if args.color == 'never':
disable_click_colors()
elif args.color == 'always':
#TODO: Support aggressive coloring like "--force-color" on all commands
pass
elif args.color == 'auto':
pass
# Parse the input
# NOTE(rmoe): Special case for manage command
# The manage command can't have both stage_env and command_rest
# arguments. Since they are both positional arguments argparse can't
# differentiate the two. This causes problems when used with --all.
# (e.g. "manage --all showmigrations admin" argparse thinks --all has
# been specified AND that stage_env='showmigrations')
# By having command_rest collect everything but --all we can split it
# apart here instead of relying on argparse.
if not args.command:
parser.print_help()
return
if args.command == 'manage' and not self.vargs.get('all'):
self.stage_env = self.vargs['command_rest'].pop(0)
else:
self.stage_env = self.vargs.get('stage_env')
if args.command == 'package':
self.load_credentials = False
self.command = args.command
self.disable_progress = self.vargs.get('disable_progress')
if self.vargs.get('quiet'):
self.silence()
# We don't have any settings yet, so make those first!
# (Settings-based interactions will fail
# before a project has been initialized.)
if self.command == 'init':
self.init()
return
# Make sure there isn't a new version available
if not self.vargs.get('json'):
self.check_for_update()
# Load and Validate Settings File
self.load_settings_file(self.vargs.get('settings_file'))
# Should we execute this for all stages, or just one?
all_stages = self.vargs.get('all')
stages = []
if all_stages: # All stages!
stages = self.zappa_settings.keys()
else: # Just one env.
if not self.stage_env:
# If there's only one stage defined in the settings,
# use that as the default.
if len(self.zappa_settings.keys()) == 1:
stages.append(list(self.zappa_settings.keys())[0])
else:
parser.error("Please supply a stage to interact with.")
else:
stages.append(self.stage_env)
for stage in stages:
try:
self.dispatch_command(self.command, stage)
except ClickException as e:
# Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
e.show()
sys.exit(e.exit_code) | def function[handle, parameter[self, argv]]:
constant[
Main function.
Parses command, load settings and dispatches accordingly.
]
variable[desc] assign[=] constant[Zappa - Deploy Python applications to AWS Lambda and API Gateway.
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-v], constant[--version]]]
call[name[parser].add_argument, parameter[constant[--color]]]
variable[env_parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
variable[me_group] assign[=] call[name[env_parser].add_mutually_exclusive_group, parameter[]]
variable[all_help] assign[=] constant[Execute this command for all of our defined Zappa stages.]
call[name[me_group].add_argument, parameter[constant[--all]]]
call[name[me_group].add_argument, parameter[constant[stage_env]]]
variable[group] assign[=] call[name[env_parser].add_argument_group, parameter[]]
call[name[group].add_argument, parameter[constant[-a], constant[--app_function]]]
call[name[group].add_argument, parameter[constant[-s], constant[--settings_file]]]
call[name[group].add_argument, parameter[constant[-q], constant[--quiet]]]
call[name[group].add_argument, parameter[constant[-j], constant[--json]]]
call[name[group].add_argument, parameter[constant[--disable_progress]]]
variable[subparsers] assign[=] call[name[parser].add_subparsers, parameter[]]
variable[cert_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[certify]]]
call[name[cert_parser].add_argument, parameter[constant[--manual]]]
call[name[cert_parser].add_argument, parameter[constant[-y], constant[--yes]]]
variable[deploy_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[deploy]]]
call[name[deploy_parser].add_argument, parameter[constant[-z], constant[--zip]]]
variable[init_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[init]]]
variable[package_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[package]]]
call[name[package_parser].add_argument, parameter[constant[-o], constant[--output]]]
variable[template_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[template]]]
call[name[template_parser].add_argument, parameter[constant[-l], constant[--lambda-arn]]]
call[name[template_parser].add_argument, parameter[constant[-r], constant[--role-arn]]]
call[name[template_parser].add_argument, parameter[constant[-o], constant[--output]]]
variable[invoke_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[invoke]]]
call[name[invoke_parser].add_argument, parameter[constant[--raw]]]
call[name[invoke_parser].add_argument, parameter[constant[--no-color]]]
call[name[invoke_parser].add_argument, parameter[constant[command_rest]]]
variable[manage_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[manage]]]
variable[rest_help] assign[=] constant[Command in the form of <env> <command>. <env> is not required if --all is specified]
call[name[manage_parser].add_argument, parameter[constant[--all]]]
call[name[manage_parser].add_argument, parameter[constant[command_rest]]]
call[name[manage_parser].add_argument, parameter[constant[--no-color]]]
call[name[manage_parser].add_argument, parameter[constant[-s], constant[--settings_file]]]
def function[positive_int, parameter[s]]:
constant[ Ensure an arg is positive ]
variable[i] assign[=] call[name[int], parameter[name[s]]]
if compare[name[i] less[<] constant[0]] begin[:]
variable[msg] assign[=] call[constant[This argument must be positive (got {})].format, parameter[name[s]]]
<ast.Raise object at 0x7da1b208ea70>
return[name[i]]
variable[rollback_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[rollback]]]
call[name[rollback_parser].add_argument, parameter[constant[-n], constant[--num-rollback]]]
call[name[subparsers].add_parser, parameter[constant[schedule]]]
variable[status_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[status]]]
variable[tail_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[tail]]]
call[name[tail_parser].add_argument, parameter[constant[--no-color]]]
call[name[tail_parser].add_argument, parameter[constant[--http]]]
call[name[tail_parser].add_argument, parameter[constant[--non-http]]]
call[name[tail_parser].add_argument, parameter[constant[--since]]]
call[name[tail_parser].add_argument, parameter[constant[--filter]]]
call[name[tail_parser].add_argument, parameter[constant[--force-color]]]
call[name[tail_parser].add_argument, parameter[constant[--disable-keep-open]]]
variable[undeploy_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[undeploy]]]
call[name[undeploy_parser].add_argument, parameter[constant[--remove-logs]]]
call[name[undeploy_parser].add_argument, parameter[constant[-y], constant[--yes]]]
call[name[subparsers].add_parser, parameter[constant[unschedule]]]
variable[update_parser] assign[=] call[name[subparsers].add_parser, parameter[constant[update]]]
call[name[update_parser].add_argument, parameter[constant[-z], constant[--zip]]]
call[name[update_parser].add_argument, parameter[constant[-n], constant[--no-upload]]]
call[name[subparsers].add_parser, parameter[constant[shell]]]
call[name[argcomplete].autocomplete, parameter[name[parser]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[argv]]]
name[self].vargs assign[=] call[name[vars], parameter[name[args]]]
if compare[name[args].color equal[==] constant[never]] begin[:]
call[name[disable_click_colors], parameter[]]
if <ast.UnaryOp object at 0x7da1b1f70580> begin[:]
call[name[parser].print_help, parameter[]]
return[None]
if <ast.BoolOp object at 0x7da1b1f737c0> begin[:]
name[self].stage_env assign[=] call[call[name[self].vargs][constant[command_rest]].pop, parameter[constant[0]]]
if compare[name[args].command equal[==] constant[package]] begin[:]
name[self].load_credentials assign[=] constant[False]
name[self].command assign[=] name[args].command
name[self].disable_progress assign[=] call[name[self].vargs.get, parameter[constant[disable_progress]]]
if call[name[self].vargs.get, parameter[constant[quiet]]] begin[:]
call[name[self].silence, parameter[]]
if compare[name[self].command equal[==] constant[init]] begin[:]
call[name[self].init, parameter[]]
return[None]
if <ast.UnaryOp object at 0x7da1b1f73430> begin[:]
call[name[self].check_for_update, parameter[]]
call[name[self].load_settings_file, parameter[call[name[self].vargs.get, parameter[constant[settings_file]]]]]
variable[all_stages] assign[=] call[name[self].vargs.get, parameter[constant[all]]]
variable[stages] assign[=] list[[]]
if name[all_stages] begin[:]
variable[stages] assign[=] call[name[self].zappa_settings.keys, parameter[]]
for taget[name[stage]] in starred[name[stages]] begin[:]
<ast.Try object at 0x7da1b20b84c0> | keyword[def] identifier[handle] ( identifier[self] , identifier[argv] = keyword[None] ):
literal[string]
identifier[desc] =( literal[string]
literal[string] )
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[desc] )
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[version] = identifier[pkg_resources] . identifier[get_distribution] ( literal[string] ). identifier[version] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[default] = literal[string] , identifier[choices] =[ literal[string] , literal[string] , literal[string] ]
)
identifier[env_parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[add_help] = keyword[False] )
identifier[me_group] = identifier[env_parser] . identifier[add_mutually_exclusive_group] ()
identifier[all_help] =( literal[string]
literal[string] )
identifier[me_group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[help] = identifier[all_help] )
identifier[me_group] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] )
identifier[group] = identifier[env_parser] . identifier[add_argument_group] ()
identifier[group] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[group] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[group] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string]
)
identifier[group] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string]
)
identifier[group] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string]
)
identifier[subparsers] = identifier[parser] . identifier[add_subparsers] ( identifier[title] = literal[string] , identifier[dest] = literal[string] )
identifier[cert_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ],
identifier[help] = literal[string]
)
identifier[cert_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] =( literal[string]
literal[string] )
)
identifier[cert_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string]
)
identifier[deploy_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[deploy_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[init_parser] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] )
identifier[package_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[package_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[template_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[template_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string]
)
identifier[template_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string]
)
identifier[template_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[invoke_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ],
identifier[help] = literal[string]
)
identifier[invoke_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] =( literal[string]
literal[string] )
)
identifier[invoke_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] =( literal[string] )
)
identifier[invoke_parser] . identifier[add_argument] ( literal[string] )
identifier[manage_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] ,
identifier[help] = literal[string]
)
identifier[rest_help] =( literal[string]
literal[string] )
identifier[manage_parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[help] = identifier[all_help] )
identifier[manage_parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[help] = identifier[rest_help] )
identifier[manage_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] =( literal[string] )
)
identifier[manage_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
keyword[def] identifier[positive_int] ( identifier[s] ):
literal[string]
identifier[i] = identifier[int] ( identifier[s] )
keyword[if] identifier[i] < literal[int] :
identifier[msg] = literal[string] . identifier[format] ( identifier[s] )
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( identifier[msg] )
keyword[return] identifier[i]
identifier[rollback_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ],
identifier[help] = literal[string]
)
identifier[rollback_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[type] = identifier[positive_int] , identifier[default] = literal[int] ,
identifier[help] = literal[string]
)
identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ],
identifier[help] = literal[string]
)
identifier[status_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ],
identifier[help] = literal[string]
)
identifier[tail_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[str] , identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[str] , identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[tail_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[undeploy_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[undeploy_parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] =( literal[string]
literal[string] ),
)
identifier[undeploy_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string]
)
identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[parents] =[ identifier[env_parser] ],
identifier[help] = literal[string] )
identifier[update_parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[update_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[update_parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[help] = literal[string]
)
identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[parents] =[ identifier[env_parser] ], identifier[help] = literal[string]
)
identifier[argcomplete] . identifier[autocomplete] ( identifier[parser] )
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[argv] )
identifier[self] . identifier[vargs] = identifier[vars] ( identifier[args] )
keyword[if] identifier[args] . identifier[color] == literal[string] :
identifier[disable_click_colors] ()
keyword[elif] identifier[args] . identifier[color] == literal[string] :
keyword[pass]
keyword[elif] identifier[args] . identifier[color] == literal[string] :
keyword[pass]
keyword[if] keyword[not] identifier[args] . identifier[command] :
identifier[parser] . identifier[print_help] ()
keyword[return]
keyword[if] identifier[args] . identifier[command] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[vargs] . identifier[get] ( literal[string] ):
identifier[self] . identifier[stage_env] = identifier[self] . identifier[vargs] [ literal[string] ]. identifier[pop] ( literal[int] )
keyword[else] :
identifier[self] . identifier[stage_env] = identifier[self] . identifier[vargs] . identifier[get] ( literal[string] )
keyword[if] identifier[args] . identifier[command] == literal[string] :
identifier[self] . identifier[load_credentials] = keyword[False]
identifier[self] . identifier[command] = identifier[args] . identifier[command]
identifier[self] . identifier[disable_progress] = identifier[self] . identifier[vargs] . identifier[get] ( literal[string] )
keyword[if] identifier[self] . identifier[vargs] . identifier[get] ( literal[string] ):
identifier[self] . identifier[silence] ()
keyword[if] identifier[self] . identifier[command] == literal[string] :
identifier[self] . identifier[init] ()
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[vargs] . identifier[get] ( literal[string] ):
identifier[self] . identifier[check_for_update] ()
identifier[self] . identifier[load_settings_file] ( identifier[self] . identifier[vargs] . identifier[get] ( literal[string] ))
identifier[all_stages] = identifier[self] . identifier[vargs] . identifier[get] ( literal[string] )
identifier[stages] =[]
keyword[if] identifier[all_stages] :
identifier[stages] = identifier[self] . identifier[zappa_settings] . identifier[keys] ()
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[stage_env] :
keyword[if] identifier[len] ( identifier[self] . identifier[zappa_settings] . identifier[keys] ())== literal[int] :
identifier[stages] . identifier[append] ( identifier[list] ( identifier[self] . identifier[zappa_settings] . identifier[keys] ())[ literal[int] ])
keyword[else] :
identifier[parser] . identifier[error] ( literal[string] )
keyword[else] :
identifier[stages] . identifier[append] ( identifier[self] . identifier[stage_env] )
keyword[for] identifier[stage] keyword[in] identifier[stages] :
keyword[try] :
identifier[self] . identifier[dispatch_command] ( identifier[self] . identifier[command] , identifier[stage] )
keyword[except] identifier[ClickException] keyword[as] identifier[e] :
identifier[e] . identifier[show] ()
identifier[sys] . identifier[exit] ( identifier[e] . identifier[exit_code] ) | def handle(self, argv=None):
"""
Main function.
Parses command, load settings and dispatches accordingly.
"""
desc = 'Zappa - Deploy Python applications to AWS Lambda and API Gateway.\n'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-v', '--version', action='version', version=pkg_resources.get_distribution('zappa').version, help='Print the zappa version')
parser.add_argument('--color', default='auto', choices=['auto', 'never', 'always'])
env_parser = argparse.ArgumentParser(add_help=False)
me_group = env_parser.add_mutually_exclusive_group()
all_help = 'Execute this command for all of our defined Zappa stages.'
me_group.add_argument('--all', action='store_true', help=all_help)
me_group.add_argument('stage_env', nargs='?')
group = env_parser.add_argument_group()
group.add_argument('-a', '--app_function', help='The WSGI application function.')
group.add_argument('-s', '--settings_file', help='The path to a Zappa settings file.')
group.add_argument('-q', '--quiet', action='store_true', help='Silence all output.')
# https://github.com/Miserlou/Zappa/issues/407
# Moved when 'template' command added.
# Fuck Terraform.
group.add_argument('-j', '--json', action='store_true', help='Make the output of this command be machine readable.')
# https://github.com/Miserlou/Zappa/issues/891
group.add_argument('--disable_progress', action='store_true', help='Disable progress bars.')
##
# Certify
##
subparsers = parser.add_subparsers(title='subcommands', dest='command')
cert_parser = subparsers.add_parser('certify', parents=[env_parser], help='Create and install SSL certificate')
cert_parser.add_argument('--manual', action='store_true', help="Gets new Let's Encrypt certificates, but prints them to console.Does not update API Gateway domains.")
cert_parser.add_argument('-y', '--yes', action='store_true', help='Auto confirm yes.')
##
# Deploy
##
deploy_parser = subparsers.add_parser('deploy', parents=[env_parser], help='Deploy application.')
deploy_parser.add_argument('-z', '--zip', help='Deploy Lambda with specific local or S3 hosted zip package')
##
# Init
##
init_parser = subparsers.add_parser('init', help='Initialize Zappa app.')
##
# Package
##
package_parser = subparsers.add_parser('package', parents=[env_parser], help='Build the application zip package locally.')
package_parser.add_argument('-o', '--output', help='Name of file to output the package to.')
##
# Template
##
template_parser = subparsers.add_parser('template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.')
template_parser.add_argument('-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.')
template_parser.add_argument('-r', '--role-arn', required=True, help='ARN of the Role to template with.')
template_parser.add_argument('-o', '--output', help='Name of file to output the template to.')
##
# Invocation
##
invoke_parser = subparsers.add_parser('invoke', parents=[env_parser], help='Invoke remote function.')
invoke_parser.add_argument('--raw', action='store_true', help='When invoking remotely, invoke this python as a string, not as a modular path.')
invoke_parser.add_argument('--no-color', action='store_true', help="Don't color the output")
invoke_parser.add_argument('command_rest')
##
# Manage
##
manage_parser = subparsers.add_parser('manage', help='Invoke remote Django manage.py commands.')
rest_help = 'Command in the form of <env> <command>. <env> is not required if --all is specified'
manage_parser.add_argument('--all', action='store_true', help=all_help)
manage_parser.add_argument('command_rest', nargs='+', help=rest_help)
manage_parser.add_argument('--no-color', action='store_true', help="Don't color the output")
# This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
# https://github.com/Miserlou/Zappa/issues/1002
manage_parser.add_argument('-s', '--settings_file', help='The path to a Zappa settings file.')
##
# Rollback
##
def positive_int(s):
""" Ensure an arg is positive """
i = int(s)
if i < 0:
msg = 'This argument must be positive (got {})'.format(s)
raise argparse.ArgumentTypeError(msg) # depends on [control=['if'], data=[]]
return i
rollback_parser = subparsers.add_parser('rollback', parents=[env_parser], help='Rollback deployed code to a previous version.')
rollback_parser.add_argument('-n', '--num-rollback', type=positive_int, default=1, help='The number of versions to rollback.')
##
# Scheduling
##
subparsers.add_parser('schedule', parents=[env_parser], help='Schedule functions to occur at regular intervals.')
##
# Status
##
status_parser = subparsers.add_parser('status', parents=[env_parser], help='Show deployment status and event schedules.')
##
# Log Tailing
##
tail_parser = subparsers.add_parser('tail', parents=[env_parser], help='Tail deployment logs.')
tail_parser.add_argument('--no-color', action='store_true', help="Don't color log tail output.")
tail_parser.add_argument('--http', action='store_true', help='Only show HTTP requests in tail output.')
tail_parser.add_argument('--non-http', action='store_true', help='Only show non-HTTP requests in tail output.')
tail_parser.add_argument('--since', type=str, default='100000s', help='Only show lines since a certain timeframe.')
tail_parser.add_argument('--filter', type=str, default='', help='Apply a filter pattern to the logs.')
tail_parser.add_argument('--force-color', action='store_true', help='Force coloring log tail output even if coloring support is not auto-detected. (example: piping)')
tail_parser.add_argument('--disable-keep-open', action='store_true', help='Exit after printing the last available log, rather than keeping the log open.')
##
# Undeploy
##
undeploy_parser = subparsers.add_parser('undeploy', parents=[env_parser], help='Undeploy application.')
undeploy_parser.add_argument('--remove-logs', action='store_true', help='Removes log groups of api gateway and lambda task during the undeployment.')
undeploy_parser.add_argument('-y', '--yes', action='store_true', help='Auto confirm yes.')
##
# Unschedule
##
subparsers.add_parser('unschedule', parents=[env_parser], help='Unschedule functions.')
##
# Updating
##
update_parser = subparsers.add_parser('update', parents=[env_parser], help='Update deployed application.')
update_parser.add_argument('-z', '--zip', help='Update Lambda with specific local or S3 hosted zip package')
update_parser.add_argument('-n', '--no-upload', help="Update configuration where appropriate, but don't upload new code")
##
# Debug
##
subparsers.add_parser('shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.')
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
if args.color == 'never':
disable_click_colors() # depends on [control=['if'], data=[]]
elif args.color == 'always':
#TODO: Support aggressive coloring like "--force-color" on all commands
pass # depends on [control=['if'], data=[]]
elif args.color == 'auto':
pass # depends on [control=['if'], data=[]]
# Parse the input
# NOTE(rmoe): Special case for manage command
# The manage command can't have both stage_env and command_rest
# arguments. Since they are both positional arguments argparse can't
# differentiate the two. This causes problems when used with --all.
# (e.g. "manage --all showmigrations admin" argparse thinks --all has
# been specified AND that stage_env='showmigrations')
# By having command_rest collect everything but --all we can split it
# apart here instead of relying on argparse.
if not args.command:
parser.print_help()
return # depends on [control=['if'], data=[]]
if args.command == 'manage' and (not self.vargs.get('all')):
self.stage_env = self.vargs['command_rest'].pop(0) # depends on [control=['if'], data=[]]
else:
self.stage_env = self.vargs.get('stage_env')
if args.command == 'package':
self.load_credentials = False # depends on [control=['if'], data=[]]
self.command = args.command
self.disable_progress = self.vargs.get('disable_progress')
if self.vargs.get('quiet'):
self.silence() # depends on [control=['if'], data=[]]
# We don't have any settings yet, so make those first!
# (Settings-based interactions will fail
# before a project has been initialized.)
if self.command == 'init':
self.init()
return # depends on [control=['if'], data=[]]
# Make sure there isn't a new version available
if not self.vargs.get('json'):
self.check_for_update() # depends on [control=['if'], data=[]]
# Load and Validate Settings File
self.load_settings_file(self.vargs.get('settings_file'))
# Should we execute this for all stages, or just one?
all_stages = self.vargs.get('all')
stages = []
if all_stages: # All stages!
stages = self.zappa_settings.keys() # depends on [control=['if'], data=[]] # Just one env.
elif not self.stage_env:
# If there's only one stage defined in the settings,
# use that as the default.
if len(self.zappa_settings.keys()) == 1:
stages.append(list(self.zappa_settings.keys())[0]) # depends on [control=['if'], data=[]]
else:
parser.error('Please supply a stage to interact with.') # depends on [control=['if'], data=[]]
else:
stages.append(self.stage_env)
for stage in stages:
try:
self.dispatch_command(self.command, stage) # depends on [control=['try'], data=[]]
except ClickException as e:
# Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
e.show()
sys.exit(e.exit_code) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['stage']] |
def _read_config(self, filename=None):
"""
Read the user configuration
"""
if filename:
self._config_filename = filename
else:
try:
import appdirs
except ImportError:
raise Exception("Missing dependency for determining config path. Please install "
"the 'appdirs' Python module.")
self._config_filename = appdirs.user_config_dir(_LIBRARY_NAME, "ProfitBricks") + ".ini"
if not self._config:
self._config = configparser.ConfigParser()
self._config.optionxform = str
self._config.read(self._config_filename) | def function[_read_config, parameter[self, filename]]:
constant[
Read the user configuration
]
if name[filename] begin[:]
name[self]._config_filename assign[=] name[filename]
if <ast.UnaryOp object at 0x7da1b26afd30> begin[:]
name[self]._config assign[=] call[name[configparser].ConfigParser, parameter[]]
name[self]._config.optionxform assign[=] name[str]
call[name[self]._config.read, parameter[name[self]._config_filename]] | keyword[def] identifier[_read_config] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] identifier[filename] :
identifier[self] . identifier[_config_filename] = identifier[filename]
keyword[else] :
keyword[try] :
keyword[import] identifier[appdirs]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] )
identifier[self] . identifier[_config_filename] = identifier[appdirs] . identifier[user_config_dir] ( identifier[_LIBRARY_NAME] , literal[string] )+ literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_config] :
identifier[self] . identifier[_config] = identifier[configparser] . identifier[ConfigParser] ()
identifier[self] . identifier[_config] . identifier[optionxform] = identifier[str]
identifier[self] . identifier[_config] . identifier[read] ( identifier[self] . identifier[_config_filename] ) | def _read_config(self, filename=None):
"""
Read the user configuration
"""
if filename:
self._config_filename = filename # depends on [control=['if'], data=[]]
else:
try:
import appdirs # depends on [control=['try'], data=[]]
except ImportError:
raise Exception("Missing dependency for determining config path. Please install the 'appdirs' Python module.") # depends on [control=['except'], data=[]]
self._config_filename = appdirs.user_config_dir(_LIBRARY_NAME, 'ProfitBricks') + '.ini'
if not self._config:
self._config = configparser.ConfigParser()
self._config.optionxform = str
self._config.read(self._config_filename) # depends on [control=['if'], data=[]] |
def select_python_parser(parser=None):
"""
Select default parser for loading and refactoring steps. Passing `redbaron` as argument
will select the old paring engine from v0.3.3
Replacing the redbaron parser was necessary to support Python 3 syntax. We have tried our
best to make sure there is no user impact on users. However, there may be regressions with
new parser backend.
To revert to the old parser implementation, add `GETGAUGE_USE_0_3_3_PARSER=true` property
to the `python.properties` file in the `<PROJECT_DIR>/env/default directory.
This property along with the redbaron parser will be removed in future releases.
"""
if parser == 'redbaron' or os.environ.get('GETGAUGE_USE_0_3_3_PARSER'):
PythonFile.Class = RedbaronPythonFile
else:
PythonFile.Class = ParsoPythonFile | def function[select_python_parser, parameter[parser]]:
constant[
Select default parser for loading and refactoring steps. Passing `redbaron` as argument
will select the old paring engine from v0.3.3
Replacing the redbaron parser was necessary to support Python 3 syntax. We have tried our
best to make sure there is no user impact on users. However, there may be regressions with
new parser backend.
To revert to the old parser implementation, add `GETGAUGE_USE_0_3_3_PARSER=true` property
to the `python.properties` file in the `<PROJECT_DIR>/env/default directory.
This property along with the redbaron parser will be removed in future releases.
]
if <ast.BoolOp object at 0x7da20c6e6590> begin[:]
name[PythonFile].Class assign[=] name[RedbaronPythonFile] | keyword[def] identifier[select_python_parser] ( identifier[parser] = keyword[None] ):
literal[string]
keyword[if] identifier[parser] == literal[string] keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ):
identifier[PythonFile] . identifier[Class] = identifier[RedbaronPythonFile]
keyword[else] :
identifier[PythonFile] . identifier[Class] = identifier[ParsoPythonFile] | def select_python_parser(parser=None):
"""
Select default parser for loading and refactoring steps. Passing `redbaron` as argument
will select the old paring engine from v0.3.3
Replacing the redbaron parser was necessary to support Python 3 syntax. We have tried our
best to make sure there is no user impact on users. However, there may be regressions with
new parser backend.
To revert to the old parser implementation, add `GETGAUGE_USE_0_3_3_PARSER=true` property
to the `python.properties` file in the `<PROJECT_DIR>/env/default directory.
This property along with the redbaron parser will be removed in future releases.
"""
if parser == 'redbaron' or os.environ.get('GETGAUGE_USE_0_3_3_PARSER'):
PythonFile.Class = RedbaronPythonFile # depends on [control=['if'], data=[]]
else:
PythonFile.Class = ParsoPythonFile |
def lastNoiseCurve(expPath, suite, iteration="last"):
"""
Print the noise errors from the last iteration of this experiment
"""
noiseValues = ["0.0", "0.05", "0.1", "0.15", "0.2", "0.25", "0.3",
"0.35", "0.4", "0.45", "0.5"]
print("\nNOISE CURVE =====",expPath,"====== ITERATION:",iteration,"=========")
try:
result = suite.get_value(expPath, 0, noiseValues, iteration)
info = []
for k in noiseValues:
info.append([k,result[k]["testerror"]])
print(tabulate(info, headers=["noise","Test Error"], tablefmt="grid"))
print("totalCorrect:", suite.get_value(expPath, 0, "totalCorrect", iteration))
except:
print("Couldn't load experiment",expPath) | def function[lastNoiseCurve, parameter[expPath, suite, iteration]]:
constant[
Print the noise errors from the last iteration of this experiment
]
variable[noiseValues] assign[=] list[[<ast.Constant object at 0x7da1b08651b0>, <ast.Constant object at 0x7da1b08643d0>, <ast.Constant object at 0x7da1b08642e0>, <ast.Constant object at 0x7da1b0866320>, <ast.Constant object at 0x7da1b083f460>, <ast.Constant object at 0x7da1b083f400>, <ast.Constant object at 0x7da1b083f910>, <ast.Constant object at 0x7da1b09800a0>, <ast.Constant object at 0x7da1b0847670>, <ast.Constant object at 0x7da1b0847d60>, <ast.Constant object at 0x7da1b0844040>]]
call[name[print], parameter[constant[
NOISE CURVE =====], name[expPath], constant[====== ITERATION:], name[iteration], constant[=========]]]
<ast.Try object at 0x7da1b08477f0> | keyword[def] identifier[lastNoiseCurve] ( identifier[expPath] , identifier[suite] , identifier[iteration] = literal[string] ):
literal[string]
identifier[noiseValues] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[print] ( literal[string] , identifier[expPath] , literal[string] , identifier[iteration] , literal[string] )
keyword[try] :
identifier[result] = identifier[suite] . identifier[get_value] ( identifier[expPath] , literal[int] , identifier[noiseValues] , identifier[iteration] )
identifier[info] =[]
keyword[for] identifier[k] keyword[in] identifier[noiseValues] :
identifier[info] . identifier[append] ([ identifier[k] , identifier[result] [ identifier[k] ][ literal[string] ]])
identifier[print] ( identifier[tabulate] ( identifier[info] , identifier[headers] =[ literal[string] , literal[string] ], identifier[tablefmt] = literal[string] ))
identifier[print] ( literal[string] , identifier[suite] . identifier[get_value] ( identifier[expPath] , literal[int] , literal[string] , identifier[iteration] ))
keyword[except] :
identifier[print] ( literal[string] , identifier[expPath] ) | def lastNoiseCurve(expPath, suite, iteration='last'):
"""
Print the noise errors from the last iteration of this experiment
"""
noiseValues = ['0.0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', '0.45', '0.5']
print('\nNOISE CURVE =====', expPath, '====== ITERATION:', iteration, '=========')
try:
result = suite.get_value(expPath, 0, noiseValues, iteration)
info = []
for k in noiseValues:
info.append([k, result[k]['testerror']]) # depends on [control=['for'], data=['k']]
print(tabulate(info, headers=['noise', 'Test Error'], tablefmt='grid'))
print('totalCorrect:', suite.get_value(expPath, 0, 'totalCorrect', iteration)) # depends on [control=['try'], data=[]]
except:
print("Couldn't load experiment", expPath) # depends on [control=['except'], data=[]] |
def _compare_version(self, requirements, support):
"""
Return whether there is an intersection between a support applications
GUID set and a set of supported applications.
"""
for guid in requirements:
# If we support any of the GUIDs in the guid_set, test if any of
# the provided versions for the GUID are supported.
if (guid in support and
any((detected_version in requirements[guid]) for
detected_version in support[guid])):
return True | def function[_compare_version, parameter[self, requirements, support]]:
constant[
Return whether there is an intersection between a support applications
GUID set and a set of supported applications.
]
for taget[name[guid]] in starred[name[requirements]] begin[:]
if <ast.BoolOp object at 0x7da20e955060> begin[:]
return[constant[True]] | keyword[def] identifier[_compare_version] ( identifier[self] , identifier[requirements] , identifier[support] ):
literal[string]
keyword[for] identifier[guid] keyword[in] identifier[requirements] :
keyword[if] ( identifier[guid] keyword[in] identifier[support] keyword[and]
identifier[any] (( identifier[detected_version] keyword[in] identifier[requirements] [ identifier[guid] ]) keyword[for]
identifier[detected_version] keyword[in] identifier[support] [ identifier[guid] ])):
keyword[return] keyword[True] | def _compare_version(self, requirements, support):
"""
Return whether there is an intersection between a support applications
GUID set and a set of supported applications.
"""
for guid in requirements:
# If we support any of the GUIDs in the guid_set, test if any of
# the provided versions for the GUID are supported.
if guid in support and any((detected_version in requirements[guid] for detected_version in support[guid])):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['guid']] |
def cmdify(self):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
return " ".join(
itertools.chain(
[_quote_if_contains(self.command, r"[\s^()]")],
(_quote_if_contains(arg, r"[\s^]") for arg in self.args),
)
) | def function[cmdify, parameter[self]]:
constant[Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
]
return[call[constant[ ].join, parameter[call[name[itertools].chain, parameter[list[[<ast.Call object at 0x7da1b2067c40>]], <ast.GeneratorExp object at 0x7da1b20669b0>]]]]] | keyword[def] identifier[cmdify] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[join] (
identifier[itertools] . identifier[chain] (
[ identifier[_quote_if_contains] ( identifier[self] . identifier[command] , literal[string] )],
( identifier[_quote_if_contains] ( identifier[arg] , literal[string] ) keyword[for] identifier[arg] keyword[in] identifier[self] . identifier[args] ),
)
) | def cmdify(self):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
return ' '.join(itertools.chain([_quote_if_contains(self.command, '[\\s^()]')], (_quote_if_contains(arg, '[\\s^]') for arg in self.args))) |
def search(self, filterstr, attrlist):
"""Query the configured LDAP server."""
return self._paged_search_ext_s(self.settings.BASE, ldap.SCOPE_SUBTREE, filterstr=filterstr,
attrlist=attrlist, page_size=self.settings.PAGE_SIZE) | def function[search, parameter[self, filterstr, attrlist]]:
constant[Query the configured LDAP server.]
return[call[name[self]._paged_search_ext_s, parameter[name[self].settings.BASE, name[ldap].SCOPE_SUBTREE]]] | keyword[def] identifier[search] ( identifier[self] , identifier[filterstr] , identifier[attrlist] ):
literal[string]
keyword[return] identifier[self] . identifier[_paged_search_ext_s] ( identifier[self] . identifier[settings] . identifier[BASE] , identifier[ldap] . identifier[SCOPE_SUBTREE] , identifier[filterstr] = identifier[filterstr] ,
identifier[attrlist] = identifier[attrlist] , identifier[page_size] = identifier[self] . identifier[settings] . identifier[PAGE_SIZE] ) | def search(self, filterstr, attrlist):
"""Query the configured LDAP server."""
return self._paged_search_ext_s(self.settings.BASE, ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=attrlist, page_size=self.settings.PAGE_SIZE) |
def run_fusion_caller(job, star_bam, univ_options, fusion_options):
"""
This module will run a fusion caller on DNA bams. This module will be
implemented in the future.
This module corresponds to node 10 on the tree
"""
job.fileStore.logToMaster('Running FUSION on %s' % univ_options['patient'])
fusion_file = job.fileStore.getLocalTempFile()
output_file = job.fileStore.writeGlobalFile(fusion_file)
return output_file | def function[run_fusion_caller, parameter[job, star_bam, univ_options, fusion_options]]:
constant[
This module will run a fusion caller on DNA bams. This module will be
implemented in the future.
This module corresponds to node 10 on the tree
]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Running FUSION on %s] <ast.Mod object at 0x7da2590d6920> call[name[univ_options]][constant[patient]]]]]
variable[fusion_file] assign[=] call[name[job].fileStore.getLocalTempFile, parameter[]]
variable[output_file] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[name[fusion_file]]]
return[name[output_file]] | keyword[def] identifier[run_fusion_caller] ( identifier[job] , identifier[star_bam] , identifier[univ_options] , identifier[fusion_options] ):
literal[string]
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] % identifier[univ_options] [ literal[string] ])
identifier[fusion_file] = identifier[job] . identifier[fileStore] . identifier[getLocalTempFile] ()
identifier[output_file] = identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[fusion_file] )
keyword[return] identifier[output_file] | def run_fusion_caller(job, star_bam, univ_options, fusion_options):
"""
This module will run a fusion caller on DNA bams. This module will be
implemented in the future.
This module corresponds to node 10 on the tree
"""
job.fileStore.logToMaster('Running FUSION on %s' % univ_options['patient'])
fusion_file = job.fileStore.getLocalTempFile()
output_file = job.fileStore.writeGlobalFile(fusion_file)
return output_file |
def emit_data_changed(self):
"""Emit the data changed signal on the model of the treeitem
if the treeitem has a model.
:returns: None
:rtype: None
:raises: None
"""
item = self.get_treeitem()
m = item.get_model()
if m:
start = m.index_of_item(item)
parent = start.parent()
end = m.index(start.row(), item.column_count()-1, parent)
m.dataChanged.emit(start, end) | def function[emit_data_changed, parameter[self]]:
constant[Emit the data changed signal on the model of the treeitem
if the treeitem has a model.
:returns: None
:rtype: None
:raises: None
]
variable[item] assign[=] call[name[self].get_treeitem, parameter[]]
variable[m] assign[=] call[name[item].get_model, parameter[]]
if name[m] begin[:]
variable[start] assign[=] call[name[m].index_of_item, parameter[name[item]]]
variable[parent] assign[=] call[name[start].parent, parameter[]]
variable[end] assign[=] call[name[m].index, parameter[call[name[start].row, parameter[]], binary_operation[call[name[item].column_count, parameter[]] - constant[1]], name[parent]]]
call[name[m].dataChanged.emit, parameter[name[start], name[end]]] | keyword[def] identifier[emit_data_changed] ( identifier[self] ):
literal[string]
identifier[item] = identifier[self] . identifier[get_treeitem] ()
identifier[m] = identifier[item] . identifier[get_model] ()
keyword[if] identifier[m] :
identifier[start] = identifier[m] . identifier[index_of_item] ( identifier[item] )
identifier[parent] = identifier[start] . identifier[parent] ()
identifier[end] = identifier[m] . identifier[index] ( identifier[start] . identifier[row] (), identifier[item] . identifier[column_count] ()- literal[int] , identifier[parent] )
identifier[m] . identifier[dataChanged] . identifier[emit] ( identifier[start] , identifier[end] ) | def emit_data_changed(self):
"""Emit the data changed signal on the model of the treeitem
if the treeitem has a model.
:returns: None
:rtype: None
:raises: None
"""
item = self.get_treeitem()
m = item.get_model()
if m:
start = m.index_of_item(item)
parent = start.parent()
end = m.index(start.row(), item.column_count() - 1, parent)
m.dataChanged.emit(start, end) # depends on [control=['if'], data=[]] |
def profile_distribution(data):
"""
Compute the mean, standard deviation, min, quartile1, quartile2, quartile3, and max of a vector
Parameters
----------
data: array of real values
Returns
-------
features = dictionary containing the min, max, mean, and standard deviation
"""
if len(data) == 0:
return (data, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
else:
ddof = 1 if len(data) > 1 else 0
dist_mean = np.mean(data)
dist_stdev = np.std(data, ddof=ddof)
dist_min, dist_quartile1, dist_quartile2, dist_quartile3, dist_max = np.percentile(data, [0,25,50,75,100])
dist_skew = skew(data)
dist_kurtosis = kurtosis(data)
return (data, dist_mean, dist_stdev, dist_skew, dist_kurtosis, dist_min, dist_quartile1, dist_quartile2, dist_quartile3, dist_max) | def function[profile_distribution, parameter[data]]:
constant[
Compute the mean, standard deviation, min, quartile1, quartile2, quartile3, and max of a vector
Parameters
----------
data: array of real values
Returns
-------
features = dictionary containing the min, max, mean, and standard deviation
]
if compare[call[name[len], parameter[name[data]]] equal[==] constant[0]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b2594310>, <ast.Attribute object at 0x7da1b2597a60>, <ast.Attribute object at 0x7da1b2594fa0>, <ast.Attribute object at 0x7da1b2595ab0>, <ast.Attribute object at 0x7da1b2595270>, <ast.Attribute object at 0x7da1b25944f0>, <ast.Attribute object at 0x7da1b25975e0>, <ast.Attribute object at 0x7da1b2594880>, <ast.Attribute object at 0x7da1b2597b80>, <ast.Attribute object at 0x7da1b2595ba0>]]]
return[tuple[[<ast.Name object at 0x7da1b2405a80>, <ast.Name object at 0x7da1b2404a90>, <ast.Name object at 0x7da1b2407460>, <ast.Name object at 0x7da1b2405ab0>, <ast.Name object at 0x7da1b2404dc0>, <ast.Name object at 0x7da1b2406380>, <ast.Name object at 0x7da1b2406d70>, <ast.Name object at 0x7da1b2406890>, <ast.Name object at 0x7da1b2404640>, <ast.Name object at 0x7da1b2406830>]]] | keyword[def] identifier[profile_distribution] ( identifier[data] ):
literal[string]
keyword[if] identifier[len] ( identifier[data] )== literal[int] :
keyword[return] ( identifier[data] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] , identifier[np] . identifier[nan] )
keyword[else] :
identifier[ddof] = literal[int] keyword[if] identifier[len] ( identifier[data] )> literal[int] keyword[else] literal[int]
identifier[dist_mean] = identifier[np] . identifier[mean] ( identifier[data] )
identifier[dist_stdev] = identifier[np] . identifier[std] ( identifier[data] , identifier[ddof] = identifier[ddof] )
identifier[dist_min] , identifier[dist_quartile1] , identifier[dist_quartile2] , identifier[dist_quartile3] , identifier[dist_max] = identifier[np] . identifier[percentile] ( identifier[data] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[dist_skew] = identifier[skew] ( identifier[data] )
identifier[dist_kurtosis] = identifier[kurtosis] ( identifier[data] )
keyword[return] ( identifier[data] , identifier[dist_mean] , identifier[dist_stdev] , identifier[dist_skew] , identifier[dist_kurtosis] , identifier[dist_min] , identifier[dist_quartile1] , identifier[dist_quartile2] , identifier[dist_quartile3] , identifier[dist_max] ) | def profile_distribution(data):
"""
Compute the mean, standard deviation, min, quartile1, quartile2, quartile3, and max of a vector
Parameters
----------
data: array of real values
Returns
-------
features = dictionary containing the min, max, mean, and standard deviation
"""
if len(data) == 0:
return (data, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan) # depends on [control=['if'], data=[]]
else:
ddof = 1 if len(data) > 1 else 0
dist_mean = np.mean(data)
dist_stdev = np.std(data, ddof=ddof)
(dist_min, dist_quartile1, dist_quartile2, dist_quartile3, dist_max) = np.percentile(data, [0, 25, 50, 75, 100])
dist_skew = skew(data)
dist_kurtosis = kurtosis(data)
return (data, dist_mean, dist_stdev, dist_skew, dist_kurtosis, dist_min, dist_quartile1, dist_quartile2, dist_quartile3, dist_max) |
def get_imageid(vm_):
'''
Returns the ImageId to use
'''
image = config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
)
if image.startswith('ami-'):
return image
# a poor man's cache
if not hasattr(get_imageid, 'images'):
get_imageid.images = {}
elif image in get_imageid.images:
return get_imageid.images[image]
params = {'Action': 'DescribeImages',
'Filter.0.Name': 'name',
'Filter.0.Value.0': image}
# Query AWS, sort by 'creationDate' and get the last imageId
_t = lambda x: datetime.datetime.strptime(x['creationDate'], '%Y-%m-%dT%H:%M:%S.%fZ')
image_id = sorted(aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'),
lambda i, j: salt.utils.compat.cmp(_t(i), _t(j))
)[-1]['imageId']
get_imageid.images[image] = image_id
return image_id | def function[get_imageid, parameter[vm_]]:
constant[
Returns the ImageId to use
]
variable[image] assign[=] call[name[config].get_cloud_config_value, parameter[constant[image], name[vm_], name[__opts__]]]
if call[name[image].startswith, parameter[constant[ami-]]] begin[:]
return[name[image]]
if <ast.UnaryOp object at 0x7da20e9556c0> begin[:]
name[get_imageid].images assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73700>, <ast.Constant object at 0x7da18bc721d0>, <ast.Constant object at 0x7da18bc71510>], [<ast.Constant object at 0x7da18bc70f70>, <ast.Constant object at 0x7da18bc736a0>, <ast.Name object at 0x7da18bc73100>]]
variable[_t] assign[=] <ast.Lambda object at 0x7da18bc71c00>
variable[image_id] assign[=] call[call[call[name[sorted], parameter[call[name[aws].query, parameter[name[params]]], <ast.Lambda object at 0x7da20cabe200>]]][<ast.UnaryOp object at 0x7da20cabecb0>]][constant[imageId]]
call[name[get_imageid].images][name[image]] assign[=] name[image_id]
return[name[image_id]] | keyword[def] identifier[get_imageid] ( identifier[vm_] ):
literal[string]
identifier[image] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[search_global] = keyword[False]
)
keyword[if] identifier[image] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[image]
keyword[if] keyword[not] identifier[hasattr] ( identifier[get_imageid] , literal[string] ):
identifier[get_imageid] . identifier[images] ={}
keyword[elif] identifier[image] keyword[in] identifier[get_imageid] . identifier[images] :
keyword[return] identifier[get_imageid] . identifier[images] [ identifier[image] ]
identifier[params] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[image] }
identifier[_t] = keyword[lambda] identifier[x] : identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[x] [ literal[string] ], literal[string] )
identifier[image_id] = identifier[sorted] ( identifier[aws] . identifier[query] ( identifier[params] , identifier[location] = identifier[get_location] (),
identifier[provider] = identifier[get_provider] (), identifier[opts] = identifier[__opts__] , identifier[sigver] = literal[string] ),
keyword[lambda] identifier[i] , identifier[j] : identifier[salt] . identifier[utils] . identifier[compat] . identifier[cmp] ( identifier[_t] ( identifier[i] ), identifier[_t] ( identifier[j] ))
)[- literal[int] ][ literal[string] ]
identifier[get_imageid] . identifier[images] [ identifier[image] ]= identifier[image_id]
keyword[return] identifier[image_id] | def get_imageid(vm_):
"""
Returns the ImageId to use
"""
image = config.get_cloud_config_value('image', vm_, __opts__, search_global=False)
if image.startswith('ami-'):
return image # depends on [control=['if'], data=[]]
# a poor man's cache
if not hasattr(get_imageid, 'images'):
get_imageid.images = {} # depends on [control=['if'], data=[]]
elif image in get_imageid.images:
return get_imageid.images[image] # depends on [control=['if'], data=['image']]
params = {'Action': 'DescribeImages', 'Filter.0.Name': 'name', 'Filter.0.Value.0': image}
# Query AWS, sort by 'creationDate' and get the last imageId
_t = lambda x: datetime.datetime.strptime(x['creationDate'], '%Y-%m-%dT%H:%M:%S.%fZ')
image_id = sorted(aws.query(params, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4'), lambda i, j: salt.utils.compat.cmp(_t(i), _t(j)))[-1]['imageId']
get_imageid.images[image] = image_id
return image_id |
def canonicalize(method, resource, query_parameters, headers):
"""Canonicalize method, resource
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the
signature will additionally contain the `x-goog-resumable`
header, and the method changed to POST. See the signed URL
docs regarding this flow:
https://cloud.google.com/storage/docs/access-control/signed-urls
:type resource: str
:param resource: A pointer to a specific resource
(typically, ``/bucket-name/path/to/blob.txt``).
:type query_parameters: dict
:param query_parameters:
(Optional) Additional query paramtersto be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers#query
:type headers: Union[dict|List(Tuple(str,str))]
:param headers:
(Optional) Additional HTTP headers to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers
Requests using the signed URL *must* pass the specified header
(name and value) with each request for the URL.
:rtype: :class:_Canonical
:returns: Canonical method, resource, query_parameters, and headers.
"""
headers, _ = get_canonical_headers(headers)
if method == "RESUMABLE":
method = "POST"
headers.append("x-goog-resumable:start")
if query_parameters is None:
return _Canonical(method, resource, [], headers)
normalized_qp = sorted(
(key.lower(), value and value.strip() or "")
for key, value in query_parameters.items()
)
encoded_qp = six.moves.urllib.parse.urlencode(normalized_qp)
canonical_resource = "{}?{}".format(resource, encoded_qp)
return _Canonical(method, canonical_resource, normalized_qp, headers) | def function[canonicalize, parameter[method, resource, query_parameters, headers]]:
constant[Canonicalize method, resource
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the
signature will additionally contain the `x-goog-resumable`
header, and the method changed to POST. See the signed URL
docs regarding this flow:
https://cloud.google.com/storage/docs/access-control/signed-urls
:type resource: str
:param resource: A pointer to a specific resource
(typically, ``/bucket-name/path/to/blob.txt``).
:type query_parameters: dict
:param query_parameters:
(Optional) Additional query paramtersto be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers#query
:type headers: Union[dict|List(Tuple(str,str))]
:param headers:
(Optional) Additional HTTP headers to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers
Requests using the signed URL *must* pass the specified header
(name and value) with each request for the URL.
:rtype: :class:_Canonical
:returns: Canonical method, resource, query_parameters, and headers.
]
<ast.Tuple object at 0x7da207f02650> assign[=] call[name[get_canonical_headers], parameter[name[headers]]]
if compare[name[method] equal[==] constant[RESUMABLE]] begin[:]
variable[method] assign[=] constant[POST]
call[name[headers].append, parameter[constant[x-goog-resumable:start]]]
if compare[name[query_parameters] is constant[None]] begin[:]
return[call[name[_Canonical], parameter[name[method], name[resource], list[[]], name[headers]]]]
variable[normalized_qp] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da204564760>]]
variable[encoded_qp] assign[=] call[name[six].moves.urllib.parse.urlencode, parameter[name[normalized_qp]]]
variable[canonical_resource] assign[=] call[constant[{}?{}].format, parameter[name[resource], name[encoded_qp]]]
return[call[name[_Canonical], parameter[name[method], name[canonical_resource], name[normalized_qp], name[headers]]]] | keyword[def] identifier[canonicalize] ( identifier[method] , identifier[resource] , identifier[query_parameters] , identifier[headers] ):
literal[string]
identifier[headers] , identifier[_] = identifier[get_canonical_headers] ( identifier[headers] )
keyword[if] identifier[method] == literal[string] :
identifier[method] = literal[string]
identifier[headers] . identifier[append] ( literal[string] )
keyword[if] identifier[query_parameters] keyword[is] keyword[None] :
keyword[return] identifier[_Canonical] ( identifier[method] , identifier[resource] ,[], identifier[headers] )
identifier[normalized_qp] = identifier[sorted] (
( identifier[key] . identifier[lower] (), identifier[value] keyword[and] identifier[value] . identifier[strip] () keyword[or] literal[string] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[query_parameters] . identifier[items] ()
)
identifier[encoded_qp] = identifier[six] . identifier[moves] . identifier[urllib] . identifier[parse] . identifier[urlencode] ( identifier[normalized_qp] )
identifier[canonical_resource] = literal[string] . identifier[format] ( identifier[resource] , identifier[encoded_qp] )
keyword[return] identifier[_Canonical] ( identifier[method] , identifier[canonical_resource] , identifier[normalized_qp] , identifier[headers] ) | def canonicalize(method, resource, query_parameters, headers):
"""Canonicalize method, resource
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the
signature will additionally contain the `x-goog-resumable`
header, and the method changed to POST. See the signed URL
docs regarding this flow:
https://cloud.google.com/storage/docs/access-control/signed-urls
:type resource: str
:param resource: A pointer to a specific resource
(typically, ``/bucket-name/path/to/blob.txt``).
:type query_parameters: dict
:param query_parameters:
(Optional) Additional query paramtersto be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers#query
:type headers: Union[dict|List(Tuple(str,str))]
:param headers:
(Optional) Additional HTTP headers to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers
Requests using the signed URL *must* pass the specified header
(name and value) with each request for the URL.
:rtype: :class:_Canonical
:returns: Canonical method, resource, query_parameters, and headers.
"""
(headers, _) = get_canonical_headers(headers)
if method == 'RESUMABLE':
method = 'POST'
headers.append('x-goog-resumable:start') # depends on [control=['if'], data=['method']]
if query_parameters is None:
return _Canonical(method, resource, [], headers) # depends on [control=['if'], data=[]]
normalized_qp = sorted(((key.lower(), value and value.strip() or '') for (key, value) in query_parameters.items()))
encoded_qp = six.moves.urllib.parse.urlencode(normalized_qp)
canonical_resource = '{}?{}'.format(resource, encoded_qp)
return _Canonical(method, canonical_resource, normalized_qp, headers) |
def put_logs(self, request):
""" Put logs to log service. up to 512000 logs up to 10MB size
Unsuccessful opertaion will cause an LogException.
:type request: PutLogsRequest
:param request: the PutLogs request parameters class
:return: PutLogsResponse
:raise: LogException
"""
if len(request.get_log_items()) > 512000:
raise LogException('InvalidLogSize',
"logItems' length exceeds maximum limitation: 512000 lines. now: {0}".format(
len(request.get_log_items())))
logGroup = LogGroup()
logGroup.Topic = request.get_topic()
if request.get_source():
logGroup.Source = request.get_source()
else:
if self._source == '127.0.0.1':
self._source = Util.get_host_ip(request.get_project() + '.' + self._logHost)
logGroup.Source = self._source
for logItem in request.get_log_items():
log = logGroup.Logs.add()
log.Time = logItem.get_time()
contents = logItem.get_contents()
for key, value in contents:
content = log.Contents.add()
content.Key = self._get_unicode(key)
content.Value = self._get_binary(value)
if request.get_log_tags() is not None:
tags = request.get_log_tags()
for key, value in tags:
pb_tag = logGroup.LogTags.add()
pb_tag.Key = key
pb_tag.Value = value
body = logGroup.SerializeToString()
if len(body) > 10 * 1024 * 1024: # 10 MB
raise LogException('InvalidLogSize',
"logItems' size exceeds maximum limitation: 10 MB. now: {0} MB.".format(
len(body) / 1024.0 / 1024))
headers = {'x-log-bodyrawsize': str(len(body)), 'Content-Type': 'application/x-protobuf'}
is_compress = request.get_compress()
compress_data = None
if is_compress:
if lz4:
headers['x-log-compresstype'] = 'lz4'
compress_data = lz_compresss(body)
else:
headers['x-log-compresstype'] = 'deflate'
compress_data = zlib.compress(body)
params = {}
logstore = request.get_logstore()
project = request.get_project()
if request.get_hash_key() is not None:
resource = '/logstores/' + logstore + "/shards/route"
params["key"] = request.get_hash_key()
else:
resource = '/logstores/' + logstore + "/shards/lb"
if is_compress:
(resp, header) = self._send('POST', project, compress_data, resource, params, headers)
else:
(resp, header) = self._send('POST', project, body, resource, params, headers)
return PutLogsResponse(header, resp) | def function[put_logs, parameter[self, request]]:
constant[ Put logs to log service. up to 512000 logs up to 10MB size
Unsuccessful opertaion will cause an LogException.
:type request: PutLogsRequest
:param request: the PutLogs request parameters class
:return: PutLogsResponse
:raise: LogException
]
if compare[call[name[len], parameter[call[name[request].get_log_items, parameter[]]]] greater[>] constant[512000]] begin[:]
<ast.Raise object at 0x7da2047ebb20>
variable[logGroup] assign[=] call[name[LogGroup], parameter[]]
name[logGroup].Topic assign[=] call[name[request].get_topic, parameter[]]
if call[name[request].get_source, parameter[]] begin[:]
name[logGroup].Source assign[=] call[name[request].get_source, parameter[]]
for taget[name[logItem]] in starred[call[name[request].get_log_items, parameter[]]] begin[:]
variable[log] assign[=] call[name[logGroup].Logs.add, parameter[]]
name[log].Time assign[=] call[name[logItem].get_time, parameter[]]
variable[contents] assign[=] call[name[logItem].get_contents, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b08b8340>, <ast.Name object at 0x7da1b08b8400>]]] in starred[name[contents]] begin[:]
variable[content] assign[=] call[name[log].Contents.add, parameter[]]
name[content].Key assign[=] call[name[self]._get_unicode, parameter[name[key]]]
name[content].Value assign[=] call[name[self]._get_binary, parameter[name[value]]]
if compare[call[name[request].get_log_tags, parameter[]] is_not constant[None]] begin[:]
variable[tags] assign[=] call[name[request].get_log_tags, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b08baa40>, <ast.Name object at 0x7da1b08ba9e0>]]] in starred[name[tags]] begin[:]
variable[pb_tag] assign[=] call[name[logGroup].LogTags.add, parameter[]]
name[pb_tag].Key assign[=] name[key]
name[pb_tag].Value assign[=] name[value]
variable[body] assign[=] call[name[logGroup].SerializeToString, parameter[]]
if compare[call[name[len], parameter[name[body]]] greater[>] binary_operation[binary_operation[constant[10] * constant[1024]] * constant[1024]]] begin[:]
<ast.Raise object at 0x7da1b08ba680>
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b08ba650>, <ast.Constant object at 0x7da1b08ba8c0>], [<ast.Call object at 0x7da1b08ba4a0>, <ast.Constant object at 0x7da1b08ba950>]]
variable[is_compress] assign[=] call[name[request].get_compress, parameter[]]
variable[compress_data] assign[=] constant[None]
if name[is_compress] begin[:]
if name[lz4] begin[:]
call[name[headers]][constant[x-log-compresstype]] assign[=] constant[lz4]
variable[compress_data] assign[=] call[name[lz_compresss], parameter[name[body]]]
variable[params] assign[=] dictionary[[], []]
variable[logstore] assign[=] call[name[request].get_logstore, parameter[]]
variable[project] assign[=] call[name[request].get_project, parameter[]]
if compare[call[name[request].get_hash_key, parameter[]] is_not constant[None]] begin[:]
variable[resource] assign[=] binary_operation[binary_operation[constant[/logstores/] + name[logstore]] + constant[/shards/route]]
call[name[params]][constant[key]] assign[=] call[name[request].get_hash_key, parameter[]]
if name[is_compress] begin[:]
<ast.Tuple object at 0x7da1b08b9e10> assign[=] call[name[self]._send, parameter[constant[POST], name[project], name[compress_data], name[resource], name[params], name[headers]]]
return[call[name[PutLogsResponse], parameter[name[header], name[resp]]]] | keyword[def] identifier[put_logs] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] identifier[len] ( identifier[request] . identifier[get_log_items] ())> literal[int] :
keyword[raise] identifier[LogException] ( literal[string] ,
literal[string] . identifier[format] (
identifier[len] ( identifier[request] . identifier[get_log_items] ())))
identifier[logGroup] = identifier[LogGroup] ()
identifier[logGroup] . identifier[Topic] = identifier[request] . identifier[get_topic] ()
keyword[if] identifier[request] . identifier[get_source] ():
identifier[logGroup] . identifier[Source] = identifier[request] . identifier[get_source] ()
keyword[else] :
keyword[if] identifier[self] . identifier[_source] == literal[string] :
identifier[self] . identifier[_source] = identifier[Util] . identifier[get_host_ip] ( identifier[request] . identifier[get_project] ()+ literal[string] + identifier[self] . identifier[_logHost] )
identifier[logGroup] . identifier[Source] = identifier[self] . identifier[_source]
keyword[for] identifier[logItem] keyword[in] identifier[request] . identifier[get_log_items] ():
identifier[log] = identifier[logGroup] . identifier[Logs] . identifier[add] ()
identifier[log] . identifier[Time] = identifier[logItem] . identifier[get_time] ()
identifier[contents] = identifier[logItem] . identifier[get_contents] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[contents] :
identifier[content] = identifier[log] . identifier[Contents] . identifier[add] ()
identifier[content] . identifier[Key] = identifier[self] . identifier[_get_unicode] ( identifier[key] )
identifier[content] . identifier[Value] = identifier[self] . identifier[_get_binary] ( identifier[value] )
keyword[if] identifier[request] . identifier[get_log_tags] () keyword[is] keyword[not] keyword[None] :
identifier[tags] = identifier[request] . identifier[get_log_tags] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[tags] :
identifier[pb_tag] = identifier[logGroup] . identifier[LogTags] . identifier[add] ()
identifier[pb_tag] . identifier[Key] = identifier[key]
identifier[pb_tag] . identifier[Value] = identifier[value]
identifier[body] = identifier[logGroup] . identifier[SerializeToString] ()
keyword[if] identifier[len] ( identifier[body] )> literal[int] * literal[int] * literal[int] :
keyword[raise] identifier[LogException] ( literal[string] ,
literal[string] . identifier[format] (
identifier[len] ( identifier[body] )/ literal[int] / literal[int] ))
identifier[headers] ={ literal[string] : identifier[str] ( identifier[len] ( identifier[body] )), literal[string] : literal[string] }
identifier[is_compress] = identifier[request] . identifier[get_compress] ()
identifier[compress_data] = keyword[None]
keyword[if] identifier[is_compress] :
keyword[if] identifier[lz4] :
identifier[headers] [ literal[string] ]= literal[string]
identifier[compress_data] = identifier[lz_compresss] ( identifier[body] )
keyword[else] :
identifier[headers] [ literal[string] ]= literal[string]
identifier[compress_data] = identifier[zlib] . identifier[compress] ( identifier[body] )
identifier[params] ={}
identifier[logstore] = identifier[request] . identifier[get_logstore] ()
identifier[project] = identifier[request] . identifier[get_project] ()
keyword[if] identifier[request] . identifier[get_hash_key] () keyword[is] keyword[not] keyword[None] :
identifier[resource] = literal[string] + identifier[logstore] + literal[string]
identifier[params] [ literal[string] ]= identifier[request] . identifier[get_hash_key] ()
keyword[else] :
identifier[resource] = literal[string] + identifier[logstore] + literal[string]
keyword[if] identifier[is_compress] :
( identifier[resp] , identifier[header] )= identifier[self] . identifier[_send] ( literal[string] , identifier[project] , identifier[compress_data] , identifier[resource] , identifier[params] , identifier[headers] )
keyword[else] :
( identifier[resp] , identifier[header] )= identifier[self] . identifier[_send] ( literal[string] , identifier[project] , identifier[body] , identifier[resource] , identifier[params] , identifier[headers] )
keyword[return] identifier[PutLogsResponse] ( identifier[header] , identifier[resp] ) | def put_logs(self, request):
""" Put logs to log service. up to 512000 logs up to 10MB size
Unsuccessful opertaion will cause an LogException.
:type request: PutLogsRequest
:param request: the PutLogs request parameters class
:return: PutLogsResponse
:raise: LogException
"""
if len(request.get_log_items()) > 512000:
raise LogException('InvalidLogSize', "logItems' length exceeds maximum limitation: 512000 lines. now: {0}".format(len(request.get_log_items()))) # depends on [control=['if'], data=[]]
logGroup = LogGroup()
logGroup.Topic = request.get_topic()
if request.get_source():
logGroup.Source = request.get_source() # depends on [control=['if'], data=[]]
else:
if self._source == '127.0.0.1':
self._source = Util.get_host_ip(request.get_project() + '.' + self._logHost) # depends on [control=['if'], data=[]]
logGroup.Source = self._source
for logItem in request.get_log_items():
log = logGroup.Logs.add()
log.Time = logItem.get_time()
contents = logItem.get_contents()
for (key, value) in contents:
content = log.Contents.add()
content.Key = self._get_unicode(key)
content.Value = self._get_binary(value) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['logItem']]
if request.get_log_tags() is not None:
tags = request.get_log_tags()
for (key, value) in tags:
pb_tag = logGroup.LogTags.add()
pb_tag.Key = key
pb_tag.Value = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
body = logGroup.SerializeToString()
if len(body) > 10 * 1024 * 1024: # 10 MB
raise LogException('InvalidLogSize', "logItems' size exceeds maximum limitation: 10 MB. now: {0} MB.".format(len(body) / 1024.0 / 1024)) # depends on [control=['if'], data=[]]
headers = {'x-log-bodyrawsize': str(len(body)), 'Content-Type': 'application/x-protobuf'}
is_compress = request.get_compress()
compress_data = None
if is_compress:
if lz4:
headers['x-log-compresstype'] = 'lz4'
compress_data = lz_compresss(body) # depends on [control=['if'], data=[]]
else:
headers['x-log-compresstype'] = 'deflate'
compress_data = zlib.compress(body) # depends on [control=['if'], data=[]]
params = {}
logstore = request.get_logstore()
project = request.get_project()
if request.get_hash_key() is not None:
resource = '/logstores/' + logstore + '/shards/route'
params['key'] = request.get_hash_key() # depends on [control=['if'], data=[]]
else:
resource = '/logstores/' + logstore + '/shards/lb'
if is_compress:
(resp, header) = self._send('POST', project, compress_data, resource, params, headers) # depends on [control=['if'], data=[]]
else:
(resp, header) = self._send('POST', project, body, resource, params, headers)
return PutLogsResponse(header, resp) |
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
    """Return a factor to scale fonts by for the given figure.

    The factor is the square root of the ratio between the figure's area
    and the area of a fiducial figure with dimensions
    (`fiducial_width`, `fiducial_height`); a figure of exactly the
    fiducial size therefore yields a factor of 1.
    """
    width, height = fig.get_size_inches()
    area_ratio = (width * height) / (fiducial_width * fiducial_height)
    return area_ratio ** 0.5
constant[Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
]
<ast.Tuple object at 0x7da18f812d40> assign[=] call[name[fig].get_size_inches, parameter[]]
return[binary_operation[binary_operation[binary_operation[name[width] * name[height]] / binary_operation[name[fiducial_width] * name[fiducial_height]]] ** constant[0.5]]] | keyword[def] identifier[get_scale_fac] ( identifier[fig] , identifier[fiducial_width] = literal[int] , identifier[fiducial_height] = literal[int] ):
literal[string]
identifier[width] , identifier[height] = identifier[fig] . identifier[get_size_inches] ()
keyword[return] ( identifier[width] * identifier[height] /( identifier[fiducial_width] * identifier[fiducial_height] ))** literal[int] | def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
"""Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
"""
(width, height) = fig.get_size_inches()
return (width * height / (fiducial_width * fiducial_height)) ** 0.5 |
def parse_specification(spec):
    """
    Parse a requirement from a python distribution metadata and return a
    tuple with name, extras, constraints, marker and url components.

    This method does not enforce strict specifications but extracts the
    information which is assumed to be *correct*. As such no errors are raised.

    :param str spec: requirement string (e.g. a Requires-Dist value)
    :return: tuple ``(name, extras, constraints, marker, url)`` where
        ``extras`` is a list of strings and every other component is a
        string, empty when the component is absent

    Example
    -------
    spec = 'requests[security, tests] >=3.3.0 ; foo >= 2.7 or bar == 1'
    ('requests', ['security', 'tests'], '>=3.3.0', 'foo >= 2.7 or bar == 1', '')
    """
    # Defaults: if the regex below does not match, the whole spec is the name.
    name, extras, const = spec, [], ''

    # Remove excess whitespace
    spec = ' '.join(p for p in spec.split(' ') if p).strip()

    # Extract marker (Assumes that there can only be one ';' inside the spec)
    spec, marker = split_spec(spec, ';')

    # Extract url (Assumes that there can only be one '@' inside the spec)
    spec, url = split_spec(spec, '@')

    # Find name, extras and constraints
    r = PARTIAL_PYPI_SPEC_PATTERN.match(spec)
    if r:
        # Normalize name
        name = r.group('name')

        # Clean extras: split on commas and strip each entry
        extras = r.group('extras')
        extras = [e.strip() for e in extras.split(',') if e] if extras else []

        # Clean constraints: collapse all internal whitespace
        const = r.group('constraints')
        const = ''.join(c for c in const.split(' ') if c).strip()
        if const.startswith('(') and const.endswith(')'):
            # Remove parens
            const = const[1:-1]

    return name, extras, const, marker, url
constant[
Parse a requirement from a python distribution metadata and return a
tuple with name, extras, constraints, marker and url components.
This method does not enforce strict specifications but extracts the
information which is assumed to be *correct*. As such no errors are raised.
Example
-------
spec = 'requests[security, tests] >=3.3.0 ; foo >= 2.7 or bar == 1'
('requests', ['security', 'pyfoo'], '>=3.3.0', 'foo >= 2.7 or bar == 1', '')
]
<ast.Tuple object at 0x7da1b083d990> assign[=] tuple[[<ast.Name object at 0x7da1b083c430>, <ast.List object at 0x7da1b083c820>, <ast.Constant object at 0x7da1b083c8b0>]]
variable[spec] assign[=] call[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b083db70>]].strip, parameter[]]
<ast.Tuple object at 0x7da1b083c6a0> assign[=] call[name[split_spec], parameter[name[spec], constant[;]]]
<ast.Tuple object at 0x7da1b083db40> assign[=] call[name[split_spec], parameter[name[spec], constant[@]]]
variable[r] assign[=] call[name[PARTIAL_PYPI_SPEC_PATTERN].match, parameter[name[spec]]]
if name[r] begin[:]
variable[name] assign[=] call[name[r].group, parameter[constant[name]]]
variable[extras] assign[=] call[name[r].group, parameter[constant[extras]]]
variable[extras] assign[=] <ast.IfExp object at 0x7da1b083c070>
variable[const] assign[=] call[name[r].group, parameter[constant[constraints]]]
variable[const] assign[=] call[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b0888fd0>]].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b088abc0> begin[:]
variable[const] assign[=] call[name[const]][<ast.Slice object at 0x7da1b088a290>]
return[tuple[[<ast.Name object at 0x7da1b08451e0>, <ast.Name object at 0x7da1b08456c0>, <ast.Name object at 0x7da1b08479a0>, <ast.Name object at 0x7da1b0846050>, <ast.Name object at 0x7da1b0847a90>]]] | keyword[def] identifier[parse_specification] ( identifier[spec] ):
literal[string]
identifier[name] , identifier[extras] , identifier[const] = identifier[spec] ,[], literal[string]
identifier[spec] = literal[string] . identifier[join] ( identifier[p] keyword[for] identifier[p] keyword[in] identifier[spec] . identifier[split] ( literal[string] ) keyword[if] identifier[p] ). identifier[strip] ()
identifier[spec] , identifier[marker] = identifier[split_spec] ( identifier[spec] , literal[string] )
identifier[spec] , identifier[url] = identifier[split_spec] ( identifier[spec] , literal[string] )
identifier[r] = identifier[PARTIAL_PYPI_SPEC_PATTERN] . identifier[match] ( identifier[spec] )
keyword[if] identifier[r] :
identifier[name] = identifier[r] . identifier[group] ( literal[string] )
identifier[extras] = identifier[r] . identifier[group] ( literal[string] )
identifier[extras] =[ identifier[e] . identifier[strip] () keyword[for] identifier[e] keyword[in] identifier[extras] . identifier[split] ( literal[string] ) keyword[if] identifier[e] ] keyword[if] identifier[extras] keyword[else] []
identifier[const] = identifier[r] . identifier[group] ( literal[string] )
identifier[const] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[const] . identifier[split] ( literal[string] ) keyword[if] identifier[c] ). identifier[strip] ()
keyword[if] identifier[const] . identifier[startswith] ( literal[string] ) keyword[and] identifier[const] . identifier[endswith] ( literal[string] ):
identifier[const] = identifier[const] [ literal[int] :- literal[int] ]
keyword[return] identifier[name] , identifier[extras] , identifier[const] , identifier[marker] , identifier[url] | def parse_specification(spec):
"""
Parse a requirement from a python distribution metadata and return a
tuple with name, extras, constraints, marker and url components.
This method does not enforce strict specifications but extracts the
information which is assumed to be *correct*. As such no errors are raised.
Example
-------
spec = 'requests[security, tests] >=3.3.0 ; foo >= 2.7 or bar == 1'
('requests', ['security', 'pyfoo'], '>=3.3.0', 'foo >= 2.7 or bar == 1', '')
"""
(name, extras, const) = (spec, [], '')
# Remove excess whitespace
spec = ' '.join((p for p in spec.split(' ') if p)).strip()
# Extract marker (Assumes that there can only be one ';' inside the spec)
(spec, marker) = split_spec(spec, ';')
# Extract url (Assumes that there can only be one '@' inside the spec)
(spec, url) = split_spec(spec, '@')
# Find name, extras and constraints
r = PARTIAL_PYPI_SPEC_PATTERN.match(spec)
if r:
# Normalize name
name = r.group('name')
# Clean extras
extras = r.group('extras')
extras = [e.strip() for e in extras.split(',') if e] if extras else []
# Clean constraints
const = r.group('constraints')
const = ''.join((c for c in const.split(' ') if c)).strip()
if const.startswith('(') and const.endswith(')'):
# Remove parens
const = const[1:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return (name, extras, const, marker, url) |
def set_dtype(self, opt, dtype):
    """Set the `dtype` attribute unless it already holds a value.

    If `self.dtype` exists and is not None, nothing is changed.
    Otherwise the attribute is set from the 'DataType' option when that
    option is not None, falling back to the `dtype` parameter.

    Parameters
    ----------
    opt : :class:`cdict.ConstrainedDict` object
        Algorithm options
    dtype : data-type
        Data type for working variables (overridden by 'DataType' option)
    """
    # getattr with a None default covers both "missing" and "set to None".
    if getattr(self, 'dtype', None) is not None:
        return
    override = opt['DataType']
    self.dtype = dtype if override is None else np.dtype(override)
constant[Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option)
]
if <ast.BoolOp object at 0x7da1b074cca0> begin[:]
if compare[call[name[opt]][constant[DataType]] is constant[None]] begin[:]
name[self].dtype assign[=] name[dtype] | keyword[def] identifier[set_dtype] ( identifier[self] , identifier[opt] , identifier[dtype] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[or] identifier[self] . identifier[dtype] keyword[is] keyword[None] :
keyword[if] identifier[opt] [ literal[string] ] keyword[is] keyword[None] :
identifier[self] . identifier[dtype] = identifier[dtype]
keyword[else] :
identifier[self] . identifier[dtype] = identifier[np] . identifier[dtype] ( identifier[opt] [ literal[string] ]) | def set_dtype(self, opt, dtype):
"""Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option)
"""
# Take no action of self.dtype exists and is not None
if not hasattr(self, 'dtype') or self.dtype is None:
# DataType option overrides explicitly specified data type
if opt['DataType'] is None:
self.dtype = dtype # depends on [control=['if'], data=[]]
else:
self.dtype = np.dtype(opt['DataType']) # depends on [control=['if'], data=[]] |
def enable(soft_fail=False):
    """
    Enable ufw

    :param soft_fail: If set to True silently disables IPv6 support in ufw,
                      otherwise a UFWIPv6Error exception is raised when IP6
                      support is broken.
    :returns: True if ufw is successfully enabled
    """
    # Nothing to do if the firewall is already active.
    if is_enabled():
        return True

    if not is_ipv6_ok(soft_fail):
        disable_ipv6()

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'enable'],
                                     universal_newlines=True, env=env)

    # ufw reports success with this exact line on stdout.
    matches = re.findall('^Firewall is active and enabled on system startup\n',
                         output, re.M)
    hookenv.log(output, level='DEBUG')

    if not matches:
        hookenv.log("ufw couldn't be enabled", level='WARN')
        return False
    hookenv.log("ufw enabled", level='INFO')
    return True
constant[
Enable ufw
:param soft_fail: If set to True silently disables IPv6 support in ufw,
otherwise a UFWIPv6Error exception is raised when IP6
support is broken.
:returns: True if ufw is successfully enabled
]
if call[name[is_enabled], parameter[]] begin[:]
return[constant[True]]
if <ast.UnaryOp object at 0x7da18fe90c70> begin[:]
call[name[disable_ipv6], parameter[]]
variable[output] assign[=] call[name[subprocess].check_output, parameter[list[[<ast.Constant object at 0x7da18fe92800>, <ast.Constant object at 0x7da18fe93070>]]]]
variable[m] assign[=] call[name[re].findall, parameter[constant[^Firewall is active and enabled on system startup
], name[output], name[re].M]]
call[name[hookenv].log, parameter[name[output]]]
if compare[call[name[len], parameter[name[m]]] equal[==] constant[0]] begin[:]
call[name[hookenv].log, parameter[constant[ufw couldn't be enabled]]]
return[constant[False]] | keyword[def] identifier[enable] ( identifier[soft_fail] = keyword[False] ):
literal[string]
keyword[if] identifier[is_enabled] ():
keyword[return] keyword[True]
keyword[if] keyword[not] identifier[is_ipv6_ok] ( identifier[soft_fail] ):
identifier[disable_ipv6] ()
identifier[output] = identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] ],
identifier[universal_newlines] = keyword[True] ,
identifier[env] ={ literal[string] : literal[string] ,
literal[string] : identifier[os] . identifier[environ] [ literal[string] ]})
identifier[m] = identifier[re] . identifier[findall] ( literal[string] ,
identifier[output] , identifier[re] . identifier[M] )
identifier[hookenv] . identifier[log] ( identifier[output] , identifier[level] = literal[string] )
keyword[if] identifier[len] ( identifier[m] )== literal[int] :
identifier[hookenv] . identifier[log] ( literal[string] , identifier[level] = literal[string] )
keyword[return] keyword[False]
keyword[else] :
identifier[hookenv] . identifier[log] ( literal[string] , identifier[level] = literal[string] )
keyword[return] keyword[True] | def enable(soft_fail=False):
"""
Enable ufw
:param soft_fail: If set to True silently disables IPv6 support in ufw,
otherwise a UFWIPv6Error exception is raised when IP6
support is broken.
:returns: True if ufw is successfully enabled
"""
if is_enabled():
return True # depends on [control=['if'], data=[]]
if not is_ipv6_ok(soft_fail):
disable_ipv6() # depends on [control=['if'], data=[]]
output = subprocess.check_output(['ufw', 'enable'], universal_newlines=True, env={'LANG': 'en_US', 'PATH': os.environ['PATH']})
m = re.findall('^Firewall is active and enabled on system startup\n', output, re.M)
hookenv.log(output, level='DEBUG')
if len(m) == 0:
hookenv.log("ufw couldn't be enabled", level='WARN')
return False # depends on [control=['if'], data=[]]
else:
hookenv.log('ufw enabled', level='INFO')
return True |
def _import_config(filepath):
"""
Imports filetree and root_path variable values from the filepath.
:param filepath:
:return: root_path and filetree
"""
if not op.isfile(filepath):
raise IOError('Data config file not found. '
'Got: {0}'.format(filepath))
cfg = import_pyfile(filepath)
if not hasattr(cfg, 'root_path'):
raise KeyError('Config file root_path key not found.')
if not hasattr(cfg, 'filetree'):
raise KeyError('Config file filetree key not found.')
return cfg.root_path, cfg.filetree | def function[_import_config, parameter[filepath]]:
constant[
Imports filetree and root_path variable values from the filepath.
:param filepath:
:return: root_path and filetree
]
if <ast.UnaryOp object at 0x7da1b004d4e0> begin[:]
<ast.Raise object at 0x7da1b004c130>
variable[cfg] assign[=] call[name[import_pyfile], parameter[name[filepath]]]
if <ast.UnaryOp object at 0x7da1b004feb0> begin[:]
<ast.Raise object at 0x7da1b004d090>
if <ast.UnaryOp object at 0x7da1b004cd60> begin[:]
<ast.Raise object at 0x7da1b004c250>
return[tuple[[<ast.Attribute object at 0x7da1b004fe80>, <ast.Attribute object at 0x7da1b004f040>]]] | keyword[def] identifier[_import_config] ( identifier[filepath] ):
literal[string]
keyword[if] keyword[not] identifier[op] . identifier[isfile] ( identifier[filepath] ):
keyword[raise] identifier[IOError] ( literal[string]
literal[string] . identifier[format] ( identifier[filepath] ))
identifier[cfg] = identifier[import_pyfile] ( identifier[filepath] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[cfg] , literal[string] ):
keyword[raise] identifier[KeyError] ( literal[string] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[cfg] , literal[string] ):
keyword[raise] identifier[KeyError] ( literal[string] )
keyword[return] identifier[cfg] . identifier[root_path] , identifier[cfg] . identifier[filetree] | def _import_config(filepath):
"""
Imports filetree and root_path variable values from the filepath.
:param filepath:
:return: root_path and filetree
"""
if not op.isfile(filepath):
raise IOError('Data config file not found. Got: {0}'.format(filepath)) # depends on [control=['if'], data=[]]
cfg = import_pyfile(filepath)
if not hasattr(cfg, 'root_path'):
raise KeyError('Config file root_path key not found.') # depends on [control=['if'], data=[]]
if not hasattr(cfg, 'filetree'):
raise KeyError('Config file filetree key not found.') # depends on [control=['if'], data=[]]
return (cfg.root_path, cfg.filetree) |
def _sync_directories(from_directory, to_directory):
"""Sync to_directory with from_directory by copying each file in
to_directory with new contents. Files in to_directory will be
overwritten by files of the same name in from_directory. We need to
keep two copies of the log directory because otherwise TensorBoard
picks up temp files from `aws s3 sync` and then stops reading the
correct tfevent files. We walk the directory and copy each file
individually because the directory that TensorBoard watches needs to
always exist.
Args:
from_directory (str): The directory with updated files.
to_directory (str): The directory to be synced.
"""
if not os.path.exists(to_directory):
os.mkdir(to_directory)
for root, dirs, files in os.walk(from_directory):
to_root = root.replace(from_directory, to_directory)
for directory in dirs:
to_child_dir = os.path.join(to_root, directory)
if not os.path.exists(to_child_dir):
os.mkdir(to_child_dir)
for fname in files:
from_file = os.path.join(root, fname)
to_file = os.path.join(to_root, fname)
with open(from_file, 'rb') as a, open(to_file, 'wb') as b:
b.write(a.read()) | def function[_sync_directories, parameter[from_directory, to_directory]]:
constant[Sync to_directory with from_directory by copying each file in
to_directory with new contents. Files in to_directory will be
overwritten by files of the same name in from_directory. We need to
keep two copies of the log directory because otherwise TensorBoard
picks up temp files from `aws s3 sync` and then stops reading the
correct tfevent files. We walk the directory and copy each file
individually because the directory that TensorBoard watches needs to
always exist.
Args:
from_directory (str): The directory with updated files.
to_directory (str): The directory to be synced.
]
if <ast.UnaryOp object at 0x7da1b1c1acb0> begin[:]
call[name[os].mkdir, parameter[name[to_directory]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c1ab00>, <ast.Name object at 0x7da1b1c1b130>, <ast.Name object at 0x7da1b1c18e50>]]] in starred[call[name[os].walk, parameter[name[from_directory]]]] begin[:]
variable[to_root] assign[=] call[name[root].replace, parameter[name[from_directory], name[to_directory]]]
for taget[name[directory]] in starred[name[dirs]] begin[:]
variable[to_child_dir] assign[=] call[name[os].path.join, parameter[name[to_root], name[directory]]]
if <ast.UnaryOp object at 0x7da1b1c18970> begin[:]
call[name[os].mkdir, parameter[name[to_child_dir]]]
for taget[name[fname]] in starred[name[files]] begin[:]
variable[from_file] assign[=] call[name[os].path.join, parameter[name[root], name[fname]]]
variable[to_file] assign[=] call[name[os].path.join, parameter[name[to_root], name[fname]]]
with call[name[open], parameter[name[from_file], constant[rb]]] begin[:]
call[name[b].write, parameter[call[name[a].read, parameter[]]]] | keyword[def] identifier[_sync_directories] ( identifier[from_directory] , identifier[to_directory] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[to_directory] ):
identifier[os] . identifier[mkdir] ( identifier[to_directory] )
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[from_directory] ):
identifier[to_root] = identifier[root] . identifier[replace] ( identifier[from_directory] , identifier[to_directory] )
keyword[for] identifier[directory] keyword[in] identifier[dirs] :
identifier[to_child_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[to_root] , identifier[directory] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[to_child_dir] ):
identifier[os] . identifier[mkdir] ( identifier[to_child_dir] )
keyword[for] identifier[fname] keyword[in] identifier[files] :
identifier[from_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[fname] )
identifier[to_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[to_root] , identifier[fname] )
keyword[with] identifier[open] ( identifier[from_file] , literal[string] ) keyword[as] identifier[a] , identifier[open] ( identifier[to_file] , literal[string] ) keyword[as] identifier[b] :
identifier[b] . identifier[write] ( identifier[a] . identifier[read] ()) | def _sync_directories(from_directory, to_directory):
"""Sync to_directory with from_directory by copying each file in
to_directory with new contents. Files in to_directory will be
overwritten by files of the same name in from_directory. We need to
keep two copies of the log directory because otherwise TensorBoard
picks up temp files from `aws s3 sync` and then stops reading the
correct tfevent files. We walk the directory and copy each file
individually because the directory that TensorBoard watches needs to
always exist.
Args:
from_directory (str): The directory with updated files.
to_directory (str): The directory to be synced.
"""
if not os.path.exists(to_directory):
os.mkdir(to_directory) # depends on [control=['if'], data=[]]
for (root, dirs, files) in os.walk(from_directory):
to_root = root.replace(from_directory, to_directory)
for directory in dirs:
to_child_dir = os.path.join(to_root, directory)
if not os.path.exists(to_child_dir):
os.mkdir(to_child_dir) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['directory']]
for fname in files:
from_file = os.path.join(root, fname)
to_file = os.path.join(to_root, fname)
with open(from_file, 'rb') as a, open(to_file, 'wb') as b:
b.write(a.read()) # depends on [control=['with'], data=['a']] # depends on [control=['for'], data=['fname']] # depends on [control=['for'], data=[]] |
def _get_filesystem_path(self, url_path, basedir=settings.MEDIA_ROOT):
    """Makes a filesystem path from the specified URL path"""
    media_url = settings.MEDIA_URL
    if url_path.startswith(media_url):
        # Drop the media root URL prefix, keeping only the relative part.
        url_path = url_path[len(media_url):]
    return os.path.normpath(os.path.join(basedir, url2pathname(url_path)))
constant[Makes a filesystem path from the specified URL path]
if call[name[url_path].startswith, parameter[name[settings].MEDIA_URL]] begin[:]
variable[url_path] assign[=] call[name[url_path]][<ast.Slice object at 0x7da1b28d6ec0>]
return[call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[basedir], call[name[url2pathname], parameter[name[url_path]]]]]]]] | keyword[def] identifier[_get_filesystem_path] ( identifier[self] , identifier[url_path] , identifier[basedir] = identifier[settings] . identifier[MEDIA_ROOT] ):
literal[string]
keyword[if] identifier[url_path] . identifier[startswith] ( identifier[settings] . identifier[MEDIA_URL] ):
identifier[url_path] = identifier[url_path] [ identifier[len] ( identifier[settings] . identifier[MEDIA_URL] ):]
keyword[return] identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , identifier[url2pathname] ( identifier[url_path] ))) | def _get_filesystem_path(self, url_path, basedir=settings.MEDIA_ROOT):
"""Makes a filesystem path from the specified URL path"""
if url_path.startswith(settings.MEDIA_URL):
url_path = url_path[len(settings.MEDIA_URL):] # strip media root url # depends on [control=['if'], data=[]]
return os.path.normpath(os.path.join(basedir, url2pathname(url_path))) |
def _init_metadata(self):
    """stub"""
    # No time bounds by default; echoed into the metadata dict below.
    self._min_time_value = None
    self._max_time_value = None
    element_id = Id(self.my_osid_object_form._authority,
                    self.my_osid_object_form._namespace,
                    'time_value')
    default_duration = {'hours': 0, 'minutes': 0, 'seconds': 0}
    self._time_value_metadata = {
        'element_id': element_id,
        'element_label': 'Time Value',
        'instructions': 'enter a time duration string / duration',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_duration_values': [default_duration],
        'syntax': 'DURATION',
        'minimum_time': self._min_time_value,
        'maximum_time': self._max_time_value,
    }
constant[stub]
name[self]._min_time_value assign[=] constant[None]
name[self]._max_time_value assign[=] constant[None]
name[self]._time_value_metadata assign[=] dictionary[[<ast.Constant object at 0x7da18c4cd690>, <ast.Constant object at 0x7da18c4cc6a0>, <ast.Constant object at 0x7da18c4ccac0>, <ast.Constant object at 0x7da18c4ceec0>, <ast.Constant object at 0x7da18c4cfbe0>, <ast.Constant object at 0x7da18c4cf8b0>, <ast.Constant object at 0x7da18c4ce200>, <ast.Constant object at 0x7da18c4cead0>, <ast.Constant object at 0x7da18c4cfca0>, <ast.Constant object at 0x7da18c4cfcd0>, <ast.Constant object at 0x7da18c4cf730>], [<ast.Call object at 0x7da18c4cd0c0>, <ast.Constant object at 0x7da18c4cfa30>, <ast.Constant object at 0x7da18c4ceb90>, <ast.Constant object at 0x7da18c4cd960>, <ast.Constant object at 0x7da18c4cd4b0>, <ast.Constant object at 0x7da18c4cc0a0>, <ast.Constant object at 0x7da18c4cd6c0>, <ast.List object at 0x7da18c4cf160>, <ast.Constant object at 0x7da18c4cd2a0>, <ast.Attribute object at 0x7da18c4cc7c0>, <ast.Attribute object at 0x7da18c4cfd60>]] | keyword[def] identifier[_init_metadata] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_min_time_value] = keyword[None]
identifier[self] . identifier[_max_time_value] = keyword[None]
identifier[self] . identifier[_time_value_metadata] ={
literal[string] : identifier[Id] ( identifier[self] . identifier[my_osid_object_form] . identifier[_authority] ,
identifier[self] . identifier[my_osid_object_form] . identifier[_namespace] ,
literal[string] ),
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] :[{
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int]
}],
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[_min_time_value] ,
literal[string] : identifier[self] . identifier[_max_time_value]
} | def _init_metadata(self):
"""stub"""
self._min_time_value = None
self._max_time_value = None
self._time_value_metadata = {'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'time_value'), 'element_label': 'Time Value', 'instructions': 'enter a time duration string / duration', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_duration_values': [{'hours': 0, 'minutes': 0, 'seconds': 0}], 'syntax': 'DURATION', 'minimum_time': self._min_time_value, 'maximum_time': self._max_time_value} |
def survey_change_name(request, pk):
    """
    Rename a survey from an inline-edit POST.

    Works well with:
    http://www.appelsiini.net/projects/jeditable
    """
    survey = get_object_or_404(Survey, pk=pk)
    # Per-object permission check before mutating anything.
    if not request.user.has_perm("formly.change_survey_name", obj=survey):
        raise PermissionDenied()

    new_name = request.POST.get("name")
    survey.name = new_name
    survey.save()

    return JsonResponse({"status": "OK", "name": survey.name})
constant[
Works well with:
http://www.appelsiini.net/projects/jeditable
]
variable[survey] assign[=] call[name[get_object_or_404], parameter[name[Survey]]]
if <ast.UnaryOp object at 0x7da20cabe980> begin[:]
<ast.Raise object at 0x7da20cabed70>
name[survey].name assign[=] call[name[request].POST.get, parameter[constant[name]]]
call[name[survey].save, parameter[]]
return[call[name[JsonResponse], parameter[dictionary[[<ast.Constant object at 0x7da1b25d69e0>, <ast.Constant object at 0x7da1b25d60e0>], [<ast.Constant object at 0x7da1b25d73d0>, <ast.Attribute object at 0x7da1b25d6740>]]]]] | keyword[def] identifier[survey_change_name] ( identifier[request] , identifier[pk] ):
literal[string]
identifier[survey] = identifier[get_object_or_404] ( identifier[Survey] , identifier[pk] = identifier[pk] )
keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[has_perm] ( literal[string] , identifier[obj] = identifier[survey] ):
keyword[raise] identifier[PermissionDenied] ()
identifier[survey] . identifier[name] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] )
identifier[survey] . identifier[save] ()
keyword[return] identifier[JsonResponse] ({
literal[string] : literal[string] ,
literal[string] : identifier[survey] . identifier[name]
}) | def survey_change_name(request, pk):
"""
Works well with:
http://www.appelsiini.net/projects/jeditable
"""
survey = get_object_or_404(Survey, pk=pk)
if not request.user.has_perm('formly.change_survey_name', obj=survey):
raise PermissionDenied() # depends on [control=['if'], data=[]]
survey.name = request.POST.get('name')
survey.save()
return JsonResponse({'status': 'OK', 'name': survey.name}) |
def _CompileTemplate(
        template_str, builder, meta='{}', format_char='|', default_formatter='str',
        whitespace='smart'):
    """Compile the template string, calling methods on the 'program builder'.

    Args:
      template_str: The template string. It should not have any compilation
          options in the header -- those are parsed by FromString/FromFile
      builder: The interface of _ProgramBuilder isn't fixed. Use at your own
          risk.
      meta: The metacharacters to use, e.g. '{}', '[]'.
      default_formatter: The formatter to use for substitutions that are missing a
          formatter. The 'str' formatter the "default default" -- it just tries
          to convert the context value to a string in some unspecified manner.
      whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone
          on a line, with only whitespace on either side, then the whitespace is
          removed. In 'strip-line' mode, every line is stripped of its
          leading and trailing whitespace.

    Returns:
      The compiled program (obtained from the builder)

    Raises:
      The various subclasses of CompilationError. For example, if
      default_formatter=None, and a variable is missing a formatter, then
      MissingFormatter is raised.

    This function is public so it can be used by other tools, e.g. a syntax
    checking tool run before submitting a template to source control.
    """
    meta_left, meta_right = SplitMeta(meta)

    # : is meant to look like Python 3000 formatting {foo:.3f}. According to
    # PEP 3101, that's also what .NET uses.
    # | is more readable, but, more importantly, reminiscent of pipes, which is
    # useful for multiple formatters, e.g. {name|js-string|html}
    if format_char not in (':', '|'):
        raise ConfigurationError(
            'Only format characters : and | are accepted (got %r)' % format_char)

    if whitespace not in ('smart', 'strip-line'):
        raise ConfigurationError('Invalid whitespace mode %r' % whitespace)

    # If we go to -1, then we got too many {end}. If end at 1, then we're missing
    # an {end}.
    balance_counter = 0
    comment_counter = 0  # ditto for ##BEGIN/##END
    # Set to True when a DEF_TOKEN section is seen; returned to the caller
    # alongside the compiled program.
    has_defines = False

    # Single pass over the token stream, dispatching on token type.
    for token_type, token in _Tokenize(template_str, meta_left, meta_right,
                                       whitespace):
        if token_type == COMMENT_BEGIN_TOKEN:
            comment_counter += 1
            continue
        if token_type == COMMENT_END_TOKEN:
            comment_counter -= 1
            if comment_counter < 0:
                raise CompilationError('Got too many ##END markers')
            continue

        # Don't process any tokens while inside a ##BEGIN/##END comment block.
        if comment_counter > 0:
            continue

        if token_type in (LITERAL_TOKEN, META_LITERAL_TOKEN):
            if token:
                builder.Append(token)
            continue

        if token_type in (SECTION_TOKEN, REPEATED_SECTION_TOKEN, DEF_TOKEN):
            # Section tokens may carry formatters after format_char.
            parts = [p.strip() for p in token.split(format_char)]
            if len(parts) == 1:
                name = parts[0]
                formatters = []
            else:
                name = parts[0]
                formatters = parts[1:]
            builder.NewSection(token_type, name, formatters)
            balance_counter += 1
            if token_type == DEF_TOKEN:
                has_defines = True
            continue

        if token_type == PREDICATE_TOKEN:
            # {.attr?} lookups
            builder.NewPredicateSection(token, test_attr=True)
            balance_counter += 1
            continue
        if token_type == IF_TOKEN:
            builder.NewPredicateSection(token, test_attr=False)
            balance_counter += 1
            continue
        if token_type == OR_TOKEN:
            builder.NewOrClause(token)
            continue
        if token_type == ALTERNATES_TOKEN:
            builder.AlternatesWith()
            continue
        if token_type == END_TOKEN:
            balance_counter -= 1
            if balance_counter < 0:
                # TODO: Show some context for errors
                raise TemplateSyntaxError(
                    'Got too many %send%s statements. You may have mistyped an '
                    "earlier 'section' or 'repeated section' directive."
                    % (meta_left, meta_right))
            builder.EndSection()
            continue
        if token_type == SUBST_TOKEN:
            parts = [p.strip() for p in token.split(format_char)]
            if len(parts) == 1:
                if default_formatter is None:
                    raise MissingFormatter('This template requires explicit formatters.')
                # If no formatter is specified, the default is the 'str' formatter,
                # which the user can define however they desire.
                name = token
                formatters = [default_formatter]
            else:
                name = parts[0]
                formatters = parts[1:]
            builder.AppendSubstitution(name, formatters)
            continue
        if token_type == SUBST_TEMPLATE_TOKEN:
            # no formatters
            builder.AppendTemplateSubstitution(token)
            continue

    if balance_counter != 0:
        raise TemplateSyntaxError('Got too few %send%s statements' %
                                  (meta_left, meta_right))
    if comment_counter != 0:
        raise CompilationError('Got %d more {##BEGIN}s than {##END}s' % comment_counter)

    return builder.Root(), has_defines
constant[Compile the template string, calling methods on the 'program builder'.
Args:
template_str: The template string. It should not have any compilation
options in the header -- those are parsed by FromString/FromFile
builder: The interface of _ProgramBuilder isn't fixed. Use at your own
risk.
meta: The metacharacters to use, e.g. '{}', '[]'.
default_formatter: The formatter to use for substitutions that are missing a
formatter. The 'str' formatter the "default default" -- it just tries
to convert the context value to a string in some unspecified manner.
whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone
on a line, with only whitespace on either side, then the whitespace is
removed. In 'strip-line' mode, every line is stripped of its
leading and trailing whitespace.
Returns:
The compiled program (obtained from the builder)
Raises:
The various subclasses of CompilationError. For example, if
default_formatter=None, and a variable is missing a formatter, then
MissingFormatter is raised.
This function is public so it can be used by other tools, e.g. a syntax
checking tool run before submitting a template to source control.
]
<ast.Tuple object at 0x7da20c76cf70> assign[=] call[name[SplitMeta], parameter[name[meta]]]
if compare[name[format_char] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c76fd60>, <ast.Constant object at 0x7da20c76c460>]]] begin[:]
<ast.Raise object at 0x7da20c76dc90>
if compare[name[whitespace] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c76d8a0>, <ast.Constant object at 0x7da20c76c610>]]] begin[:]
<ast.Raise object at 0x7da20c76d180>
variable[balance_counter] assign[=] constant[0]
variable[comment_counter] assign[=] constant[0]
variable[has_defines] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da20c76f850>, <ast.Name object at 0x7da20c76e890>]]] in starred[call[name[_Tokenize], parameter[name[template_str], name[meta_left], name[meta_right], name[whitespace]]]] begin[:]
if compare[name[token_type] equal[==] name[COMMENT_BEGIN_TOKEN]] begin[:]
<ast.AugAssign object at 0x7da20c76eb60>
continue
if compare[name[token_type] equal[==] name[COMMENT_END_TOKEN]] begin[:]
<ast.AugAssign object at 0x7da20c76fdc0>
if compare[name[comment_counter] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da20c76fc70>
continue
if compare[name[comment_counter] greater[>] constant[0]] begin[:]
continue
if compare[name[token_type] in tuple[[<ast.Name object at 0x7da20c76dd50>, <ast.Name object at 0x7da20c76ef80>]]] begin[:]
if name[token] begin[:]
call[name[builder].Append, parameter[name[token]]]
continue
if compare[name[token_type] in tuple[[<ast.Name object at 0x7da20c76e170>, <ast.Name object at 0x7da20c76f6d0>, <ast.Name object at 0x7da20c76fac0>]]] begin[:]
variable[parts] assign[=] <ast.ListComp object at 0x7da20c76f0d0>
if compare[call[name[len], parameter[name[parts]]] equal[==] constant[1]] begin[:]
variable[name] assign[=] call[name[parts]][constant[0]]
variable[formatters] assign[=] list[[]]
call[name[builder].NewSection, parameter[name[token_type], name[name], name[formatters]]]
<ast.AugAssign object at 0x7da20c76ef50>
if compare[name[token_type] equal[==] name[DEF_TOKEN]] begin[:]
variable[has_defines] assign[=] constant[True]
continue
if compare[name[token_type] equal[==] name[PREDICATE_TOKEN]] begin[:]
call[name[builder].NewPredicateSection, parameter[name[token]]]
<ast.AugAssign object at 0x7da20c76e020>
continue
if compare[name[token_type] equal[==] name[IF_TOKEN]] begin[:]
call[name[builder].NewPredicateSection, parameter[name[token]]]
<ast.AugAssign object at 0x7da20c76c160>
continue
if compare[name[token_type] equal[==] name[OR_TOKEN]] begin[:]
call[name[builder].NewOrClause, parameter[name[token]]]
continue
if compare[name[token_type] equal[==] name[ALTERNATES_TOKEN]] begin[:]
call[name[builder].AlternatesWith, parameter[]]
continue
if compare[name[token_type] equal[==] name[END_TOKEN]] begin[:]
<ast.AugAssign object at 0x7da20c76c9a0>
if compare[name[balance_counter] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da20c76e770>
call[name[builder].EndSection, parameter[]]
continue
if compare[name[token_type] equal[==] name[SUBST_TOKEN]] begin[:]
variable[parts] assign[=] <ast.ListComp object at 0x7da20c76d2d0>
if compare[call[name[len], parameter[name[parts]]] equal[==] constant[1]] begin[:]
if compare[name[default_formatter] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76edd0>
variable[name] assign[=] name[token]
variable[formatters] assign[=] list[[<ast.Name object at 0x7da20c76eaa0>]]
call[name[builder].AppendSubstitution, parameter[name[name], name[formatters]]]
continue
if compare[name[token_type] equal[==] name[SUBST_TEMPLATE_TOKEN]] begin[:]
call[name[builder].AppendTemplateSubstitution, parameter[name[token]]]
continue
if compare[name[balance_counter] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da20c76c2b0>
if compare[name[comment_counter] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da18bcc8940>
return[tuple[[<ast.Call object at 0x7da18bcc82b0>, <ast.Name object at 0x7da18bcc99f0>]]] | keyword[def] identifier[_CompileTemplate] (
identifier[template_str] , identifier[builder] , identifier[meta] = literal[string] , identifier[format_char] = literal[string] , identifier[default_formatter] = literal[string] ,
identifier[whitespace] = literal[string] ):
literal[string]
identifier[meta_left] , identifier[meta_right] = identifier[SplitMeta] ( identifier[meta] )
keyword[if] identifier[format_char] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ConfigurationError] (
literal[string] % identifier[format_char] )
keyword[if] identifier[whitespace] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ConfigurationError] ( literal[string] % identifier[whitespace] )
identifier[balance_counter] = literal[int]
identifier[comment_counter] = literal[int]
identifier[has_defines] = keyword[False]
keyword[for] identifier[token_type] , identifier[token] keyword[in] identifier[_Tokenize] ( identifier[template_str] , identifier[meta_left] , identifier[meta_right] ,
identifier[whitespace] ):
keyword[if] identifier[token_type] == identifier[COMMENT_BEGIN_TOKEN] :
identifier[comment_counter] += literal[int]
keyword[continue]
keyword[if] identifier[token_type] == identifier[COMMENT_END_TOKEN] :
identifier[comment_counter] -= literal[int]
keyword[if] identifier[comment_counter] < literal[int] :
keyword[raise] identifier[CompilationError] ( literal[string] )
keyword[continue]
keyword[if] identifier[comment_counter] > literal[int] :
keyword[continue]
keyword[if] identifier[token_type] keyword[in] ( identifier[LITERAL_TOKEN] , identifier[META_LITERAL_TOKEN] ):
keyword[if] identifier[token] :
identifier[builder] . identifier[Append] ( identifier[token] )
keyword[continue]
keyword[if] identifier[token_type] keyword[in] ( identifier[SECTION_TOKEN] , identifier[REPEATED_SECTION_TOKEN] , identifier[DEF_TOKEN] ):
identifier[parts] =[ identifier[p] . identifier[strip] () keyword[for] identifier[p] keyword[in] identifier[token] . identifier[split] ( identifier[format_char] )]
keyword[if] identifier[len] ( identifier[parts] )== literal[int] :
identifier[name] = identifier[parts] [ literal[int] ]
identifier[formatters] =[]
keyword[else] :
identifier[name] = identifier[parts] [ literal[int] ]
identifier[formatters] = identifier[parts] [ literal[int] :]
identifier[builder] . identifier[NewSection] ( identifier[token_type] , identifier[name] , identifier[formatters] )
identifier[balance_counter] += literal[int]
keyword[if] identifier[token_type] == identifier[DEF_TOKEN] :
identifier[has_defines] = keyword[True]
keyword[continue]
keyword[if] identifier[token_type] == identifier[PREDICATE_TOKEN] :
identifier[builder] . identifier[NewPredicateSection] ( identifier[token] , identifier[test_attr] = keyword[True] )
identifier[balance_counter] += literal[int]
keyword[continue]
keyword[if] identifier[token_type] == identifier[IF_TOKEN] :
identifier[builder] . identifier[NewPredicateSection] ( identifier[token] , identifier[test_attr] = keyword[False] )
identifier[balance_counter] += literal[int]
keyword[continue]
keyword[if] identifier[token_type] == identifier[OR_TOKEN] :
identifier[builder] . identifier[NewOrClause] ( identifier[token] )
keyword[continue]
keyword[if] identifier[token_type] == identifier[ALTERNATES_TOKEN] :
identifier[builder] . identifier[AlternatesWith] ()
keyword[continue]
keyword[if] identifier[token_type] == identifier[END_TOKEN] :
identifier[balance_counter] -= literal[int]
keyword[if] identifier[balance_counter] < literal[int] :
keyword[raise] identifier[TemplateSyntaxError] (
literal[string]
literal[string]
%( identifier[meta_left] , identifier[meta_right] ))
identifier[builder] . identifier[EndSection] ()
keyword[continue]
keyword[if] identifier[token_type] == identifier[SUBST_TOKEN] :
identifier[parts] =[ identifier[p] . identifier[strip] () keyword[for] identifier[p] keyword[in] identifier[token] . identifier[split] ( identifier[format_char] )]
keyword[if] identifier[len] ( identifier[parts] )== literal[int] :
keyword[if] identifier[default_formatter] keyword[is] keyword[None] :
keyword[raise] identifier[MissingFormatter] ( literal[string] )
identifier[name] = identifier[token]
identifier[formatters] =[ identifier[default_formatter] ]
keyword[else] :
identifier[name] = identifier[parts] [ literal[int] ]
identifier[formatters] = identifier[parts] [ literal[int] :]
identifier[builder] . identifier[AppendSubstitution] ( identifier[name] , identifier[formatters] )
keyword[continue]
keyword[if] identifier[token_type] == identifier[SUBST_TEMPLATE_TOKEN] :
identifier[builder] . identifier[AppendTemplateSubstitution] ( identifier[token] )
keyword[continue]
keyword[if] identifier[balance_counter] != literal[int] :
keyword[raise] identifier[TemplateSyntaxError] ( literal[string] %
( identifier[meta_left] , identifier[meta_right] ))
keyword[if] identifier[comment_counter] != literal[int] :
keyword[raise] identifier[CompilationError] ( literal[string] % identifier[comment_counter] )
keyword[return] identifier[builder] . identifier[Root] (), identifier[has_defines] | def _CompileTemplate(template_str, builder, meta='{}', format_char='|', default_formatter='str', whitespace='smart'):
"""Compile the template string, calling methods on the 'program builder'.
Args:
template_str: The template string. It should not have any compilation
options in the header -- those are parsed by FromString/FromFile
builder: The interface of _ProgramBuilder isn't fixed. Use at your own
risk.
meta: The metacharacters to use, e.g. '{}', '[]'.
default_formatter: The formatter to use for substitutions that are missing a
formatter. The 'str' formatter the "default default" -- it just tries
to convert the context value to a string in some unspecified manner.
whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone
on a line, with only whitespace on either side, then the whitespace is
removed. In 'strip-line' mode, every line is stripped of its
leading and trailing whitespace.
Returns:
The compiled program (obtained from the builder)
Raises:
The various subclasses of CompilationError. For example, if
default_formatter=None, and a variable is missing a formatter, then
MissingFormatter is raised.
This function is public so it can be used by other tools, e.g. a syntax
checking tool run before submitting a template to source control.
"""
(meta_left, meta_right) = SplitMeta(meta)
# : is meant to look like Python 3000 formatting {foo:.3f}. According to
# PEP 3101, that's also what .NET uses.
# | is more readable, but, more importantly, reminiscent of pipes, which is
# useful for multiple formatters, e.g. {name|js-string|html}
if format_char not in (':', '|'):
raise ConfigurationError('Only format characters : and | are accepted (got %r)' % format_char) # depends on [control=['if'], data=['format_char']]
if whitespace not in ('smart', 'strip-line'):
raise ConfigurationError('Invalid whitespace mode %r' % whitespace) # depends on [control=['if'], data=['whitespace']]
# If we go to -1, then we got too many {end}. If end at 1, then we're missing
# an {end}.
balance_counter = 0
comment_counter = 0 # ditto for ##BEGIN/##END
has_defines = False
for (token_type, token) in _Tokenize(template_str, meta_left, meta_right, whitespace):
if token_type == COMMENT_BEGIN_TOKEN:
comment_counter += 1
continue # depends on [control=['if'], data=[]]
if token_type == COMMENT_END_TOKEN:
comment_counter -= 1
if comment_counter < 0:
raise CompilationError('Got too many ##END markers') # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
# Don't process any tokens
if comment_counter > 0:
continue # depends on [control=['if'], data=[]]
if token_type in (LITERAL_TOKEN, META_LITERAL_TOKEN):
if token:
builder.Append(token) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
if token_type in (SECTION_TOKEN, REPEATED_SECTION_TOKEN, DEF_TOKEN):
parts = [p.strip() for p in token.split(format_char)]
if len(parts) == 1:
name = parts[0]
formatters = [] # depends on [control=['if'], data=[]]
else:
name = parts[0]
formatters = parts[1:]
builder.NewSection(token_type, name, formatters)
balance_counter += 1
if token_type == DEF_TOKEN:
has_defines = True # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=['token_type']]
if token_type == PREDICATE_TOKEN:
# {.attr?} lookups
builder.NewPredicateSection(token, test_attr=True)
balance_counter += 1
continue # depends on [control=['if'], data=[]]
if token_type == IF_TOKEN:
builder.NewPredicateSection(token, test_attr=False)
balance_counter += 1
continue # depends on [control=['if'], data=[]]
if token_type == OR_TOKEN:
builder.NewOrClause(token)
continue # depends on [control=['if'], data=[]]
if token_type == ALTERNATES_TOKEN:
builder.AlternatesWith()
continue # depends on [control=['if'], data=[]]
if token_type == END_TOKEN:
balance_counter -= 1
if balance_counter < 0:
# TODO: Show some context for errors
raise TemplateSyntaxError("Got too many %send%s statements. You may have mistyped an earlier 'section' or 'repeated section' directive." % (meta_left, meta_right)) # depends on [control=['if'], data=[]]
builder.EndSection()
continue # depends on [control=['if'], data=[]]
if token_type == SUBST_TOKEN:
parts = [p.strip() for p in token.split(format_char)]
if len(parts) == 1:
if default_formatter is None:
raise MissingFormatter('This template requires explicit formatters.') # depends on [control=['if'], data=[]]
# If no formatter is specified, the default is the 'str' formatter,
# which the user can define however they desire.
name = token
formatters = [default_formatter] # depends on [control=['if'], data=[]]
else:
name = parts[0]
formatters = parts[1:]
builder.AppendSubstitution(name, formatters)
continue # depends on [control=['if'], data=[]]
if token_type == SUBST_TEMPLATE_TOKEN:
# no formatters
builder.AppendTemplateSubstitution(token)
continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if balance_counter != 0:
raise TemplateSyntaxError('Got too few %send%s statements' % (meta_left, meta_right)) # depends on [control=['if'], data=[]]
if comment_counter != 0:
raise CompilationError('Got %d more {##BEGIN}s than {##END}s' % comment_counter) # depends on [control=['if'], data=['comment_counter']]
return (builder.Root(), has_defines) |
def _get_doc_by_raw_offset(self, doc_id):
    """Load a single document from the XML file via stored byte offsets.

    The document's byte range is looked up in the corpus metadata and the
    corresponding chunk of ``self.filename`` is read directly.

    XXX: this is not tested under Windows.
    """
    meta_entry = self._get_meta()[str(doc_id)]
    return xml_utils.load_chunk(self.filename, meta_entry.bounds)
return xml_utils.load_chunk(self.filename, bounds) | def function[_get_doc_by_raw_offset, parameter[self, doc_id]]:
constant[
Load document from xml using bytes offset information.
XXX: this is not tested under Windows.
]
variable[bounds] assign[=] call[call[name[self]._get_meta, parameter[]]][call[name[str], parameter[name[doc_id]]]].bounds
return[call[name[xml_utils].load_chunk, parameter[name[self].filename, name[bounds]]]] | keyword[def] identifier[_get_doc_by_raw_offset] ( identifier[self] , identifier[doc_id] ):
literal[string]
identifier[bounds] = identifier[self] . identifier[_get_meta] ()[ identifier[str] ( identifier[doc_id] )]. identifier[bounds]
keyword[return] identifier[xml_utils] . identifier[load_chunk] ( identifier[self] . identifier[filename] , identifier[bounds] ) | def _get_doc_by_raw_offset(self, doc_id):
"""
Load document from xml using bytes offset information.
XXX: this is not tested under Windows.
"""
bounds = self._get_meta()[str(doc_id)].bounds
return xml_utils.load_chunk(self.filename, bounds) |
def focusOutEvent(self, event):
    """ The default 'focusOutEvent' implementation.

    Delegates the event to the widget's class-level handler, then informs
    the declaration object that focus has been lost.
    """
    w = self.widget
    type(w).focusOutEvent(w, event)
    self.declaration.focus_lost()
constant[ The default 'focusOutEvent' implementation.
]
variable[widget] assign[=] name[self].widget
call[call[name[type], parameter[name[widget]]].focusOutEvent, parameter[name[widget], name[event]]]
call[name[self].declaration.focus_lost, parameter[]] | keyword[def] identifier[focusOutEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[widget] = identifier[self] . identifier[widget]
identifier[type] ( identifier[widget] ). identifier[focusOutEvent] ( identifier[widget] , identifier[event] )
identifier[self] . identifier[declaration] . identifier[focus_lost] () | def focusOutEvent(self, event):
""" The default 'focusOutEvent' implementation.
"""
widget = self.widget
type(widget).focusOutEvent(widget, event)
self.declaration.focus_lost() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.