code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def elapsed_time(self):
"""
Return elapsed time as min:sec:ms. The .split separates out the
millisecond
"""
td = (datetime.datetime.now() - self.start_time)
sec = td.seconds
ms = int(td.microseconds / 1000)
return '{:02}:{:02}.{:03}'.format(sec % 3600 // 60, sec % 60, ms) | def function[elapsed_time, parameter[self]]:
constant[
Return elapsed time as min:sec:ms. The .split separates out the
millisecond
]
variable[td] assign[=] binary_operation[call[name[datetime].datetime.now, parameter[]] - name[self].start_time]
variable[sec] assign[=] name[td].seconds
variable[ms] assign[=] call[name[int], parameter[binary_operation[name[td].microseconds / constant[1000]]]]
return[call[constant[{:02}:{:02}.{:03}].format, parameter[binary_operation[binary_operation[name[sec] <ast.Mod object at 0x7da2590d6920> constant[3600]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[60]], binary_operation[name[sec] <ast.Mod object at 0x7da2590d6920> constant[60]], name[ms]]]] | keyword[def] identifier[elapsed_time] ( identifier[self] ):
literal[string]
identifier[td] =( identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[self] . identifier[start_time] )
identifier[sec] = identifier[td] . identifier[seconds]
identifier[ms] = identifier[int] ( identifier[td] . identifier[microseconds] / literal[int] )
keyword[return] literal[string] . identifier[format] ( identifier[sec] % literal[int] // literal[int] , identifier[sec] % literal[int] , identifier[ms] ) | def elapsed_time(self):
"""
Return elapsed time as min:sec:ms. The .split separates out the
millisecond
"""
td = datetime.datetime.now() - self.start_time
sec = td.seconds
ms = int(td.microseconds / 1000)
return '{:02}:{:02}.{:03}'.format(sec % 3600 // 60, sec % 60, ms) |
def _get_formatting_template(self, number_pattern, number_format):
"""Gets a formatting template which can be used to efficiently
format a partial number where digits are added one by one."""
# Create a phone number consisting only of the digit 9 that matches the
# number_pattern by applying the pattern to the longest_phone_number string.
longest_phone_number = unicod("999999999999999")
number_re = re.compile(number_pattern)
m = number_re.search(longest_phone_number) # this will always succeed
a_phone_number = m.group(0)
# No formatting template can be created if the number of digits
# entered so far is longer than the maximum the current formatting
# rule can accommodate.
if len(a_phone_number) < len(self._national_number):
return U_EMPTY_STRING
# Formats the number according to number_format
template = re.sub(number_pattern, number_format, a_phone_number)
# Replaces each digit with character _DIGIT_PLACEHOLDER
template = re.sub("9", _DIGIT_PLACEHOLDER, template)
return template | def function[_get_formatting_template, parameter[self, number_pattern, number_format]]:
constant[Gets a formatting template which can be used to efficiently
format a partial number where digits are added one by one.]
variable[longest_phone_number] assign[=] call[name[unicod], parameter[constant[999999999999999]]]
variable[number_re] assign[=] call[name[re].compile, parameter[name[number_pattern]]]
variable[m] assign[=] call[name[number_re].search, parameter[name[longest_phone_number]]]
variable[a_phone_number] assign[=] call[name[m].group, parameter[constant[0]]]
if compare[call[name[len], parameter[name[a_phone_number]]] less[<] call[name[len], parameter[name[self]._national_number]]] begin[:]
return[name[U_EMPTY_STRING]]
variable[template] assign[=] call[name[re].sub, parameter[name[number_pattern], name[number_format], name[a_phone_number]]]
variable[template] assign[=] call[name[re].sub, parameter[constant[9], name[_DIGIT_PLACEHOLDER], name[template]]]
return[name[template]] | keyword[def] identifier[_get_formatting_template] ( identifier[self] , identifier[number_pattern] , identifier[number_format] ):
literal[string]
identifier[longest_phone_number] = identifier[unicod] ( literal[string] )
identifier[number_re] = identifier[re] . identifier[compile] ( identifier[number_pattern] )
identifier[m] = identifier[number_re] . identifier[search] ( identifier[longest_phone_number] )
identifier[a_phone_number] = identifier[m] . identifier[group] ( literal[int] )
keyword[if] identifier[len] ( identifier[a_phone_number] )< identifier[len] ( identifier[self] . identifier[_national_number] ):
keyword[return] identifier[U_EMPTY_STRING]
identifier[template] = identifier[re] . identifier[sub] ( identifier[number_pattern] , identifier[number_format] , identifier[a_phone_number] )
identifier[template] = identifier[re] . identifier[sub] ( literal[string] , identifier[_DIGIT_PLACEHOLDER] , identifier[template] )
keyword[return] identifier[template] | def _get_formatting_template(self, number_pattern, number_format):
"""Gets a formatting template which can be used to efficiently
format a partial number where digits are added one by one."""
# Create a phone number consisting only of the digit 9 that matches the
# number_pattern by applying the pattern to the longest_phone_number string.
longest_phone_number = unicod('999999999999999')
number_re = re.compile(number_pattern)
m = number_re.search(longest_phone_number) # this will always succeed
a_phone_number = m.group(0)
# No formatting template can be created if the number of digits
# entered so far is longer than the maximum the current formatting
# rule can accommodate.
if len(a_phone_number) < len(self._national_number):
return U_EMPTY_STRING # depends on [control=['if'], data=[]]
# Formats the number according to number_format
template = re.sub(number_pattern, number_format, a_phone_number)
# Replaces each digit with character _DIGIT_PLACEHOLDER
template = re.sub('9', _DIGIT_PLACEHOLDER, template)
return template |
def is_cached(self, version=None):
'''
Set the cache property to start/stop file caching for this archive
'''
version = _process_version(self, version)
if self.api.cache and self.api.cache.fs.isfile(
self.get_version_path(version)):
return True
return False | def function[is_cached, parameter[self, version]]:
constant[
Set the cache property to start/stop file caching for this archive
]
variable[version] assign[=] call[name[_process_version], parameter[name[self], name[version]]]
if <ast.BoolOp object at 0x7da1b0bb8ee0> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_cached] ( identifier[self] , identifier[version] = keyword[None] ):
literal[string]
identifier[version] = identifier[_process_version] ( identifier[self] , identifier[version] )
keyword[if] identifier[self] . identifier[api] . identifier[cache] keyword[and] identifier[self] . identifier[api] . identifier[cache] . identifier[fs] . identifier[isfile] (
identifier[self] . identifier[get_version_path] ( identifier[version] )):
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_cached(self, version=None):
"""
Set the cache property to start/stop file caching for this archive
"""
version = _process_version(self, version)
if self.api.cache and self.api.cache.fs.isfile(self.get_version_path(version)):
return True # depends on [control=['if'], data=[]]
return False |
def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion.
:param function func: The caller function
:param str path: The target path for removal
:param Exception exc: The raised exception
This function will call check :func:`is_readonly_path` before attempting to call
:func:`set_write_bit` on the target path and try again.
"""
# Check for read-only attribute
from .compat import ResourceWarning, FileNotFoundError, PermissionError
PERM_ERRORS = (errno.EACCES, errno.EPERM, errno.ENOENT)
default_warning_message = "Unable to remove file due to permissions restriction: {!r}"
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError, FileNotFoundError, PermissionError) as e:
if e.errno == errno.ENOENT:
return
elif e.errno in PERM_ERRORS:
remaining = None
if os.path.isdir(path):
remaining =_wait_for_files(path)
if remaining:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
if exc_exception.errno in PERM_ERRORS:
set_write_bit(path)
remaining = _wait_for_files(path)
try:
func(path)
except (OSError, IOError, FileNotFoundError, PermissionError) as e:
if e.errno in PERM_ERRORS:
warnings.warn(default_warning_message.format(path), ResourceWarning)
pass
elif e.errno == errno.ENOENT: # File already gone
pass
else:
raise
else:
return
elif exc_exception.errno == errno.ENOENT:
pass
else:
raise exc_exception | def function[handle_remove_readonly, parameter[func, path, exc]]:
constant[Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion.
:param function func: The caller function
:param str path: The target path for removal
:param Exception exc: The raised exception
This function will call check :func:`is_readonly_path` before attempting to call
:func:`set_write_bit` on the target path and try again.
]
from relative_module[compat] import module[ResourceWarning], module[FileNotFoundError], module[PermissionError]
variable[PERM_ERRORS] assign[=] tuple[[<ast.Attribute object at 0x7da18bcc80a0>, <ast.Attribute object at 0x7da18bccb2b0>, <ast.Attribute object at 0x7da18bcc9330>]]
variable[default_warning_message] assign[=] constant[Unable to remove file due to permissions restriction: {!r}]
<ast.Tuple object at 0x7da18bcca470> assign[=] name[exc]
if call[name[is_readonly_path], parameter[name[path]]] begin[:]
call[name[set_write_bit], parameter[name[path]]]
<ast.Try object at 0x7da18bcca500>
if compare[name[exc_exception].errno in name[PERM_ERRORS]] begin[:]
call[name[set_write_bit], parameter[name[path]]]
variable[remaining] assign[=] call[name[_wait_for_files], parameter[name[path]]]
<ast.Try object at 0x7da18bcc8910> | keyword[def] identifier[handle_remove_readonly] ( identifier[func] , identifier[path] , identifier[exc] ):
literal[string]
keyword[from] . identifier[compat] keyword[import] identifier[ResourceWarning] , identifier[FileNotFoundError] , identifier[PermissionError]
identifier[PERM_ERRORS] =( identifier[errno] . identifier[EACCES] , identifier[errno] . identifier[EPERM] , identifier[errno] . identifier[ENOENT] )
identifier[default_warning_message] = literal[string]
identifier[exc_type] , identifier[exc_exception] , identifier[exc_tb] = identifier[exc]
keyword[if] identifier[is_readonly_path] ( identifier[path] ):
identifier[set_write_bit] ( identifier[path] )
keyword[try] :
identifier[func] ( identifier[path] )
keyword[except] ( identifier[OSError] , identifier[IOError] , identifier[FileNotFoundError] , identifier[PermissionError] ) keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
keyword[return]
keyword[elif] identifier[e] . identifier[errno] keyword[in] identifier[PERM_ERRORS] :
identifier[remaining] = keyword[None]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
identifier[remaining] = identifier[_wait_for_files] ( identifier[path] )
keyword[if] identifier[remaining] :
identifier[warnings] . identifier[warn] ( identifier[default_warning_message] . identifier[format] ( identifier[path] ), identifier[ResourceWarning] )
keyword[return]
keyword[raise]
keyword[if] identifier[exc_exception] . identifier[errno] keyword[in] identifier[PERM_ERRORS] :
identifier[set_write_bit] ( identifier[path] )
identifier[remaining] = identifier[_wait_for_files] ( identifier[path] )
keyword[try] :
identifier[func] ( identifier[path] )
keyword[except] ( identifier[OSError] , identifier[IOError] , identifier[FileNotFoundError] , identifier[PermissionError] ) keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] keyword[in] identifier[PERM_ERRORS] :
identifier[warnings] . identifier[warn] ( identifier[default_warning_message] . identifier[format] ( identifier[path] ), identifier[ResourceWarning] )
keyword[pass]
keyword[elif] identifier[e] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
keyword[pass]
keyword[else] :
keyword[raise]
keyword[else] :
keyword[return]
keyword[elif] identifier[exc_exception] . identifier[errno] == identifier[errno] . identifier[ENOENT] :
keyword[pass]
keyword[else] :
keyword[raise] identifier[exc_exception] | def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion.
:param function func: The caller function
:param str path: The target path for removal
:param Exception exc: The raised exception
This function will call check :func:`is_readonly_path` before attempting to call
:func:`set_write_bit` on the target path and try again.
"""
# Check for read-only attribute
from .compat import ResourceWarning, FileNotFoundError, PermissionError
PERM_ERRORS = (errno.EACCES, errno.EPERM, errno.ENOENT)
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
# split the initial exception out into its type, exception, and traceback
(exc_type, exc_exception, exc_tb) = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path) # depends on [control=['try'], data=[]]
except (OSError, IOError, FileNotFoundError, PermissionError) as e:
if e.errno == errno.ENOENT:
return # depends on [control=['if'], data=[]]
elif e.errno in PERM_ERRORS:
remaining = None
if os.path.isdir(path):
remaining = _wait_for_files(path) # depends on [control=['if'], data=[]]
if remaining:
warnings.warn(default_warning_message.format(path), ResourceWarning) # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
if exc_exception.errno in PERM_ERRORS:
set_write_bit(path)
remaining = _wait_for_files(path)
try:
func(path) # depends on [control=['try'], data=[]]
except (OSError, IOError, FileNotFoundError, PermissionError) as e:
if e.errno in PERM_ERRORS:
warnings.warn(default_warning_message.format(path), ResourceWarning)
pass # depends on [control=['if'], data=[]]
elif e.errno == errno.ENOENT: # File already gone
pass # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
else:
return # depends on [control=['if'], data=['PERM_ERRORS']]
elif exc_exception.errno == errno.ENOENT:
pass # depends on [control=['if'], data=[]]
else:
raise exc_exception |
def rev_comp( seq, molecule='dna' ):
""" DNA|RNA seq -> reverse complement
"""
if molecule == 'dna':
nuc_dict = { "A":"T", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "T":"A", "V":"B", "W":"W", "Y":"R" }
elif molecule == 'rna':
nuc_dict = { "A":"U", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "U":"A", "V":"B", "W":"W", "Y":"R" }
else:
raise ValueError( "rev_comp requires molecule to be dna or rna" )
if not isinstance( seq, six.string_types ):
raise TypeError( "seq must be a string!" )
return ''.join( [ nuc_dict[c] for c in seq.upper()[::-1] ] ) | def function[rev_comp, parameter[seq, molecule]]:
constant[ DNA|RNA seq -> reverse complement
]
if compare[name[molecule] equal[==] constant[dna]] begin[:]
variable[nuc_dict] assign[=] dictionary[[<ast.Constant object at 0x7da20c76f760>, <ast.Constant object at 0x7da20c76cf70>, <ast.Constant object at 0x7da20c76f4f0>, <ast.Constant object at 0x7da20c76ea10>, <ast.Constant object at 0x7da20c76f1f0>, <ast.Constant object at 0x7da20c76fc70>, <ast.Constant object at 0x7da20c76f430>, <ast.Constant object at 0x7da20c76e7a0>, <ast.Constant object at 0x7da20c76df00>, <ast.Constant object at 0x7da20c76fb20>, <ast.Constant object at 0x7da20c76ee00>, <ast.Constant object at 0x7da20c76fac0>, <ast.Constant object at 0x7da20c76dba0>, <ast.Constant object at 0x7da20c76c670>, <ast.Constant object at 0x7da20c76cd60>], [<ast.Constant object at 0x7da20c76cf40>, <ast.Constant object at 0x7da20c76d6f0>, <ast.Constant object at 0x7da20c76d390>, <ast.Constant object at 0x7da20c76d330>, <ast.Constant object at 0x7da20c76c5e0>, <ast.Constant object at 0x7da20c76dc00>, <ast.Constant object at 0x7da20c76e830>, <ast.Constant object at 0x7da20c76d240>, <ast.Constant object at 0x7da20c76c7f0>, <ast.Constant object at 0x7da20c76e8f0>, <ast.Constant object at 0x7da20c76f1c0>, <ast.Constant object at 0x7da20c76d0c0>, <ast.Constant object at 0x7da20c76e2f0>, <ast.Constant object at 0x7da20c76e290>, <ast.Constant object at 0x7da20c76d060>]]
if <ast.UnaryOp object at 0x7da2041db880> begin[:]
<ast.Raise object at 0x7da2041db0a0>
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da2041db1f0>]]] | keyword[def] identifier[rev_comp] ( identifier[seq] , identifier[molecule] = literal[string] ):
literal[string]
keyword[if] identifier[molecule] == literal[string] :
identifier[nuc_dict] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
keyword[elif] identifier[molecule] == literal[string] :
identifier[nuc_dict] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[seq] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] literal[string] . identifier[join] ([ identifier[nuc_dict] [ identifier[c] ] keyword[for] identifier[c] keyword[in] identifier[seq] . identifier[upper] ()[::- literal[int] ]]) | def rev_comp(seq, molecule='dna'):
""" DNA|RNA seq -> reverse complement
"""
if molecule == 'dna':
nuc_dict = {'A': 'T', 'B': 'V', 'C': 'G', 'D': 'H', 'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'R': 'Y', 'S': 'S', 'T': 'A', 'V': 'B', 'W': 'W', 'Y': 'R'} # depends on [control=['if'], data=[]]
elif molecule == 'rna':
nuc_dict = {'A': 'U', 'B': 'V', 'C': 'G', 'D': 'H', 'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'R': 'Y', 'S': 'S', 'U': 'A', 'V': 'B', 'W': 'W', 'Y': 'R'} # depends on [control=['if'], data=[]]
else:
raise ValueError('rev_comp requires molecule to be dna or rna')
if not isinstance(seq, six.string_types):
raise TypeError('seq must be a string!') # depends on [control=['if'], data=[]]
return ''.join([nuc_dict[c] for c in seq.upper()[::-1]]) |
def GetWsdlMethod(ns, wsdlName):
""" Get wsdl method from ns, wsdlName """
with _lazyLock:
method = _wsdlMethodMap[(ns, wsdlName)]
if isinstance(method, ManagedMethod):
# The type corresponding to the method is loaded,
# just return the method object
return method
elif method:
# The type is not loaded, the map contains the info
# to load the type. Load the actual type and
# return the method object
LoadManagedType(*method)
return _wsdlMethodMap[(ns, wsdlName)]
else:
raise KeyError("{0} {1}".format(ns, name)) | def function[GetWsdlMethod, parameter[ns, wsdlName]]:
constant[ Get wsdl method from ns, wsdlName ]
with name[_lazyLock] begin[:]
variable[method] assign[=] call[name[_wsdlMethodMap]][tuple[[<ast.Name object at 0x7da2041dab30>, <ast.Name object at 0x7da2041d8eb0>]]]
if call[name[isinstance], parameter[name[method], name[ManagedMethod]]] begin[:]
return[name[method]] | keyword[def] identifier[GetWsdlMethod] ( identifier[ns] , identifier[wsdlName] ):
literal[string]
keyword[with] identifier[_lazyLock] :
identifier[method] = identifier[_wsdlMethodMap] [( identifier[ns] , identifier[wsdlName] )]
keyword[if] identifier[isinstance] ( identifier[method] , identifier[ManagedMethod] ):
keyword[return] identifier[method]
keyword[elif] identifier[method] :
identifier[LoadManagedType] (* identifier[method] )
keyword[return] identifier[_wsdlMethodMap] [( identifier[ns] , identifier[wsdlName] )]
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[ns] , identifier[name] )) | def GetWsdlMethod(ns, wsdlName):
""" Get wsdl method from ns, wsdlName """
with _lazyLock:
method = _wsdlMethodMap[ns, wsdlName]
if isinstance(method, ManagedMethod):
# The type corresponding to the method is loaded,
# just return the method object
return method # depends on [control=['if'], data=[]]
elif method:
# The type is not loaded, the map contains the info
# to load the type. Load the actual type and
# return the method object
LoadManagedType(*method)
return _wsdlMethodMap[ns, wsdlName] # depends on [control=['if'], data=[]]
else:
raise KeyError('{0} {1}'.format(ns, name)) # depends on [control=['with'], data=[]] |
def Query(args):
"""Calls osquery with given query and returns its output.
Args:
args: A query to call osquery with.
Returns:
A "parsed JSON" representation of the osquery output.
Raises:
QueryError: If the query is incorrect.
TimeoutError: If a call to the osquery executable times out.
Error: If anything else goes wrong with the subprocess call.
"""
query = args.query.encode("utf-8")
timeout = args.timeout_millis / 1000 # `subprocess.run` uses seconds.
# TODO: pytype is not aware of the backport.
# pytype: disable=module-attr
try:
# We use `--S` to enforce shell execution. This is because on Windows there
# is only `osqueryd` and `osqueryi` is not available. However, by passing
# `--S` we can make `osqueryd` behave like `osqueryi`. Since this flag also
# works with `osqueryi`, by passing it we simply expand number of supported
# executable types.
command = [config.CONFIG["Osquery.path"], "--S", "--json", query]
proc = subprocess.run(
command,
timeout=timeout,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# TODO: Since we use a backported API, `SubprocessError` is hard
# to work with. Until support for Python 2 is dropped we re-raise with simpler
# exception type because we don't really care that much (the exception message
# should be detailed enough anyway).
except subprocess.TimeoutExpired as error:
raise TimeoutError(cause=error)
except subprocess.CalledProcessError as error:
raise Error("osquery invocation error", cause=error)
# pytype: enable=module-attr
stdout = proc.stdout.decode("utf-8")
stderr = proc.stderr.decode("utf-8").strip()
return ProcOutput(stdout=stdout, stderr=stderr) | def function[Query, parameter[args]]:
constant[Calls osquery with given query and returns its output.
Args:
args: A query to call osquery with.
Returns:
A "parsed JSON" representation of the osquery output.
Raises:
QueryError: If the query is incorrect.
TimeoutError: If a call to the osquery executable times out.
Error: If anything else goes wrong with the subprocess call.
]
variable[query] assign[=] call[name[args].query.encode, parameter[constant[utf-8]]]
variable[timeout] assign[=] binary_operation[name[args].timeout_millis / constant[1000]]
<ast.Try object at 0x7da1b1c24520>
variable[stdout] assign[=] call[name[proc].stdout.decode, parameter[constant[utf-8]]]
variable[stderr] assign[=] call[call[name[proc].stderr.decode, parameter[constant[utf-8]]].strip, parameter[]]
return[call[name[ProcOutput], parameter[]]] | keyword[def] identifier[Query] ( identifier[args] ):
literal[string]
identifier[query] = identifier[args] . identifier[query] . identifier[encode] ( literal[string] )
identifier[timeout] = identifier[args] . identifier[timeout_millis] / literal[int]
keyword[try] :
identifier[command] =[ identifier[config] . identifier[CONFIG] [ literal[string] ], literal[string] , literal[string] , identifier[query] ]
identifier[proc] = identifier[subprocess] . identifier[run] (
identifier[command] ,
identifier[timeout] = identifier[timeout] ,
identifier[check] = keyword[True] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
keyword[except] identifier[subprocess] . identifier[TimeoutExpired] keyword[as] identifier[error] :
keyword[raise] identifier[TimeoutError] ( identifier[cause] = identifier[error] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[error] :
keyword[raise] identifier[Error] ( literal[string] , identifier[cause] = identifier[error] )
identifier[stdout] = identifier[proc] . identifier[stdout] . identifier[decode] ( literal[string] )
identifier[stderr] = identifier[proc] . identifier[stderr] . identifier[decode] ( literal[string] ). identifier[strip] ()
keyword[return] identifier[ProcOutput] ( identifier[stdout] = identifier[stdout] , identifier[stderr] = identifier[stderr] ) | def Query(args):
"""Calls osquery with given query and returns its output.
Args:
args: A query to call osquery with.
Returns:
A "parsed JSON" representation of the osquery output.
Raises:
QueryError: If the query is incorrect.
TimeoutError: If a call to the osquery executable times out.
Error: If anything else goes wrong with the subprocess call.
"""
query = args.query.encode('utf-8')
timeout = args.timeout_millis / 1000 # `subprocess.run` uses seconds.
# TODO: pytype is not aware of the backport.
# pytype: disable=module-attr
try:
# We use `--S` to enforce shell execution. This is because on Windows there
# is only `osqueryd` and `osqueryi` is not available. However, by passing
# `--S` we can make `osqueryd` behave like `osqueryi`. Since this flag also
# works with `osqueryi`, by passing it we simply expand number of supported
# executable types.
command = [config.CONFIG['Osquery.path'], '--S', '--json', query]
proc = subprocess.run(command, timeout=timeout, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # depends on [control=['try'], data=[]]
# TODO: Since we use a backported API, `SubprocessError` is hard
# to work with. Until support for Python 2 is dropped we re-raise with simpler
# exception type because we don't really care that much (the exception message
# should be detailed enough anyway).
except subprocess.TimeoutExpired as error:
raise TimeoutError(cause=error) # depends on [control=['except'], data=['error']]
except subprocess.CalledProcessError as error:
raise Error('osquery invocation error', cause=error) # depends on [control=['except'], data=['error']]
# pytype: enable=module-attr
stdout = proc.stdout.decode('utf-8')
stderr = proc.stderr.decode('utf-8').strip()
return ProcOutput(stdout=stdout, stderr=stderr) |
def list_cleared_orders(self, bet_status='SETTLED', event_type_ids=None, event_ids=None, market_ids=None,
runner_ids=None, bet_ids=None, customer_order_refs=None, customer_strategy_refs=None,
side=None, settled_date_range=time_range(), group_by=None, include_item_description=None,
locale=None, from_record=None, record_count=None, session=None, lightweight=None):
"""
Returns a list of settled bets based on the bet status,
ordered by settled date.
:param str bet_status: Restricts the results to the specified status
:param list event_type_ids: Optionally restricts the results to the specified Event Type IDs
:param list event_ids: Optionally restricts the results to the specified Event IDs
:param list market_ids: Optionally restricts the results to the specified market IDs
:param list runner_ids: Optionally restricts the results to the specified Runners
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param list customer_order_refs: Optionally restricts the results to the specified customer order references
:param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy
references
:param str side: Optionally restricts the results to the specified side
:param dict settled_date_range: Optionally restricts the results to be from/to the specified settled date
:param str group_by: How to aggregate the lines, if not supplied then the lowest level is returned
:param bool include_item_description: If true then an ItemDescription object is included in the response
:param str locale: The language used for the response
:param int from_record: Specifies the first record that will be returned
:param int record_count: Specifies how many records will be returned from the index position 'fromRecord'
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.ClearedOrders
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listClearedOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.ClearedOrders, elapsed_time, lightweight) | def function[list_cleared_orders, parameter[self, bet_status, event_type_ids, event_ids, market_ids, runner_ids, bet_ids, customer_order_refs, customer_strategy_refs, side, settled_date_range, group_by, include_item_description, locale, from_record, record_count, session, lightweight]]:
constant[
Returns a list of settled bets based on the bet status,
ordered by settled date.
:param str bet_status: Restricts the results to the specified status
:param list event_type_ids: Optionally restricts the results to the specified Event Type IDs
:param list event_ids: Optionally restricts the results to the specified Event IDs
:param list market_ids: Optionally restricts the results to the specified market IDs
:param list runner_ids: Optionally restricts the results to the specified Runners
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param list customer_order_refs: Optionally restricts the results to the specified customer order references
:param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy
references
:param str side: Optionally restricts the results to the specified side
:param dict settled_date_range: Optionally restricts the results to be from/to the specified settled date
:param str group_by: How to aggregate the lines, if not supplied then the lowest level is returned
:param bool include_item_description: If true then an ItemDescription object is included in the response
:param str locale: The language used for the response
:param int from_record: Specifies the first record that will be returned
:param int record_count: Specifies how many records will be returned from the index position 'fromRecord'
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.ClearedOrders
]
variable[params] assign[=] call[name[clean_locals], parameter[call[name[locals], parameter[]]]]
variable[method] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1792530>, <ast.Constant object at 0x7da1b1791210>]]]
<ast.Tuple object at 0x7da1b1793040> assign[=] call[name[self].request, parameter[name[method], name[params], name[session]]]
return[call[name[self].process_response, parameter[name[response], name[resources].ClearedOrders, name[elapsed_time], name[lightweight]]]] | keyword[def] identifier[list_cleared_orders] ( identifier[self] , identifier[bet_status] = literal[string] , identifier[event_type_ids] = keyword[None] , identifier[event_ids] = keyword[None] , identifier[market_ids] = keyword[None] ,
identifier[runner_ids] = keyword[None] , identifier[bet_ids] = keyword[None] , identifier[customer_order_refs] = keyword[None] , identifier[customer_strategy_refs] = keyword[None] ,
identifier[side] = keyword[None] , identifier[settled_date_range] = identifier[time_range] (), identifier[group_by] = keyword[None] , identifier[include_item_description] = keyword[None] ,
identifier[locale] = keyword[None] , identifier[from_record] = keyword[None] , identifier[record_count] = keyword[None] , identifier[session] = keyword[None] , identifier[lightweight] = keyword[None] ):
literal[string]
identifier[params] = identifier[clean_locals] ( identifier[locals] ())
identifier[method] = literal[string] %( identifier[self] . identifier[URI] , literal[string] )
( identifier[response] , identifier[elapsed_time] )= identifier[self] . identifier[request] ( identifier[method] , identifier[params] , identifier[session] )
keyword[return] identifier[self] . identifier[process_response] ( identifier[response] , identifier[resources] . identifier[ClearedOrders] , identifier[elapsed_time] , identifier[lightweight] ) | def list_cleared_orders(self, bet_status='SETTLED', event_type_ids=None, event_ids=None, market_ids=None, runner_ids=None, bet_ids=None, customer_order_refs=None, customer_strategy_refs=None, side=None, settled_date_range=time_range(), group_by=None, include_item_description=None, locale=None, from_record=None, record_count=None, session=None, lightweight=None):
"""
Returns a list of settled bets based on the bet status,
ordered by settled date.
:param str bet_status: Restricts the results to the specified status
:param list event_type_ids: Optionally restricts the results to the specified Event Type IDs
:param list event_ids: Optionally restricts the results to the specified Event IDs
:param list market_ids: Optionally restricts the results to the specified market IDs
:param list runner_ids: Optionally restricts the results to the specified Runners
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param list customer_order_refs: Optionally restricts the results to the specified customer order references
:param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy
references
:param str side: Optionally restricts the results to the specified side
:param dict settled_date_range: Optionally restricts the results to be from/to the specified settled date
:param str group_by: How to aggregate the lines, if not supplied then the lowest level is returned
:param bool include_item_description: If true then an ItemDescription object is included in the response
:param str locale: The language used for the response
:param int from_record: Specifies the first record that will be returned
:param int record_count: Specifies how many records will be returned from the index position 'fromRecord'
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.ClearedOrders
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listClearedOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.ClearedOrders, elapsed_time, lightweight) |
def colorize(string, stack):
'''Apply optimal ANSI escape sequences to the string.'''
codes = optimize(stack)
if len(codes):
prefix = SEQ % ';'.join(map(str, codes))
suffix = SEQ % STYLE.reset
return prefix + string + suffix
else:
return string | def function[colorize, parameter[string, stack]]:
constant[Apply optimal ANSI escape sequences to the string.]
variable[codes] assign[=] call[name[optimize], parameter[name[stack]]]
if call[name[len], parameter[name[codes]]] begin[:]
variable[prefix] assign[=] binary_operation[name[SEQ] <ast.Mod object at 0x7da2590d6920> call[constant[;].join, parameter[call[name[map], parameter[name[str], name[codes]]]]]]
variable[suffix] assign[=] binary_operation[name[SEQ] <ast.Mod object at 0x7da2590d6920> name[STYLE].reset]
return[binary_operation[binary_operation[name[prefix] + name[string]] + name[suffix]]] | keyword[def] identifier[colorize] ( identifier[string] , identifier[stack] ):
literal[string]
identifier[codes] = identifier[optimize] ( identifier[stack] )
keyword[if] identifier[len] ( identifier[codes] ):
identifier[prefix] = identifier[SEQ] % literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[codes] ))
identifier[suffix] = identifier[SEQ] % identifier[STYLE] . identifier[reset]
keyword[return] identifier[prefix] + identifier[string] + identifier[suffix]
keyword[else] :
keyword[return] identifier[string] | def colorize(string, stack):
"""Apply optimal ANSI escape sequences to the string."""
codes = optimize(stack)
if len(codes):
prefix = SEQ % ';'.join(map(str, codes))
suffix = SEQ % STYLE.reset
return prefix + string + suffix # depends on [control=['if'], data=[]]
else:
return string |
def add_compound(self, compound_stat, config=None):
"""
Register a compound statistic with this sensor which
yields multiple measurable quantities (like a histogram)
Arguments:
stat (AbstractCompoundStat): The stat to register
config (MetricConfig): The configuration for this stat.
If None then the stat will use the default configuration
for this sensor.
"""
if not compound_stat:
raise ValueError('compound stat must be non-empty')
self._stats.append(compound_stat)
for named_measurable in compound_stat.stats():
metric = KafkaMetric(named_measurable.name, named_measurable.stat,
config or self._config)
self._registry.register_metric(metric)
self._metrics.append(metric) | def function[add_compound, parameter[self, compound_stat, config]]:
constant[
Register a compound statistic with this sensor which
yields multiple measurable quantities (like a histogram)
Arguments:
stat (AbstractCompoundStat): The stat to register
config (MetricConfig): The configuration for this stat.
If None then the stat will use the default configuration
for this sensor.
]
if <ast.UnaryOp object at 0x7da1b1cb1b70> begin[:]
<ast.Raise object at 0x7da1b1cb1e10>
call[name[self]._stats.append, parameter[name[compound_stat]]]
for taget[name[named_measurable]] in starred[call[name[compound_stat].stats, parameter[]]] begin[:]
variable[metric] assign[=] call[name[KafkaMetric], parameter[name[named_measurable].name, name[named_measurable].stat, <ast.BoolOp object at 0x7da1b1cb0250>]]
call[name[self]._registry.register_metric, parameter[name[metric]]]
call[name[self]._metrics.append, parameter[name[metric]]] | keyword[def] identifier[add_compound] ( identifier[self] , identifier[compound_stat] , identifier[config] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[compound_stat] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_stats] . identifier[append] ( identifier[compound_stat] )
keyword[for] identifier[named_measurable] keyword[in] identifier[compound_stat] . identifier[stats] ():
identifier[metric] = identifier[KafkaMetric] ( identifier[named_measurable] . identifier[name] , identifier[named_measurable] . identifier[stat] ,
identifier[config] keyword[or] identifier[self] . identifier[_config] )
identifier[self] . identifier[_registry] . identifier[register_metric] ( identifier[metric] )
identifier[self] . identifier[_metrics] . identifier[append] ( identifier[metric] ) | def add_compound(self, compound_stat, config=None):
"""
Register a compound statistic with this sensor which
yields multiple measurable quantities (like a histogram)
Arguments:
stat (AbstractCompoundStat): The stat to register
config (MetricConfig): The configuration for this stat.
If None then the stat will use the default configuration
for this sensor.
"""
if not compound_stat:
raise ValueError('compound stat must be non-empty') # depends on [control=['if'], data=[]]
self._stats.append(compound_stat)
for named_measurable in compound_stat.stats():
metric = KafkaMetric(named_measurable.name, named_measurable.stat, config or self._config)
self._registry.register_metric(metric)
self._metrics.append(metric) # depends on [control=['for'], data=['named_measurable']] |
def decode(raw):
"""Decode SLIP message."""
return raw \
.replace(bytes([SLIP_ESC, SLIP_ESC_END]), bytes([SLIP_END])) \
.replace(bytes([SLIP_ESC, SLIP_ESC_ESC]), bytes([SLIP_ESC])) | def function[decode, parameter[raw]]:
constant[Decode SLIP message.]
return[call[call[name[raw].replace, parameter[call[name[bytes], parameter[list[[<ast.Name object at 0x7da1b0eee860>, <ast.Name object at 0x7da1b0eec9a0>]]]], call[name[bytes], parameter[list[[<ast.Name object at 0x7da1b0eeda50>]]]]]].replace, parameter[call[name[bytes], parameter[list[[<ast.Name object at 0x7da1b0eecac0>, <ast.Name object at 0x7da1b0eedf60>]]]], call[name[bytes], parameter[list[[<ast.Name object at 0x7da1b0eecb80>]]]]]]] | keyword[def] identifier[decode] ( identifier[raw] ):
literal[string]
keyword[return] identifier[raw] . identifier[replace] ( identifier[bytes] ([ identifier[SLIP_ESC] , identifier[SLIP_ESC_END] ]), identifier[bytes] ([ identifier[SLIP_END] ])). identifier[replace] ( identifier[bytes] ([ identifier[SLIP_ESC] , identifier[SLIP_ESC_ESC] ]), identifier[bytes] ([ identifier[SLIP_ESC] ])) | def decode(raw):
"""Decode SLIP message."""
return raw.replace(bytes([SLIP_ESC, SLIP_ESC_END]), bytes([SLIP_END])).replace(bytes([SLIP_ESC, SLIP_ESC_ESC]), bytes([SLIP_ESC])) |
def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question)
return self._io.ask(question, default) | def function[ask, parameter[self, question, default]]:
constant[
Prompt the user for input.
]
if call[name[isinstance], parameter[name[question], name[Question]]] begin[:]
return[call[name[self]._io.ask_question, parameter[name[question]]]]
return[call[name[self]._io.ask, parameter[name[question], name[default]]]] | keyword[def] identifier[ask] ( identifier[self] , identifier[question] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[question] , identifier[Question] ):
keyword[return] identifier[self] . identifier[_io] . identifier[ask_question] ( identifier[question] )
keyword[return] identifier[self] . identifier[_io] . identifier[ask] ( identifier[question] , identifier[default] ) | def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question) # depends on [control=['if'], data=[]]
return self._io.ask(question, default) |
def update_req(name, old_req, config={}):
"""
Takes a requirement and updates it based on a specific attribute key
args:
name: the name of the attribute
old_req: the requirement definition
"""
if not name:
return old_req
new_req = copy.deepcopy(old_req)
del_idxs = []
if "req_items" in old_req:
req_key = get_req_key(old_req['req_items'])
for i, item in enumerate(old_req['req_items']):
if name == item[req_key] and item.get("dict_params"):
for param, value in item['dict_params'].items():
new_req['item_dict'][param].update(value)
if item.get("remove_if"):
test_val = get_attr(config, item['remove_if']['attr'])
if test_val == item['remove_if']['value']:
del_idxs.append(i)
for idx in sorted(del_idxs, reverse=True):
del new_req['req_items'][idx]
return new_req | def function[update_req, parameter[name, old_req, config]]:
constant[
Takes a requirement and updates it based on a specific attribute key
args:
name: the name of the attribute
old_req: the requirement definition
]
if <ast.UnaryOp object at 0x7da1b2345900> begin[:]
return[name[old_req]]
variable[new_req] assign[=] call[name[copy].deepcopy, parameter[name[old_req]]]
variable[del_idxs] assign[=] list[[]]
if compare[constant[req_items] in name[old_req]] begin[:]
variable[req_key] assign[=] call[name[get_req_key], parameter[call[name[old_req]][constant[req_items]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2344850>, <ast.Name object at 0x7da1b2347910>]]] in starred[call[name[enumerate], parameter[call[name[old_req]][constant[req_items]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2346530> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2345780>, <ast.Name object at 0x7da1b2345c60>]]] in starred[call[call[name[item]][constant[dict_params]].items, parameter[]]] begin[:]
call[call[call[name[new_req]][constant[item_dict]]][name[param]].update, parameter[name[value]]]
if call[name[item].get, parameter[constant[remove_if]]] begin[:]
variable[test_val] assign[=] call[name[get_attr], parameter[name[config], call[call[name[item]][constant[remove_if]]][constant[attr]]]]
if compare[name[test_val] equal[==] call[call[name[item]][constant[remove_if]]][constant[value]]] begin[:]
call[name[del_idxs].append, parameter[name[i]]]
for taget[name[idx]] in starred[call[name[sorted], parameter[name[del_idxs]]]] begin[:]
<ast.Delete object at 0x7da18f00e3e0>
return[name[new_req]] | keyword[def] identifier[update_req] ( identifier[name] , identifier[old_req] , identifier[config] ={}):
literal[string]
keyword[if] keyword[not] identifier[name] :
keyword[return] identifier[old_req]
identifier[new_req] = identifier[copy] . identifier[deepcopy] ( identifier[old_req] )
identifier[del_idxs] =[]
keyword[if] literal[string] keyword[in] identifier[old_req] :
identifier[req_key] = identifier[get_req_key] ( identifier[old_req] [ literal[string] ])
keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[old_req] [ literal[string] ]):
keyword[if] identifier[name] == identifier[item] [ identifier[req_key] ] keyword[and] identifier[item] . identifier[get] ( literal[string] ):
keyword[for] identifier[param] , identifier[value] keyword[in] identifier[item] [ literal[string] ]. identifier[items] ():
identifier[new_req] [ literal[string] ][ identifier[param] ]. identifier[update] ( identifier[value] )
keyword[if] identifier[item] . identifier[get] ( literal[string] ):
identifier[test_val] = identifier[get_attr] ( identifier[config] , identifier[item] [ literal[string] ][ literal[string] ])
keyword[if] identifier[test_val] == identifier[item] [ literal[string] ][ literal[string] ]:
identifier[del_idxs] . identifier[append] ( identifier[i] )
keyword[for] identifier[idx] keyword[in] identifier[sorted] ( identifier[del_idxs] , identifier[reverse] = keyword[True] ):
keyword[del] identifier[new_req] [ literal[string] ][ identifier[idx] ]
keyword[return] identifier[new_req] | def update_req(name, old_req, config={}):
"""
Takes a requirement and updates it based on a specific attribute key
args:
name: the name of the attribute
old_req: the requirement definition
"""
if not name:
return old_req # depends on [control=['if'], data=[]]
new_req = copy.deepcopy(old_req)
del_idxs = []
if 'req_items' in old_req:
req_key = get_req_key(old_req['req_items'])
for (i, item) in enumerate(old_req['req_items']):
if name == item[req_key] and item.get('dict_params'):
for (param, value) in item['dict_params'].items():
new_req['item_dict'][param].update(value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if item.get('remove_if'):
test_val = get_attr(config, item['remove_if']['attr'])
if test_val == item['remove_if']['value']:
del_idxs.append(i) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for idx in sorted(del_idxs, reverse=True):
del new_req['req_items'][idx] # depends on [control=['for'], data=['idx']] # depends on [control=['if'], data=['old_req']]
return new_req |
def parse_duration(self):
"""
Corrects for any offsets that may have been created by loop and skip
events
"""
for edit in self.track_edits:
if edit.edit_type == 'loopPlugin':
self.duration += (
(edit.options['end'] -
edit.options['start']) *
float(edit.options['loop'])
) | def function[parse_duration, parameter[self]]:
constant[
Corrects for any offsets that may have been created by loop and skip
events
]
for taget[name[edit]] in starred[name[self].track_edits] begin[:]
if compare[name[edit].edit_type equal[==] constant[loopPlugin]] begin[:]
<ast.AugAssign object at 0x7da1b0a80be0> | keyword[def] identifier[parse_duration] ( identifier[self] ):
literal[string]
keyword[for] identifier[edit] keyword[in] identifier[self] . identifier[track_edits] :
keyword[if] identifier[edit] . identifier[edit_type] == literal[string] :
identifier[self] . identifier[duration] +=(
( identifier[edit] . identifier[options] [ literal[string] ]-
identifier[edit] . identifier[options] [ literal[string] ])*
identifier[float] ( identifier[edit] . identifier[options] [ literal[string] ])
) | def parse_duration(self):
"""
Corrects for any offsets that may have been created by loop and skip
events
"""
for edit in self.track_edits:
if edit.edit_type == 'loopPlugin':
self.duration += (edit.options['end'] - edit.options['start']) * float(edit.options['loop']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['edit']] |
def erf(x, context=None):
"""
Return the value of the error function at x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_erf,
(BigFloat._implicit_convert(x),),
context,
) | def function[erf, parameter[x, context]]:
constant[
Return the value of the error function at x.
]
return[call[name[_apply_function_in_current_context], parameter[name[BigFloat], name[mpfr].mpfr_erf, tuple[[<ast.Call object at 0x7da207f9aa10>]], name[context]]]] | keyword[def] identifier[erf] ( identifier[x] , identifier[context] = keyword[None] ):
literal[string]
keyword[return] identifier[_apply_function_in_current_context] (
identifier[BigFloat] ,
identifier[mpfr] . identifier[mpfr_erf] ,
( identifier[BigFloat] . identifier[_implicit_convert] ( identifier[x] ),),
identifier[context] ,
) | def erf(x, context=None):
"""
Return the value of the error function at x.
"""
return _apply_function_in_current_context(BigFloat, mpfr.mpfr_erf, (BigFloat._implicit_convert(x),), context) |
def infer_dtypes(fit, model=None):
"""Infer dtypes from Stan model code.
Function strips out generated quantities block and searchs for `int`
dtypes after stripping out comments inside the block.
"""
pattern_remove_comments = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE
)
stan_integer = r"int"
stan_limits = r"(?:\<[^\>]+\>)*" # ignore group: 0 or more <....>
stan_param = r"([^;=\s\[]+)" # capture group: ends= ";", "=", "[" or whitespace
stan_ws = r"\s*" # 0 or more whitespace
pattern_int = re.compile(
"".join((stan_integer, stan_ws, stan_limits, stan_ws, stan_param)), re.IGNORECASE
)
if model is None:
stan_code = fit.get_stancode()
model_pars = fit.model_pars
else:
stan_code = model.program_code
model_pars = fit.param_names
# remove deprecated comments
stan_code = "\n".join(
line if "#" not in line else line[: line.find("#")] for line in stan_code.splitlines()
)
stan_code = re.sub(pattern_remove_comments, "", stan_code)
stan_code = stan_code.split("generated quantities")[-1]
dtypes = re.findall(pattern_int, stan_code)
dtypes = {item.strip(): "int" for item in dtypes if item.strip() in model_pars}
return dtypes | def function[infer_dtypes, parameter[fit, model]]:
constant[Infer dtypes from Stan model code.
Function strips out generated quantities block and searchs for `int`
dtypes after stripping out comments inside the block.
]
variable[pattern_remove_comments] assign[=] call[name[re].compile, parameter[constant[//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"], binary_operation[name[re].DOTALL <ast.BitOr object at 0x7da2590d6aa0> name[re].MULTILINE]]]
variable[stan_integer] assign[=] constant[int]
variable[stan_limits] assign[=] constant[(?:\<[^\>]+\>)*]
variable[stan_param] assign[=] constant[([^;=\s\[]+)]
variable[stan_ws] assign[=] constant[\s*]
variable[pattern_int] assign[=] call[name[re].compile, parameter[call[constant[].join, parameter[tuple[[<ast.Name object at 0x7da18bc723b0>, <ast.Name object at 0x7da18bc73ca0>, <ast.Name object at 0x7da18bc707c0>, <ast.Name object at 0x7da18bc70970>, <ast.Name object at 0x7da18bc70880>]]]], name[re].IGNORECASE]]
if compare[name[model] is constant[None]] begin[:]
variable[stan_code] assign[=] call[name[fit].get_stancode, parameter[]]
variable[model_pars] assign[=] name[fit].model_pars
variable[stan_code] assign[=] call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da1b1b2a680>]]
variable[stan_code] assign[=] call[name[re].sub, parameter[name[pattern_remove_comments], constant[], name[stan_code]]]
variable[stan_code] assign[=] call[call[name[stan_code].split, parameter[constant[generated quantities]]]][<ast.UnaryOp object at 0x7da1b1b2ab30>]
variable[dtypes] assign[=] call[name[re].findall, parameter[name[pattern_int], name[stan_code]]]
variable[dtypes] assign[=] <ast.DictComp object at 0x7da1b1b2a980>
return[name[dtypes]] | keyword[def] identifier[infer_dtypes] ( identifier[fit] , identifier[model] = keyword[None] ):
literal[string]
identifier[pattern_remove_comments] = identifier[re] . identifier[compile] (
literal[string] , identifier[re] . identifier[DOTALL] | identifier[re] . identifier[MULTILINE]
)
identifier[stan_integer] = literal[string]
identifier[stan_limits] = literal[string]
identifier[stan_param] = literal[string]
identifier[stan_ws] = literal[string]
identifier[pattern_int] = identifier[re] . identifier[compile] (
literal[string] . identifier[join] (( identifier[stan_integer] , identifier[stan_ws] , identifier[stan_limits] , identifier[stan_ws] , identifier[stan_param] )), identifier[re] . identifier[IGNORECASE]
)
keyword[if] identifier[model] keyword[is] keyword[None] :
identifier[stan_code] = identifier[fit] . identifier[get_stancode] ()
identifier[model_pars] = identifier[fit] . identifier[model_pars]
keyword[else] :
identifier[stan_code] = identifier[model] . identifier[program_code]
identifier[model_pars] = identifier[fit] . identifier[param_names]
identifier[stan_code] = literal[string] . identifier[join] (
identifier[line] keyword[if] literal[string] keyword[not] keyword[in] identifier[line] keyword[else] identifier[line] [: identifier[line] . identifier[find] ( literal[string] )] keyword[for] identifier[line] keyword[in] identifier[stan_code] . identifier[splitlines] ()
)
identifier[stan_code] = identifier[re] . identifier[sub] ( identifier[pattern_remove_comments] , literal[string] , identifier[stan_code] )
identifier[stan_code] = identifier[stan_code] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[dtypes] = identifier[re] . identifier[findall] ( identifier[pattern_int] , identifier[stan_code] )
identifier[dtypes] ={ identifier[item] . identifier[strip] (): literal[string] keyword[for] identifier[item] keyword[in] identifier[dtypes] keyword[if] identifier[item] . identifier[strip] () keyword[in] identifier[model_pars] }
keyword[return] identifier[dtypes] | def infer_dtypes(fit, model=None):
"""Infer dtypes from Stan model code.
Function strips out generated quantities block and searchs for `int`
dtypes after stripping out comments inside the block.
"""
pattern_remove_comments = re.compile('//.*?$|/\\*.*?\\*/|\\\'(?:\\\\.|[^\\\\\\\'])*\\\'|"(?:\\\\.|[^\\\\"])*"', re.DOTALL | re.MULTILINE)
stan_integer = 'int'
stan_limits = '(?:\\<[^\\>]+\\>)*' # ignore group: 0 or more <....>
stan_param = '([^;=\\s\\[]+)' # capture group: ends= ";", "=", "[" or whitespace
stan_ws = '\\s*' # 0 or more whitespace
pattern_int = re.compile(''.join((stan_integer, stan_ws, stan_limits, stan_ws, stan_param)), re.IGNORECASE)
if model is None:
stan_code = fit.get_stancode()
model_pars = fit.model_pars # depends on [control=['if'], data=[]]
else:
stan_code = model.program_code
model_pars = fit.param_names # remove deprecated comments
stan_code = '\n'.join((line if '#' not in line else line[:line.find('#')] for line in stan_code.splitlines()))
stan_code = re.sub(pattern_remove_comments, '', stan_code)
stan_code = stan_code.split('generated quantities')[-1]
dtypes = re.findall(pattern_int, stan_code)
dtypes = {item.strip(): 'int' for item in dtypes if item.strip() in model_pars}
return dtypes |
def _handle_info(self, *args, **kwargs):
"""
Handles info messages and executed corresponding code
"""
if 'version' in kwargs:
# set api version number and exit
self.api_version = kwargs['version']
print("Initialized API with version %s" % self.api_version)
return
try:
info_code = str(kwargs['code'])
except KeyError:
raise FaultyPayloadError("_handle_info: %s" % kwargs)
if not info_code.startswith('2'):
raise ValueError("Info Code must start with 2! %s", kwargs)
output_msg = "_handle_info(): %s" % kwargs
log.info(output_msg)
try:
self._code_handlers[info_code]()
except KeyError:
raise UnknownWSSInfo(output_msg) | def function[_handle_info, parameter[self]]:
constant[
Handles info messages and executed corresponding code
]
if compare[constant[version] in name[kwargs]] begin[:]
name[self].api_version assign[=] call[name[kwargs]][constant[version]]
call[name[print], parameter[binary_operation[constant[Initialized API with version %s] <ast.Mod object at 0x7da2590d6920> name[self].api_version]]]
return[None]
<ast.Try object at 0x7da20c990250>
if <ast.UnaryOp object at 0x7da20c990bb0> begin[:]
<ast.Raise object at 0x7da20c9928f0>
variable[output_msg] assign[=] binary_operation[constant[_handle_info(): %s] <ast.Mod object at 0x7da2590d6920> name[kwargs]]
call[name[log].info, parameter[name[output_msg]]]
<ast.Try object at 0x7da20c991840> | keyword[def] identifier[_handle_info] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[self] . identifier[api_version] = identifier[kwargs] [ literal[string] ]
identifier[print] ( literal[string] % identifier[self] . identifier[api_version] )
keyword[return]
keyword[try] :
identifier[info_code] = identifier[str] ( identifier[kwargs] [ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[raise] identifier[FaultyPayloadError] ( literal[string] % identifier[kwargs] )
keyword[if] keyword[not] identifier[info_code] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] , identifier[kwargs] )
identifier[output_msg] = literal[string] % identifier[kwargs]
identifier[log] . identifier[info] ( identifier[output_msg] )
keyword[try] :
identifier[self] . identifier[_code_handlers] [ identifier[info_code] ]()
keyword[except] identifier[KeyError] :
keyword[raise] identifier[UnknownWSSInfo] ( identifier[output_msg] ) | def _handle_info(self, *args, **kwargs):
"""
Handles info messages and executed corresponding code
"""
if 'version' in kwargs:
# set api version number and exit
self.api_version = kwargs['version']
print('Initialized API with version %s' % self.api_version)
return # depends on [control=['if'], data=['kwargs']]
try:
info_code = str(kwargs['code']) # depends on [control=['try'], data=[]]
except KeyError:
raise FaultyPayloadError('_handle_info: %s' % kwargs) # depends on [control=['except'], data=[]]
if not info_code.startswith('2'):
raise ValueError('Info Code must start with 2! %s', kwargs) # depends on [control=['if'], data=[]]
output_msg = '_handle_info(): %s' % kwargs
log.info(output_msg)
try:
self._code_handlers[info_code]() # depends on [control=['try'], data=[]]
except KeyError:
raise UnknownWSSInfo(output_msg) # depends on [control=['except'], data=[]] |
def schema_id(origin_did: str, name: str, version: str) -> str:
"""
Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier
"""
return '{}:2:{}:{}'.format(origin_did, name, version) | def function[schema_id, parameter[origin_did, name, version]]:
constant[
Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier
]
return[call[constant[{}:2:{}:{}].format, parameter[name[origin_did], name[name], name[version]]]] | keyword[def] identifier[schema_id] ( identifier[origin_did] : identifier[str] , identifier[name] : identifier[str] , identifier[version] : identifier[str] )-> identifier[str] :
literal[string]
keyword[return] literal[string] . identifier[format] ( identifier[origin_did] , identifier[name] , identifier[version] ) | def schema_id(origin_did: str, name: str, version: str) -> str:
"""
Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier
"""
return '{}:2:{}:{}'.format(origin_did, name, version) |
def printf(format, *args):
"""Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args."""
sys.stdout.write(str(format) % args)
return if_(args, lambda: args[-1], lambda: format) | def function[printf, parameter[format]]:
constant[Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args.]
call[name[sys].stdout.write, parameter[binary_operation[call[name[str], parameter[name[format]]] <ast.Mod object at 0x7da2590d6920> name[args]]]]
return[call[name[if_], parameter[name[args], <ast.Lambda object at 0x7da204567850>, <ast.Lambda object at 0x7da20e957d90>]]] | keyword[def] identifier[printf] ( identifier[format] ,* identifier[args] ):
literal[string]
identifier[sys] . identifier[stdout] . identifier[write] ( identifier[str] ( identifier[format] )% identifier[args] )
keyword[return] identifier[if_] ( identifier[args] , keyword[lambda] : identifier[args] [- literal[int] ], keyword[lambda] : identifier[format] ) | def printf(format, *args):
"""Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args."""
sys.stdout.write(str(format) % args)
return if_(args, lambda : args[-1], lambda : format) |
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Edit the properties of a service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, service_id = loadbal.parse_id(identifier)
# check if any input is provided
if ((not any([ip_address, weight, port, healthcheck_type])) and
enabled is None):
raise exceptions.CLIAbort(
'At least one property is required to be changed!')
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
ip_address_id = ip_record['id']
mgr.edit_service(loadbal_id,
service_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
env.fout('Load balancer service %s is being modified!' % identifier) | def function[cli, parameter[env, identifier, enabled, port, weight, healthcheck_type, ip_address]]:
constant[Edit the properties of a service group.]
variable[mgr] assign[=] call[name[SoftLayer].LoadBalancerManager, parameter[name[env].client]]
<ast.Tuple object at 0x7da18dc07d30> assign[=] call[name[loadbal].parse_id, parameter[name[identifier]]]
if <ast.BoolOp object at 0x7da18dc076d0> begin[:]
<ast.Raise object at 0x7da18dc06f20>
variable[ip_address_id] assign[=] constant[None]
if name[ip_address] begin[:]
variable[ip_service] assign[=] call[name[env].client][constant[Network_Subnet_IpAddress]]
variable[ip_record] assign[=] call[name[ip_service].getByIpAddress, parameter[name[ip_address]]]
variable[ip_address_id] assign[=] call[name[ip_record]][constant[id]]
call[name[mgr].edit_service, parameter[name[loadbal_id], name[service_id]]]
call[name[env].fout, parameter[binary_operation[constant[Load balancer service %s is being modified!] <ast.Mod object at 0x7da2590d6920> name[identifier]]]] | keyword[def] identifier[cli] ( identifier[env] , identifier[identifier] , identifier[enabled] , identifier[port] , identifier[weight] , identifier[healthcheck_type] , identifier[ip_address] ):
literal[string]
identifier[mgr] = identifier[SoftLayer] . identifier[LoadBalancerManager] ( identifier[env] . identifier[client] )
identifier[loadbal_id] , identifier[service_id] = identifier[loadbal] . identifier[parse_id] ( identifier[identifier] )
keyword[if] (( keyword[not] identifier[any] ([ identifier[ip_address] , identifier[weight] , identifier[port] , identifier[healthcheck_type] ])) keyword[and]
identifier[enabled] keyword[is] keyword[None] ):
keyword[raise] identifier[exceptions] . identifier[CLIAbort] (
literal[string] )
identifier[ip_address_id] = keyword[None]
keyword[if] identifier[ip_address] :
identifier[ip_service] = identifier[env] . identifier[client] [ literal[string] ]
identifier[ip_record] = identifier[ip_service] . identifier[getByIpAddress] ( identifier[ip_address] )
identifier[ip_address_id] = identifier[ip_record] [ literal[string] ]
identifier[mgr] . identifier[edit_service] ( identifier[loadbal_id] ,
identifier[service_id] ,
identifier[ip_address_id] = identifier[ip_address_id] ,
identifier[enabled] = identifier[enabled] ,
identifier[port] = identifier[port] ,
identifier[weight] = identifier[weight] ,
identifier[hc_type] = identifier[healthcheck_type] )
identifier[env] . identifier[fout] ( literal[string] % identifier[identifier] ) | def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Edit the properties of a service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
(loadbal_id, service_id) = loadbal.parse_id(identifier)
# check if any input is provided
if not any([ip_address, weight, port, healthcheck_type]) and enabled is None:
raise exceptions.CLIAbort('At least one property is required to be changed!') # depends on [control=['if'], data=[]]
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
ip_address_id = ip_record['id'] # depends on [control=['if'], data=[]]
mgr.edit_service(loadbal_id, service_id, ip_address_id=ip_address_id, enabled=enabled, port=port, weight=weight, hc_type=healthcheck_type)
env.fout('Load balancer service %s is being modified!' % identifier) |
def set_web_hook(self, url=None, certificate=None):
"""
Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an
update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized
Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts.
"""
payload = dict(url=url, certificate=certificate)
return self._get('setWebHook', payload) | def function[set_web_hook, parameter[self, url, certificate]]:
constant[
Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an
update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized
Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts.
]
variable[payload] assign[=] call[name[dict], parameter[]]
return[call[name[self]._get, parameter[constant[setWebHook], name[payload]]]] | keyword[def] identifier[set_web_hook] ( identifier[self] , identifier[url] = keyword[None] , identifier[certificate] = keyword[None] ):
literal[string]
identifier[payload] = identifier[dict] ( identifier[url] = identifier[url] , identifier[certificate] = identifier[certificate] )
keyword[return] identifier[self] . identifier[_get] ( literal[string] , identifier[payload] ) | def set_web_hook(self, url=None, certificate=None):
"""
Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an
update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized
Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts.
"""
payload = dict(url=url, certificate=certificate)
return self._get('setWebHook', payload) |
def get_obj_cols(df):
"""
Returns names of 'object' columns in the DataFrame.
"""
obj_cols = []
for idx, dt in enumerate(df.dtypes):
if dt == 'object' or is_category(dt):
obj_cols.append(df.columns.values[idx])
return obj_cols | def function[get_obj_cols, parameter[df]]:
constant[
Returns names of 'object' columns in the DataFrame.
]
variable[obj_cols] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20e957700>, <ast.Name object at 0x7da20e9562f0>]]] in starred[call[name[enumerate], parameter[name[df].dtypes]]] begin[:]
if <ast.BoolOp object at 0x7da20e9548e0> begin[:]
call[name[obj_cols].append, parameter[call[name[df].columns.values][name[idx]]]]
return[name[obj_cols]] | keyword[def] identifier[get_obj_cols] ( identifier[df] ):
literal[string]
identifier[obj_cols] =[]
keyword[for] identifier[idx] , identifier[dt] keyword[in] identifier[enumerate] ( identifier[df] . identifier[dtypes] ):
keyword[if] identifier[dt] == literal[string] keyword[or] identifier[is_category] ( identifier[dt] ):
identifier[obj_cols] . identifier[append] ( identifier[df] . identifier[columns] . identifier[values] [ identifier[idx] ])
keyword[return] identifier[obj_cols] | def get_obj_cols(df):
"""
Returns names of 'object' columns in the DataFrame.
"""
obj_cols = []
for (idx, dt) in enumerate(df.dtypes):
if dt == 'object' or is_category(dt):
obj_cols.append(df.columns.values[idx]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return obj_cols |
def jenkins_request_with_headers(jenkins_server, req):
"""
We need to get the headers in addition to the body answer
to get the location from them
This function uses jenkins_request method from python-jenkins library
with just the return call changed
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException(
"Error communicating with server[%s]: "
"empty response" % jenkins_server.server)
return {'body': response_body.decode('utf-8'), 'headers': response_headers}
except HTTPError as e:
# Jenkins's funky authentication means its nigh impossible to
# distinguish errors.
if e.code in [401, 403, 500]:
# six.moves.urllib.error.HTTPError provides a 'reason'
# attribute for all python version except for ver 2.6
# Falling back to HTTPError.msg since it contains the
# same info as reason
raise JenkinsException(
'Error in request. ' +
'Possibly authentication failed [%s]: %s' % (
e.code, e.msg)
)
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found')
else:
raise
except socket.timeout as e:
raise jenkins.TimeoutException('Error in request: %s' % e)
except URLError as e:
# python 2.6 compatibility to ensure same exception raised
# since URLError wraps a socket timeout on python 2.6.
if str(e.reason) == "timed out":
raise jenkins.TimeoutException('Error in request: %s' % e.reason)
raise JenkinsException('Error in request: %s' % e.reason) | def function[jenkins_request_with_headers, parameter[jenkins_server, req]]:
constant[
We need to get the headers in addition to the body answer
to get the location from them
This function uses jenkins_request method from python-jenkins library
with just the return call changed
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
]
<ast.Try object at 0x7da1b055b220> | keyword[def] identifier[jenkins_request_with_headers] ( identifier[jenkins_server] , identifier[req] ):
literal[string]
keyword[try] :
identifier[response] = identifier[jenkins_server] . identifier[jenkins_request] ( identifier[req] )
identifier[response_body] = identifier[response] . identifier[content]
identifier[response_headers] = identifier[response] . identifier[headers]
keyword[if] identifier[response_body] keyword[is] keyword[None] :
keyword[raise] identifier[jenkins] . identifier[EmptyResponseException] (
literal[string]
literal[string] % identifier[jenkins_server] . identifier[server] )
keyword[return] { literal[string] : identifier[response_body] . identifier[decode] ( literal[string] ), literal[string] : identifier[response_headers] }
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[code] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[raise] identifier[JenkinsException] (
literal[string] +
literal[string] %(
identifier[e] . identifier[code] , identifier[e] . identifier[msg] )
)
keyword[elif] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[jenkins] . identifier[NotFoundException] ( literal[string] )
keyword[else] :
keyword[raise]
keyword[except] identifier[socket] . identifier[timeout] keyword[as] identifier[e] :
keyword[raise] identifier[jenkins] . identifier[TimeoutException] ( literal[string] % identifier[e] )
keyword[except] identifier[URLError] keyword[as] identifier[e] :
keyword[if] identifier[str] ( identifier[e] . identifier[reason] )== literal[string] :
keyword[raise] identifier[jenkins] . identifier[TimeoutException] ( literal[string] % identifier[e] . identifier[reason] )
keyword[raise] identifier[JenkinsException] ( literal[string] % identifier[e] . identifier[reason] ) | def jenkins_request_with_headers(jenkins_server, req):
"""
We need to get the headers in addition to the body answer
to get the location from them
This function uses jenkins_request method from python-jenkins library
with just the return call changed
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException('Error communicating with server[%s]: empty response' % jenkins_server.server) # depends on [control=['if'], data=[]]
return {'body': response_body.decode('utf-8'), 'headers': response_headers} # depends on [control=['try'], data=[]]
except HTTPError as e:
# Jenkins's funky authentication means its nigh impossible to
# distinguish errors.
if e.code in [401, 403, 500]:
# six.moves.urllib.error.HTTPError provides a 'reason'
# attribute for all python version except for ver 2.6
# Falling back to HTTPError.msg since it contains the
# same info as reason
raise JenkinsException('Error in request. ' + 'Possibly authentication failed [%s]: %s' % (e.code, e.msg)) # depends on [control=['if'], data=[]]
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found') # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
except socket.timeout as e:
raise jenkins.TimeoutException('Error in request: %s' % e) # depends on [control=['except'], data=['e']]
except URLError as e:
# python 2.6 compatibility to ensure same exception raised
# since URLError wraps a socket timeout on python 2.6.
if str(e.reason) == 'timed out':
raise jenkins.TimeoutException('Error in request: %s' % e.reason) # depends on [control=['if'], data=[]]
raise JenkinsException('Error in request: %s' % e.reason) # depends on [control=['except'], data=['e']] |
def load_background_noise_map(background_noise_map_path, background_noise_map_hdu, pixel_scale,
convert_background_noise_map_from_weight_map,
convert_background_noise_map_from_inverse_noise_map):
"""Factory for loading the background noise-map from a .fits file.
This factory also includes a number of routines for converting the background noise-map from from other units (e.g. \
a weight map).
Parameters
----------
background_noise_map_path : str
The path to the background_noise_map .fits file containing the background noise-map \
(e.g. '/path/to/background_noise_map.fits')
background_noise_map_hdu : int
The hdu the background_noise_map is contained in the .fits file specified by *background_noise_map_path*.
pixel_scale : float
The size of each pixel in arc seconds.
convert_background_noise_map_from_weight_map : bool
If True, the bacground noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \
*NoiseMap.from_weight_map).
convert_background_noise_map_from_inverse_noise_map : bool
If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a \
noise-map (see *NoiseMap.from_inverse_noise_map).
"""
background_noise_map_options = sum([convert_background_noise_map_from_weight_map,
convert_background_noise_map_from_inverse_noise_map])
if background_noise_map_options == 0 and background_noise_map_path is not None:
return NoiseMap.from_fits_with_pixel_scale(file_path=background_noise_map_path, hdu=background_noise_map_hdu,
pixel_scale=pixel_scale)
elif convert_background_noise_map_from_weight_map and background_noise_map_path is not None:
weight_map = Array.from_fits(file_path=background_noise_map_path, hdu=background_noise_map_hdu)
return NoiseMap.from_weight_map(weight_map=weight_map, pixel_scale=pixel_scale)
elif convert_background_noise_map_from_inverse_noise_map and background_noise_map_path is not None:
inverse_noise_map = Array.from_fits(file_path=background_noise_map_path, hdu=background_noise_map_hdu)
return NoiseMap.from_inverse_noise_map(inverse_noise_map=inverse_noise_map, pixel_scale=pixel_scale)
else:
return None | def function[load_background_noise_map, parameter[background_noise_map_path, background_noise_map_hdu, pixel_scale, convert_background_noise_map_from_weight_map, convert_background_noise_map_from_inverse_noise_map]]:
constant[Factory for loading the background noise-map from a .fits file.
This factory also includes a number of routines for converting the background noise-map from from other units (e.g. a weight map).
Parameters
----------
background_noise_map_path : str
The path to the background_noise_map .fits file containing the background noise-map (e.g. '/path/to/background_noise_map.fits')
background_noise_map_hdu : int
The hdu the background_noise_map is contained in the .fits file specified by *background_noise_map_path*.
pixel_scale : float
The size of each pixel in arc seconds.
convert_background_noise_map_from_weight_map : bool
If True, the bacground noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see *NoiseMap.from_weight_map).
convert_background_noise_map_from_inverse_noise_map : bool
If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a noise-map (see *NoiseMap.from_inverse_noise_map).
]
variable[background_noise_map_options] assign[=] call[name[sum], parameter[list[[<ast.Name object at 0x7da20c76d810>, <ast.Name object at 0x7da20c76d930>]]]]
if <ast.BoolOp object at 0x7da20c76f1f0> begin[:]
return[call[name[NoiseMap].from_fits_with_pixel_scale, parameter[]]] | keyword[def] identifier[load_background_noise_map] ( identifier[background_noise_map_path] , identifier[background_noise_map_hdu] , identifier[pixel_scale] ,
identifier[convert_background_noise_map_from_weight_map] ,
identifier[convert_background_noise_map_from_inverse_noise_map] ):
literal[string]
identifier[background_noise_map_options] = identifier[sum] ([ identifier[convert_background_noise_map_from_weight_map] ,
identifier[convert_background_noise_map_from_inverse_noise_map] ])
keyword[if] identifier[background_noise_map_options] == literal[int] keyword[and] identifier[background_noise_map_path] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[NoiseMap] . identifier[from_fits_with_pixel_scale] ( identifier[file_path] = identifier[background_noise_map_path] , identifier[hdu] = identifier[background_noise_map_hdu] ,
identifier[pixel_scale] = identifier[pixel_scale] )
keyword[elif] identifier[convert_background_noise_map_from_weight_map] keyword[and] identifier[background_noise_map_path] keyword[is] keyword[not] keyword[None] :
identifier[weight_map] = identifier[Array] . identifier[from_fits] ( identifier[file_path] = identifier[background_noise_map_path] , identifier[hdu] = identifier[background_noise_map_hdu] )
keyword[return] identifier[NoiseMap] . identifier[from_weight_map] ( identifier[weight_map] = identifier[weight_map] , identifier[pixel_scale] = identifier[pixel_scale] )
keyword[elif] identifier[convert_background_noise_map_from_inverse_noise_map] keyword[and] identifier[background_noise_map_path] keyword[is] keyword[not] keyword[None] :
identifier[inverse_noise_map] = identifier[Array] . identifier[from_fits] ( identifier[file_path] = identifier[background_noise_map_path] , identifier[hdu] = identifier[background_noise_map_hdu] )
keyword[return] identifier[NoiseMap] . identifier[from_inverse_noise_map] ( identifier[inverse_noise_map] = identifier[inverse_noise_map] , identifier[pixel_scale] = identifier[pixel_scale] )
keyword[else] :
keyword[return] keyword[None] | def load_background_noise_map(background_noise_map_path, background_noise_map_hdu, pixel_scale, convert_background_noise_map_from_weight_map, convert_background_noise_map_from_inverse_noise_map):
"""Factory for loading the background noise-map from a .fits file.
This factory also includes a number of routines for converting the background noise-map from from other units (e.g. a weight map).
Parameters
----------
background_noise_map_path : str
The path to the background_noise_map .fits file containing the background noise-map (e.g. '/path/to/background_noise_map.fits')
background_noise_map_hdu : int
The hdu the background_noise_map is contained in the .fits file specified by *background_noise_map_path*.
pixel_scale : float
The size of each pixel in arc seconds.
convert_background_noise_map_from_weight_map : bool
If True, the bacground noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see *NoiseMap.from_weight_map).
convert_background_noise_map_from_inverse_noise_map : bool
If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a noise-map (see *NoiseMap.from_inverse_noise_map).
"""
background_noise_map_options = sum([convert_background_noise_map_from_weight_map, convert_background_noise_map_from_inverse_noise_map])
if background_noise_map_options == 0 and background_noise_map_path is not None:
return NoiseMap.from_fits_with_pixel_scale(file_path=background_noise_map_path, hdu=background_noise_map_hdu, pixel_scale=pixel_scale) # depends on [control=['if'], data=[]]
elif convert_background_noise_map_from_weight_map and background_noise_map_path is not None:
weight_map = Array.from_fits(file_path=background_noise_map_path, hdu=background_noise_map_hdu)
return NoiseMap.from_weight_map(weight_map=weight_map, pixel_scale=pixel_scale) # depends on [control=['if'], data=[]]
elif convert_background_noise_map_from_inverse_noise_map and background_noise_map_path is not None:
inverse_noise_map = Array.from_fits(file_path=background_noise_map_path, hdu=background_noise_map_hdu)
return NoiseMap.from_inverse_noise_map(inverse_noise_map=inverse_noise_map, pixel_scale=pixel_scale) # depends on [control=['if'], data=[]]
else:
return None |
def handle(self, signum, frame):
"""This method is called when a signal is received."""
if self.print_method:
self.print_method('\nProgram received signal %s.'
% self.signame)
if self.print_stack:
import traceback
strings = traceback.format_stack(frame)
for s in strings:
if s[-1] == '\n': s = s[0:-1]
self.print_method(s)
pass
pass
if self.b_stop:
core = self.dbgr.core
old_trace_hook_suspend = core.trace_hook_suspend
core.trace_hook_suspend = True
core.stop_reason = ('intercepting signal %s (%d)' %
(self.signame, signum))
core.processor.event_processor(frame, 'signal', signum)
core.trace_hook_suspend = old_trace_hook_suspend
pass
if self.pass_along:
# pass the signal to the program
if self.old_handler:
self.old_handler(signum, frame)
pass
pass
return | def function[handle, parameter[self, signum, frame]]:
constant[This method is called when a signal is received.]
if name[self].print_method begin[:]
call[name[self].print_method, parameter[binary_operation[constant[
Program received signal %s.] <ast.Mod object at 0x7da2590d6920> name[self].signame]]]
if name[self].print_stack begin[:]
import module[traceback]
variable[strings] assign[=] call[name[traceback].format_stack, parameter[name[frame]]]
for taget[name[s]] in starred[name[strings]] begin[:]
if compare[call[name[s]][<ast.UnaryOp object at 0x7da1b05335e0>] equal[==] constant[
]] begin[:]
variable[s] assign[=] call[name[s]][<ast.Slice object at 0x7da1b05336d0>]
call[name[self].print_method, parameter[name[s]]]
pass
pass
if name[self].b_stop begin[:]
variable[core] assign[=] name[self].dbgr.core
variable[old_trace_hook_suspend] assign[=] name[core].trace_hook_suspend
name[core].trace_hook_suspend assign[=] constant[True]
name[core].stop_reason assign[=] binary_operation[constant[intercepting signal %s (%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0532ad0>, <ast.Name object at 0x7da1b0531a80>]]]
call[name[core].processor.event_processor, parameter[name[frame], constant[signal], name[signum]]]
name[core].trace_hook_suspend assign[=] name[old_trace_hook_suspend]
pass
if name[self].pass_along begin[:]
if name[self].old_handler begin[:]
call[name[self].old_handler, parameter[name[signum], name[frame]]]
pass
pass
return[None] | keyword[def] identifier[handle] ( identifier[self] , identifier[signum] , identifier[frame] ):
literal[string]
keyword[if] identifier[self] . identifier[print_method] :
identifier[self] . identifier[print_method] ( literal[string]
% identifier[self] . identifier[signame] )
keyword[if] identifier[self] . identifier[print_stack] :
keyword[import] identifier[traceback]
identifier[strings] = identifier[traceback] . identifier[format_stack] ( identifier[frame] )
keyword[for] identifier[s] keyword[in] identifier[strings] :
keyword[if] identifier[s] [- literal[int] ]== literal[string] : identifier[s] = identifier[s] [ literal[int] :- literal[int] ]
identifier[self] . identifier[print_method] ( identifier[s] )
keyword[pass]
keyword[pass]
keyword[if] identifier[self] . identifier[b_stop] :
identifier[core] = identifier[self] . identifier[dbgr] . identifier[core]
identifier[old_trace_hook_suspend] = identifier[core] . identifier[trace_hook_suspend]
identifier[core] . identifier[trace_hook_suspend] = keyword[True]
identifier[core] . identifier[stop_reason] =( literal[string] %
( identifier[self] . identifier[signame] , identifier[signum] ))
identifier[core] . identifier[processor] . identifier[event_processor] ( identifier[frame] , literal[string] , identifier[signum] )
identifier[core] . identifier[trace_hook_suspend] = identifier[old_trace_hook_suspend]
keyword[pass]
keyword[if] identifier[self] . identifier[pass_along] :
keyword[if] identifier[self] . identifier[old_handler] :
identifier[self] . identifier[old_handler] ( identifier[signum] , identifier[frame] )
keyword[pass]
keyword[pass]
keyword[return] | def handle(self, signum, frame):
"""This method is called when a signal is received."""
if self.print_method:
self.print_method('\nProgram received signal %s.' % self.signame) # depends on [control=['if'], data=[]]
if self.print_stack:
import traceback
strings = traceback.format_stack(frame)
for s in strings:
if s[-1] == '\n':
s = s[0:-1] # depends on [control=['if'], data=[]]
self.print_method(s)
pass # depends on [control=['for'], data=['s']]
pass # depends on [control=['if'], data=[]]
if self.b_stop:
core = self.dbgr.core
old_trace_hook_suspend = core.trace_hook_suspend
core.trace_hook_suspend = True
core.stop_reason = 'intercepting signal %s (%d)' % (self.signame, signum)
core.processor.event_processor(frame, 'signal', signum)
core.trace_hook_suspend = old_trace_hook_suspend
pass # depends on [control=['if'], data=[]]
if self.pass_along:
# pass the signal to the program
if self.old_handler:
self.old_handler(signum, frame)
pass # depends on [control=['if'], data=[]]
pass # depends on [control=['if'], data=[]]
return |
def update(self, password=values.unset):
"""
Update the CredentialInstance
:param unicode password: The password will not be returned in the response
:returns: Updated CredentialInstance
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialInstance
"""
return self._proxy.update(password=password, ) | def function[update, parameter[self, password]]:
constant[
Update the CredentialInstance
:param unicode password: The password will not be returned in the response
:returns: Updated CredentialInstance
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialInstance
]
return[call[name[self]._proxy.update, parameter[]]] | keyword[def] identifier[update] ( identifier[self] , identifier[password] = identifier[values] . identifier[unset] ):
literal[string]
keyword[return] identifier[self] . identifier[_proxy] . identifier[update] ( identifier[password] = identifier[password] ,) | def update(self, password=values.unset):
"""
Update the CredentialInstance
:param unicode password: The password will not be returned in the response
:returns: Updated CredentialInstance
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialInstance
"""
return self._proxy.update(password=password) |
def write_unitary_matrix_to_hdf5(temperature,
mesh,
unitary_matrix=None,
sigma=None,
sigma_cutoff=None,
solver=None,
filename=None,
verbose=False):
"""Write eigenvectors of collision matrices at temperatures.
Depending on the choice of the solver, eigenvectors are sotred in
either column-wise or row-wise.
"""
suffix = _get_filename_suffix(mesh,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
hdf5_filename = "unitary" + suffix + ".hdf5"
with h5py.File(hdf5_filename, 'w') as w:
w.create_dataset('temperature', data=temperature)
if unitary_matrix is not None:
w.create_dataset('unitary_matrix', data=unitary_matrix)
if solver is not None:
w.create_dataset('solver', data=solver)
if verbose:
if len(temperature) > 1:
text = "Unitary matrices "
else:
text = "Unitary matrix "
if sigma is not None:
text += "at sigma %s " % _del_zeros(sigma)
if sigma_cutoff is not None:
text += "(%4.2f SD) " % sigma_cutoff
if len(temperature) > 1:
text += "were written into "
else:
text += "was written into "
if sigma is not None:
text += "\n"
text += "\"%s\"." % hdf5_filename
print(text) | def function[write_unitary_matrix_to_hdf5, parameter[temperature, mesh, unitary_matrix, sigma, sigma_cutoff, solver, filename, verbose]]:
constant[Write eigenvectors of collision matrices at temperatures.
Depending on the choice of the solver, eigenvectors are sotred in
either column-wise or row-wise.
]
variable[suffix] assign[=] call[name[_get_filename_suffix], parameter[name[mesh]]]
variable[hdf5_filename] assign[=] binary_operation[binary_operation[constant[unitary] + name[suffix]] + constant[.hdf5]]
with call[name[h5py].File, parameter[name[hdf5_filename], constant[w]]] begin[:]
call[name[w].create_dataset, parameter[constant[temperature]]]
if compare[name[unitary_matrix] is_not constant[None]] begin[:]
call[name[w].create_dataset, parameter[constant[unitary_matrix]]]
if compare[name[solver] is_not constant[None]] begin[:]
call[name[w].create_dataset, parameter[constant[solver]]]
if name[verbose] begin[:]
if compare[call[name[len], parameter[name[temperature]]] greater[>] constant[1]] begin[:]
variable[text] assign[=] constant[Unitary matrices ]
if compare[name[sigma] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18f09c3d0>
if compare[name[sigma_cutoff] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18f09f970>
if compare[call[name[len], parameter[name[temperature]]] greater[>] constant[1]] begin[:]
<ast.AugAssign object at 0x7da18f09ea40>
if compare[name[sigma] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18bcc98d0>
<ast.AugAssign object at 0x7da18bcc9150>
call[name[print], parameter[name[text]]] | keyword[def] identifier[write_unitary_matrix_to_hdf5] ( identifier[temperature] ,
identifier[mesh] ,
identifier[unitary_matrix] = keyword[None] ,
identifier[sigma] = keyword[None] ,
identifier[sigma_cutoff] = keyword[None] ,
identifier[solver] = keyword[None] ,
identifier[filename] = keyword[None] ,
identifier[verbose] = keyword[False] ):
literal[string]
identifier[suffix] = identifier[_get_filename_suffix] ( identifier[mesh] ,
identifier[sigma] = identifier[sigma] ,
identifier[sigma_cutoff] = identifier[sigma_cutoff] ,
identifier[filename] = identifier[filename] )
identifier[hdf5_filename] = literal[string] + identifier[suffix] + literal[string]
keyword[with] identifier[h5py] . identifier[File] ( identifier[hdf5_filename] , literal[string] ) keyword[as] identifier[w] :
identifier[w] . identifier[create_dataset] ( literal[string] , identifier[data] = identifier[temperature] )
keyword[if] identifier[unitary_matrix] keyword[is] keyword[not] keyword[None] :
identifier[w] . identifier[create_dataset] ( literal[string] , identifier[data] = identifier[unitary_matrix] )
keyword[if] identifier[solver] keyword[is] keyword[not] keyword[None] :
identifier[w] . identifier[create_dataset] ( literal[string] , identifier[data] = identifier[solver] )
keyword[if] identifier[verbose] :
keyword[if] identifier[len] ( identifier[temperature] )> literal[int] :
identifier[text] = literal[string]
keyword[else] :
identifier[text] = literal[string]
keyword[if] identifier[sigma] keyword[is] keyword[not] keyword[None] :
identifier[text] += literal[string] % identifier[_del_zeros] ( identifier[sigma] )
keyword[if] identifier[sigma_cutoff] keyword[is] keyword[not] keyword[None] :
identifier[text] += literal[string] % identifier[sigma_cutoff]
keyword[if] identifier[len] ( identifier[temperature] )> literal[int] :
identifier[text] += literal[string]
keyword[else] :
identifier[text] += literal[string]
keyword[if] identifier[sigma] keyword[is] keyword[not] keyword[None] :
identifier[text] += literal[string]
identifier[text] += literal[string] % identifier[hdf5_filename]
identifier[print] ( identifier[text] ) | def write_unitary_matrix_to_hdf5(temperature, mesh, unitary_matrix=None, sigma=None, sigma_cutoff=None, solver=None, filename=None, verbose=False):
"""Write eigenvectors of collision matrices at temperatures.
Depending on the choice of the solver, eigenvectors are sotred in
either column-wise or row-wise.
"""
suffix = _get_filename_suffix(mesh, sigma=sigma, sigma_cutoff=sigma_cutoff, filename=filename)
hdf5_filename = 'unitary' + suffix + '.hdf5'
with h5py.File(hdf5_filename, 'w') as w:
w.create_dataset('temperature', data=temperature)
if unitary_matrix is not None:
w.create_dataset('unitary_matrix', data=unitary_matrix) # depends on [control=['if'], data=['unitary_matrix']]
if solver is not None:
w.create_dataset('solver', data=solver) # depends on [control=['if'], data=['solver']]
if verbose:
if len(temperature) > 1:
text = 'Unitary matrices ' # depends on [control=['if'], data=[]]
else:
text = 'Unitary matrix '
if sigma is not None:
text += 'at sigma %s ' % _del_zeros(sigma)
if sigma_cutoff is not None:
text += '(%4.2f SD) ' % sigma_cutoff # depends on [control=['if'], data=['sigma_cutoff']] # depends on [control=['if'], data=['sigma']]
if len(temperature) > 1:
text += 'were written into ' # depends on [control=['if'], data=[]]
else:
text += 'was written into '
if sigma is not None:
text += '\n' # depends on [control=['if'], data=[]]
text += '"%s".' % hdf5_filename
print(text) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['w']] |
def sample_ising(self, h, J, offset=0, scalar=None,
bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the problem provided by h, J, offset
if scalar is not given, problem is scaled based on bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
if any(len(inter) > 2 for inter in J):
# handle HUBO
import warnings
msg = ("Support for higher order Ising models in ScaleComposite is "
"deprecated and will be removed in dimod 0.9.0. Please use "
"PolyScaleComposite.sample_hising instead.")
warnings.warn(msg, DeprecationWarning)
from dimod.reference.composites.higherordercomposites import PolyScaleComposite
from dimod.higherorder.polynomial import BinaryPolynomial
poly = BinaryPolynomial.from_hising(h, J, offset=offset)
ignored_terms = set()
if ignored_variables is not None:
ignored_terms.update(frozenset(v) for v in ignored_variables)
if ignored_interactions is not None:
ignored_terms.update(frozenset(inter) for inter in ignored_interactions)
if ignore_offset:
ignored_terms.add(frozenset())
return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar,
bias_range=bias_range,
poly_range=quadratic_range,
ignored_terms=ignored_terms,
**parameters)
bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
return self.sample(bqm, scalar=scalar,
bias_range=bias_range,
quadratic_range=quadratic_range,
ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset, **parameters) | def function[sample_ising, parameter[self, h, J, offset, scalar, bias_range, quadratic_range, ignored_variables, ignored_interactions, ignore_offset]]:
constant[ Scale and sample from the problem provided by h, J, offset
if scalar is not given, problem is scaled based on bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b07f4b50>]] begin[:]
import module[warnings]
variable[msg] assign[=] constant[Support for higher order Ising models in ScaleComposite is deprecated and will be removed in dimod 0.9.0. Please use PolyScaleComposite.sample_hising instead.]
call[name[warnings].warn, parameter[name[msg], name[DeprecationWarning]]]
from relative_module[dimod.reference.composites.higherordercomposites] import module[PolyScaleComposite]
from relative_module[dimod.higherorder.polynomial] import module[BinaryPolynomial]
variable[poly] assign[=] call[name[BinaryPolynomial].from_hising, parameter[name[h], name[J]]]
variable[ignored_terms] assign[=] call[name[set], parameter[]]
if compare[name[ignored_variables] is_not constant[None]] begin[:]
call[name[ignored_terms].update, parameter[<ast.GeneratorExp object at 0x7da1b07f4b80>]]
if compare[name[ignored_interactions] is_not constant[None]] begin[:]
call[name[ignored_terms].update, parameter[<ast.GeneratorExp object at 0x7da1b07f6d70>]]
if name[ignore_offset] begin[:]
call[name[ignored_terms].add, parameter[call[name[frozenset], parameter[]]]]
return[call[call[name[PolyScaleComposite], parameter[name[self].child]].sample_poly, parameter[name[poly]]]]
variable[bqm] assign[=] call[name[BinaryQuadraticModel].from_ising, parameter[name[h], name[J]]]
return[call[name[self].sample, parameter[name[bqm]]]] | keyword[def] identifier[sample_ising] ( identifier[self] , identifier[h] , identifier[J] , identifier[offset] = literal[int] , identifier[scalar] = keyword[None] ,
identifier[bias_range] = literal[int] , identifier[quadratic_range] = keyword[None] ,
identifier[ignored_variables] = keyword[None] , identifier[ignored_interactions] = keyword[None] ,
identifier[ignore_offset] = keyword[False] ,** identifier[parameters] ):
literal[string]
keyword[if] identifier[any] ( identifier[len] ( identifier[inter] )> literal[int] keyword[for] identifier[inter] keyword[in] identifier[J] ):
keyword[import] identifier[warnings]
identifier[msg] =( literal[string]
literal[string]
literal[string] )
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[DeprecationWarning] )
keyword[from] identifier[dimod] . identifier[reference] . identifier[composites] . identifier[higherordercomposites] keyword[import] identifier[PolyScaleComposite]
keyword[from] identifier[dimod] . identifier[higherorder] . identifier[polynomial] keyword[import] identifier[BinaryPolynomial]
identifier[poly] = identifier[BinaryPolynomial] . identifier[from_hising] ( identifier[h] , identifier[J] , identifier[offset] = identifier[offset] )
identifier[ignored_terms] = identifier[set] ()
keyword[if] identifier[ignored_variables] keyword[is] keyword[not] keyword[None] :
identifier[ignored_terms] . identifier[update] ( identifier[frozenset] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[ignored_variables] )
keyword[if] identifier[ignored_interactions] keyword[is] keyword[not] keyword[None] :
identifier[ignored_terms] . identifier[update] ( identifier[frozenset] ( identifier[inter] ) keyword[for] identifier[inter] keyword[in] identifier[ignored_interactions] )
keyword[if] identifier[ignore_offset] :
identifier[ignored_terms] . identifier[add] ( identifier[frozenset] ())
keyword[return] identifier[PolyScaleComposite] ( identifier[self] . identifier[child] ). identifier[sample_poly] ( identifier[poly] , identifier[scalar] = identifier[scalar] ,
identifier[bias_range] = identifier[bias_range] ,
identifier[poly_range] = identifier[quadratic_range] ,
identifier[ignored_terms] = identifier[ignored_terms] ,
** identifier[parameters] )
identifier[bqm] = identifier[BinaryQuadraticModel] . identifier[from_ising] ( identifier[h] , identifier[J] , identifier[offset] = identifier[offset] )
keyword[return] identifier[self] . identifier[sample] ( identifier[bqm] , identifier[scalar] = identifier[scalar] ,
identifier[bias_range] = identifier[bias_range] ,
identifier[quadratic_range] = identifier[quadratic_range] ,
identifier[ignored_variables] = identifier[ignored_variables] ,
identifier[ignored_interactions] = identifier[ignored_interactions] ,
identifier[ignore_offset] = identifier[ignore_offset] ,** identifier[parameters] ) | def sample_ising(self, h, J, offset=0, scalar=None, bias_range=1, quadratic_range=None, ignored_variables=None, ignored_interactions=None, ignore_offset=False, **parameters):
""" Scale and sample from the problem provided by h, J, offset
if scalar is not given, problem is scaled based on bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
if any((len(inter) > 2 for inter in J)):
# handle HUBO
import warnings
msg = 'Support for higher order Ising models in ScaleComposite is deprecated and will be removed in dimod 0.9.0. Please use PolyScaleComposite.sample_hising instead.'
warnings.warn(msg, DeprecationWarning)
from dimod.reference.composites.higherordercomposites import PolyScaleComposite
from dimod.higherorder.polynomial import BinaryPolynomial
poly = BinaryPolynomial.from_hising(h, J, offset=offset)
ignored_terms = set()
if ignored_variables is not None:
ignored_terms.update((frozenset(v) for v in ignored_variables)) # depends on [control=['if'], data=['ignored_variables']]
if ignored_interactions is not None:
ignored_terms.update((frozenset(inter) for inter in ignored_interactions)) # depends on [control=['if'], data=['ignored_interactions']]
if ignore_offset:
ignored_terms.add(frozenset()) # depends on [control=['if'], data=[]]
return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar, bias_range=bias_range, poly_range=quadratic_range, ignored_terms=ignored_terms, **parameters) # depends on [control=['if'], data=[]]
bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
return self.sample(bqm, scalar=scalar, bias_range=bias_range, quadratic_range=quadratic_range, ignored_variables=ignored_variables, ignored_interactions=ignored_interactions, ignore_offset=ignore_offset, **parameters) |
def get_metadata(main_file):
"""Get metadata about the package/module.
Positional arguments:
main_file -- python file path within `HERE` which has __author__ and the others defined as global variables.
Returns:
Dictionary to be passed into setuptools.setup().
"""
with open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(os.path.join(HERE, main_file), encoding='utf-8') as f:
lines = [l.strip() for l in f if l.startswith('__')]
metadata = ast.literal_eval("{'" + ", '".join([l.replace(' = ', "': ") for l in lines]) + '}')
__author__, __license__, __version__ = [metadata[k] for k in ('__author__', '__license__', '__version__')]
everything = dict(version=__version__, long_description=long_description, author=__author__, license=__license__)
if not all(everything.values()):
raise ValueError('Failed to obtain metadata from package/module.')
return everything | def function[get_metadata, parameter[main_file]]:
constant[Get metadata about the package/module.
Positional arguments:
main_file -- python file path within `HERE` which has __author__ and the others defined as global variables.
Returns:
Dictionary to be passed into setuptools.setup().
]
with call[name[open], parameter[call[name[os].path.join, parameter[name[HERE], constant[README.md]]]]] begin[:]
variable[long_description] assign[=] call[name[f].read, parameter[]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[HERE], name[main_file]]]]] begin[:]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b222f790>
variable[metadata] assign[=] call[name[ast].literal_eval, parameter[binary_operation[binary_operation[constant[{'] + call[constant[, '].join, parameter[<ast.ListComp object at 0x7da1b242b400>]]] + constant[}]]]]
<ast.Tuple object at 0x7da1b2429c30> assign[=] <ast.ListComp object at 0x7da1b242b2b0>
variable[everything] assign[=] call[name[dict], parameter[]]
if <ast.UnaryOp object at 0x7da1b2429b70> begin[:]
<ast.Raise object at 0x7da1b242b6d0>
return[name[everything]] | keyword[def] identifier[get_metadata] ( identifier[main_file] ):
literal[string]
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[HERE] , literal[string] ), identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[long_description] = identifier[f] . identifier[read] ()
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[HERE] , identifier[main_file] ), identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[lines] =[ identifier[l] . identifier[strip] () keyword[for] identifier[l] keyword[in] identifier[f] keyword[if] identifier[l] . identifier[startswith] ( literal[string] )]
identifier[metadata] = identifier[ast] . identifier[literal_eval] ( literal[string] + literal[string] . identifier[join] ([ identifier[l] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[l] keyword[in] identifier[lines] ])+ literal[string] )
identifier[__author__] , identifier[__license__] , identifier[__version__] =[ identifier[metadata] [ identifier[k] ] keyword[for] identifier[k] keyword[in] ( literal[string] , literal[string] , literal[string] )]
identifier[everything] = identifier[dict] ( identifier[version] = identifier[__version__] , identifier[long_description] = identifier[long_description] , identifier[author] = identifier[__author__] , identifier[license] = identifier[__license__] )
keyword[if] keyword[not] identifier[all] ( identifier[everything] . identifier[values] ()):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[everything] | def get_metadata(main_file):
"""Get metadata about the package/module.
Positional arguments:
main_file -- python file path within `HERE` which has __author__ and the others defined as global variables.
Returns:
Dictionary to be passed into setuptools.setup().
"""
with open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read() # depends on [control=['with'], data=['f']]
with open(os.path.join(HERE, main_file), encoding='utf-8') as f:
lines = [l.strip() for l in f if l.startswith('__')] # depends on [control=['with'], data=['f']]
metadata = ast.literal_eval("{'" + ", '".join([l.replace(' = ', "': ") for l in lines]) + '}')
(__author__, __license__, __version__) = [metadata[k] for k in ('__author__', '__license__', '__version__')]
everything = dict(version=__version__, long_description=long_description, author=__author__, license=__license__)
if not all(everything.values()):
raise ValueError('Failed to obtain metadata from package/module.') # depends on [control=['if'], data=[]]
return everything |
def first_order_score(y, mean, scale, shape, skewness):
""" GAS Laplace Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Laplace distribution
scale : float
scale parameter for the Laplace distribution
shape : float
tail thickness parameter for the Laplace distribution
skewness : float
skewness parameter for the Laplace distribution
Returns
----------
- Score of the Laplace family
"""
return (y-mean)/float(scale*np.abs(y-mean)) | def function[first_order_score, parameter[y, mean, scale, shape, skewness]]:
constant[ GAS Laplace Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Laplace distribution
scale : float
scale parameter for the Laplace distribution
shape : float
tail thickness parameter for the Laplace distribution
skewness : float
skewness parameter for the Laplace distribution
Returns
----------
- Score of the Laplace family
]
return[binary_operation[binary_operation[name[y] - name[mean]] / call[name[float], parameter[binary_operation[name[scale] * call[name[np].abs, parameter[binary_operation[name[y] - name[mean]]]]]]]]] | keyword[def] identifier[first_order_score] ( identifier[y] , identifier[mean] , identifier[scale] , identifier[shape] , identifier[skewness] ):
literal[string]
keyword[return] ( identifier[y] - identifier[mean] )/ identifier[float] ( identifier[scale] * identifier[np] . identifier[abs] ( identifier[y] - identifier[mean] )) | def first_order_score(y, mean, scale, shape, skewness):
""" GAS Laplace Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Laplace distribution
scale : float
scale parameter for the Laplace distribution
shape : float
tail thickness parameter for the Laplace distribution
skewness : float
skewness parameter for the Laplace distribution
Returns
----------
- Score of the Laplace family
"""
return (y - mean) / float(scale * np.abs(y - mean)) |
def _shl32(ins):
""" Logical Left shift 32bit unsigned integers.
The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 0, do nothing
"""
op1, op2 = tuple(ins.quad[2:])
if is_int(op2):
output = _32bit_oper(op1)
if int(op2) == 0:
output.append('push de')
output.append('push hl')
return output
if int(op2) > 1:
label = tmp_label()
output.append('ld b, %s' % op2)
output.append('%s:' % label)
output.append('call __SHL32')
output.append('djnz %s' % label)
else:
output.append('call __SHL32')
output.append('push de')
output.append('push hl')
REQUIRES.add('shl32.asm')
return output
output = _8bit_oper(op2)
output.append('ld b, a')
output.extend(_32bit_oper(op1))
label = tmp_label()
output.append('%s:' % label)
output.append('call __SHL32')
output.append('djnz %s' % label)
output.append('push de')
output.append('push hl')
REQUIRES.add('shl32.asm')
return output | def function[_shl32, parameter[ins]]:
constant[ Logical Left shift 32bit unsigned integers.
The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 0, do nothing
]
<ast.Tuple object at 0x7da204566d10> assign[=] call[name[tuple], parameter[call[name[ins].quad][<ast.Slice object at 0x7da2045658d0>]]]
if call[name[is_int], parameter[name[op2]]] begin[:]
variable[output] assign[=] call[name[_32bit_oper], parameter[name[op1]]]
if compare[call[name[int], parameter[name[op2]]] equal[==] constant[0]] begin[:]
call[name[output].append, parameter[constant[push de]]]
call[name[output].append, parameter[constant[push hl]]]
return[name[output]]
if compare[call[name[int], parameter[name[op2]]] greater[>] constant[1]] begin[:]
variable[label] assign[=] call[name[tmp_label], parameter[]]
call[name[output].append, parameter[binary_operation[constant[ld b, %s] <ast.Mod object at 0x7da2590d6920> name[op2]]]]
call[name[output].append, parameter[binary_operation[constant[%s:] <ast.Mod object at 0x7da2590d6920> name[label]]]]
call[name[output].append, parameter[constant[call __SHL32]]]
call[name[output].append, parameter[binary_operation[constant[djnz %s] <ast.Mod object at 0x7da2590d6920> name[label]]]]
call[name[output].append, parameter[constant[push de]]]
call[name[output].append, parameter[constant[push hl]]]
call[name[REQUIRES].add, parameter[constant[shl32.asm]]]
return[name[output]]
variable[output] assign[=] call[name[_8bit_oper], parameter[name[op2]]]
call[name[output].append, parameter[constant[ld b, a]]]
call[name[output].extend, parameter[call[name[_32bit_oper], parameter[name[op1]]]]]
variable[label] assign[=] call[name[tmp_label], parameter[]]
call[name[output].append, parameter[binary_operation[constant[%s:] <ast.Mod object at 0x7da2590d6920> name[label]]]]
call[name[output].append, parameter[constant[call __SHL32]]]
call[name[output].append, parameter[binary_operation[constant[djnz %s] <ast.Mod object at 0x7da2590d6920> name[label]]]]
call[name[output].append, parameter[constant[push de]]]
call[name[output].append, parameter[constant[push hl]]]
call[name[REQUIRES].add, parameter[constant[shl32.asm]]]
return[name[output]] | keyword[def] identifier[_shl32] ( identifier[ins] ):
literal[string]
identifier[op1] , identifier[op2] = identifier[tuple] ( identifier[ins] . identifier[quad] [ literal[int] :])
keyword[if] identifier[is_int] ( identifier[op2] ):
identifier[output] = identifier[_32bit_oper] ( identifier[op1] )
keyword[if] identifier[int] ( identifier[op2] )== literal[int] :
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
keyword[return] identifier[output]
keyword[if] identifier[int] ( identifier[op2] )> literal[int] :
identifier[label] = identifier[tmp_label] ()
identifier[output] . identifier[append] ( literal[string] % identifier[op2] )
identifier[output] . identifier[append] ( literal[string] % identifier[label] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] % identifier[label] )
keyword[else] :
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output]
identifier[output] = identifier[_8bit_oper] ( identifier[op2] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[extend] ( identifier[_32bit_oper] ( identifier[op1] ))
identifier[label] = identifier[tmp_label] ()
identifier[output] . identifier[append] ( literal[string] % identifier[label] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] % identifier[label] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output] | def _shl32(ins):
""" Logical Left shift 32bit unsigned integers.
The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 0, do nothing
"""
(op1, op2) = tuple(ins.quad[2:])
if is_int(op2):
output = _32bit_oper(op1)
if int(op2) == 0:
output.append('push de')
output.append('push hl')
return output # depends on [control=['if'], data=[]]
if int(op2) > 1:
label = tmp_label()
output.append('ld b, %s' % op2)
output.append('%s:' % label)
output.append('call __SHL32')
output.append('djnz %s' % label) # depends on [control=['if'], data=[]]
else:
output.append('call __SHL32')
output.append('push de')
output.append('push hl')
REQUIRES.add('shl32.asm')
return output # depends on [control=['if'], data=[]]
output = _8bit_oper(op2)
output.append('ld b, a')
output.extend(_32bit_oper(op1))
label = tmp_label()
output.append('%s:' % label)
output.append('call __SHL32')
output.append('djnz %s' % label)
output.append('push de')
output.append('push hl')
REQUIRES.add('shl32.asm')
return output |
def copy(self, source, destination, recursive=False, use_sudo=False):
"""
Copy a file or directory
"""
func = use_sudo and run_as_root or self.run
options = '-r ' if recursive else ''
func('/bin/cp {0}{1} {2}'.format(options, quote(source), quote(destination))) | def function[copy, parameter[self, source, destination, recursive, use_sudo]]:
constant[
Copy a file or directory
]
variable[func] assign[=] <ast.BoolOp object at 0x7da1b00deb30>
variable[options] assign[=] <ast.IfExp object at 0x7da1b00dd510>
call[name[func], parameter[call[constant[/bin/cp {0}{1} {2}].format, parameter[name[options], call[name[quote], parameter[name[source]]], call[name[quote], parameter[name[destination]]]]]]] | keyword[def] identifier[copy] ( identifier[self] , identifier[source] , identifier[destination] , identifier[recursive] = keyword[False] , identifier[use_sudo] = keyword[False] ):
literal[string]
identifier[func] = identifier[use_sudo] keyword[and] identifier[run_as_root] keyword[or] identifier[self] . identifier[run]
identifier[options] = literal[string] keyword[if] identifier[recursive] keyword[else] literal[string]
identifier[func] ( literal[string] . identifier[format] ( identifier[options] , identifier[quote] ( identifier[source] ), identifier[quote] ( identifier[destination] ))) | def copy(self, source, destination, recursive=False, use_sudo=False):
"""
Copy a file or directory
"""
func = use_sudo and run_as_root or self.run
options = '-r ' if recursive else ''
func('/bin/cp {0}{1} {2}'.format(options, quote(source), quote(destination))) |
def _calculate_aes_cipher(key):
"""
Determines if the key is a valid AES 128, 192 or 256 key
:param key:
A byte string of the key to use
:raises:
ValueError - when an invalid key is provided
:return:
A unicode string of the AES variation - "aes128", "aes192" or "aes256"
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(key) == 16:
cipher = 'aes128'
elif len(key) == 24:
cipher = 'aes192'
elif len(key) == 32:
cipher = 'aes256'
return cipher | def function[_calculate_aes_cipher, parameter[key]]:
constant[
Determines if the key is a valid AES 128, 192 or 256 key
:param key:
A byte string of the key to use
:raises:
ValueError - when an invalid key is provided
:return:
A unicode string of the AES variation - "aes128", "aes192" or "aes256"
]
if compare[call[name[len], parameter[name[key]]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b0030100>, <ast.Constant object at 0x7da1b00331f0>, <ast.Constant object at 0x7da1b00322f0>]]] begin[:]
<ast.Raise object at 0x7da1b00316c0>
if compare[call[name[len], parameter[name[key]]] equal[==] constant[16]] begin[:]
variable[cipher] assign[=] constant[aes128]
return[name[cipher]] | keyword[def] identifier[_calculate_aes_cipher] ( identifier[key] ):
literal[string]
keyword[if] identifier[len] ( identifier[key] ) keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[raise] identifier[ValueError] ( identifier[pretty_message] (
literal[string] ,
identifier[len] ( identifier[key] )
))
keyword[if] identifier[len] ( identifier[key] )== literal[int] :
identifier[cipher] = literal[string]
keyword[elif] identifier[len] ( identifier[key] )== literal[int] :
identifier[cipher] = literal[string]
keyword[elif] identifier[len] ( identifier[key] )== literal[int] :
identifier[cipher] = literal[string]
keyword[return] identifier[cipher] | def _calculate_aes_cipher(key):
"""
Determines if the key is a valid AES 128, 192 or 256 key
:param key:
A byte string of the key to use
:raises:
ValueError - when an invalid key is provided
:return:
A unicode string of the AES variation - "aes128", "aes192" or "aes256"
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message('\n key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)\n long - is %s\n ', len(key))) # depends on [control=['if'], data=[]]
if len(key) == 16:
cipher = 'aes128' # depends on [control=['if'], data=[]]
elif len(key) == 24:
cipher = 'aes192' # depends on [control=['if'], data=[]]
elif len(key) == 32:
cipher = 'aes256' # depends on [control=['if'], data=[]]
return cipher |
def update(name, maximum_version=None, required_version=None):
'''
Update a PowerShell module to a specific version, or the newest
:param name: Name of a Powershell module
:type name: ``str``
:param maximum_version: The maximum version to install, e.g. 1.23.2
:type maximum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.update PowerPlan
'''
# Putting quotes around the parameter protects against command injection
flags = [('Name', name)]
if maximum_version is not None:
flags.append(('MaximumVersion', maximum_version))
if required_version is not None:
flags.append(('RequiredVersion', required_version))
params = ''
for flag, value in flags:
params += '-{0} {1} '.format(flag, value)
cmd = 'Update-Module {0} -Force'.format(params)
_pshell(cmd)
return name in list_modules() | def function[update, parameter[name, maximum_version, required_version]]:
constant[
Update a PowerShell module to a specific version, or the newest
:param name: Name of a Powershell module
:type name: ``str``
:param maximum_version: The maximum version to install, e.g. 1.23.2
:type maximum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.update PowerPlan
]
variable[flags] assign[=] list[[<ast.Tuple object at 0x7da1b1c159f0>]]
if compare[name[maximum_version] is_not constant[None]] begin[:]
call[name[flags].append, parameter[tuple[[<ast.Constant object at 0x7da1b1c167a0>, <ast.Name object at 0x7da1b1c171c0>]]]]
if compare[name[required_version] is_not constant[None]] begin[:]
call[name[flags].append, parameter[tuple[[<ast.Constant object at 0x7da1b1c2b100>, <ast.Name object at 0x7da1b1c287f0>]]]]
variable[params] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b1c29f60>, <ast.Name object at 0x7da1b1c29420>]]] in starred[name[flags]] begin[:]
<ast.AugAssign object at 0x7da1b1c293f0>
variable[cmd] assign[=] call[constant[Update-Module {0} -Force].format, parameter[name[params]]]
call[name[_pshell], parameter[name[cmd]]]
return[compare[name[name] in call[name[list_modules], parameter[]]]] | keyword[def] identifier[update] ( identifier[name] , identifier[maximum_version] = keyword[None] , identifier[required_version] = keyword[None] ):
literal[string]
identifier[flags] =[( literal[string] , identifier[name] )]
keyword[if] identifier[maximum_version] keyword[is] keyword[not] keyword[None] :
identifier[flags] . identifier[append] (( literal[string] , identifier[maximum_version] ))
keyword[if] identifier[required_version] keyword[is] keyword[not] keyword[None] :
identifier[flags] . identifier[append] (( literal[string] , identifier[required_version] ))
identifier[params] = literal[string]
keyword[for] identifier[flag] , identifier[value] keyword[in] identifier[flags] :
identifier[params] += literal[string] . identifier[format] ( identifier[flag] , identifier[value] )
identifier[cmd] = literal[string] . identifier[format] ( identifier[params] )
identifier[_pshell] ( identifier[cmd] )
keyword[return] identifier[name] keyword[in] identifier[list_modules] () | def update(name, maximum_version=None, required_version=None):
"""
Update a PowerShell module to a specific version, or the newest
:param name: Name of a Powershell module
:type name: ``str``
:param maximum_version: The maximum version to install, e.g. 1.23.2
:type maximum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.update PowerPlan
"""
# Putting quotes around the parameter protects against command injection
flags = [('Name', name)]
if maximum_version is not None:
flags.append(('MaximumVersion', maximum_version)) # depends on [control=['if'], data=['maximum_version']]
if required_version is not None:
flags.append(('RequiredVersion', required_version)) # depends on [control=['if'], data=['required_version']]
params = ''
for (flag, value) in flags:
params += '-{0} {1} '.format(flag, value) # depends on [control=['for'], data=[]]
cmd = 'Update-Module {0} -Force'.format(params)
_pshell(cmd)
return name in list_modules() |
def _g_3(self):
"""omega3 < omega < omega4"""
# return 3 * (1.0 - self._n_3()) / (self._vertices_omegas[3] - self._omega)
return (3 * self._f(1, 3) * self._f(2, 3) /
(self._vertices_omegas[3] - self._vertices_omegas[0])) | def function[_g_3, parameter[self]]:
constant[omega3 < omega < omega4]
return[binary_operation[binary_operation[binary_operation[constant[3] * call[name[self]._f, parameter[constant[1], constant[3]]]] * call[name[self]._f, parameter[constant[2], constant[3]]]] / binary_operation[call[name[self]._vertices_omegas][constant[3]] - call[name[self]._vertices_omegas][constant[0]]]]] | keyword[def] identifier[_g_3] ( identifier[self] ):
literal[string]
keyword[return] ( literal[int] * identifier[self] . identifier[_f] ( literal[int] , literal[int] )* identifier[self] . identifier[_f] ( literal[int] , literal[int] )/
( identifier[self] . identifier[_vertices_omegas] [ literal[int] ]- identifier[self] . identifier[_vertices_omegas] [ literal[int] ])) | def _g_3(self):
"""omega3 < omega < omega4"""
# return 3 * (1.0 - self._n_3()) / (self._vertices_omegas[3] - self._omega)
return 3 * self._f(1, 3) * self._f(2, 3) / (self._vertices_omegas[3] - self._vertices_omegas[0]) |
def _run_player(self, extra_cmd):
"""
运行播放器(若当前已有正在运行的,强制推出)
extra_cmd: 额外的参数 (list)
"""
# Force quit old process
if self.is_alive:
self.quit()
args = self._args + extra_cmd
logger.debug("Exec: " + ' '.join(args))
self.sub_proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=self._null_file,
preexec_fn=os.setsid
)
# Set up NONBLOCKING flag for the pipe
flags = fcntl.fcntl(self.sub_proc.stdout, fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(self.sub_proc.stdout, fcntl.F_SETFL, flags)
# Start watchdog
Thread(target=self._watchdog).start() | def function[_run_player, parameter[self, extra_cmd]]:
constant[
运行播放器(若当前已有正在运行的,强制推出)
extra_cmd: 额外的参数 (list)
]
if name[self].is_alive begin[:]
call[name[self].quit, parameter[]]
variable[args] assign[=] binary_operation[name[self]._args + name[extra_cmd]]
call[name[logger].debug, parameter[binary_operation[constant[Exec: ] + call[constant[ ].join, parameter[name[args]]]]]]
name[self].sub_proc assign[=] call[name[subprocess].Popen, parameter[name[args]]]
variable[flags] assign[=] call[name[fcntl].fcntl, parameter[name[self].sub_proc.stdout, name[fcntl].F_GETFL]]
<ast.AugAssign object at 0x7da18fe92950>
call[name[fcntl].fcntl, parameter[name[self].sub_proc.stdout, name[fcntl].F_SETFL, name[flags]]]
call[call[name[Thread], parameter[]].start, parameter[]] | keyword[def] identifier[_run_player] ( identifier[self] , identifier[extra_cmd] ):
literal[string]
keyword[if] identifier[self] . identifier[is_alive] :
identifier[self] . identifier[quit] ()
identifier[args] = identifier[self] . identifier[_args] + identifier[extra_cmd]
identifier[logger] . identifier[debug] ( literal[string] + literal[string] . identifier[join] ( identifier[args] ))
identifier[self] . identifier[sub_proc] = identifier[subprocess] . identifier[Popen] (
identifier[args] ,
identifier[stdin] = identifier[subprocess] . identifier[PIPE] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[self] . identifier[_null_file] ,
identifier[preexec_fn] = identifier[os] . identifier[setsid]
)
identifier[flags] = identifier[fcntl] . identifier[fcntl] ( identifier[self] . identifier[sub_proc] . identifier[stdout] , identifier[fcntl] . identifier[F_GETFL] )
identifier[flags] |= identifier[os] . identifier[O_NONBLOCK]
identifier[fcntl] . identifier[fcntl] ( identifier[self] . identifier[sub_proc] . identifier[stdout] , identifier[fcntl] . identifier[F_SETFL] , identifier[flags] )
identifier[Thread] ( identifier[target] = identifier[self] . identifier[_watchdog] ). identifier[start] () | def _run_player(self, extra_cmd):
"""
运行播放器(若当前已有正在运行的,强制推出)
extra_cmd: 额外的参数 (list)
"""
# Force quit old process
if self.is_alive:
self.quit() # depends on [control=['if'], data=[]]
args = self._args + extra_cmd
logger.debug('Exec: ' + ' '.join(args))
self.sub_proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=self._null_file, preexec_fn=os.setsid)
# Set up NONBLOCKING flag for the pipe
flags = fcntl.fcntl(self.sub_proc.stdout, fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(self.sub_proc.stdout, fcntl.F_SETFL, flags)
# Start watchdog
Thread(target=self._watchdog).start() |
def minimal_medium(model, min_objective_value=0.1, exports=False,
minimize_components=False, open_exchanges=False):
"""
Find the minimal growth medium for the model.
Finds the minimal growth medium for the model which allows for
model as well as individual growth. Here, a minimal medium can either
be the medium requiring the smallest total import flux or the medium
requiring the least components (ergo ingredients), which will be much
slower due to being a mixed integer problem (MIP).
Arguments
---------
model : cobra.model
The model to modify.
min_objective_value : positive float or array-like object
The minimum growth rate (objective) that has to be achieved.
exports : boolean
Whether to include export fluxes in the returned medium. Defaults to
False which will only return import fluxes.
minimize_components : boolean or positive int
Whether to minimize the number of components instead of the total
import flux. Might be more intuitive if set to True but may also be
slow to calculate for large communities. If set to a number `n` will
return up to `n` alternative solutions all with the same number of
components.
open_exchanges : boolean or number
Whether to ignore currently set bounds and make all exchange reactions
in the model possible. If set to a number all exchange reactions will
be opened with (-number, number) as bounds.
Returns
-------
pandas.Series, pandas.DataFrame or None
A series giving the import flux for each required import
reaction and (optionally) the associated export fluxes. All exchange
fluxes are oriented into the import reaction e.g. positive fluxes
denote imports and negative fluxes exports. If `minimize_components`
is a number larger 1 may return a DataFrame where each column is a
minimal medium. Returns None if the minimization is infeasible
(for instance if min_growth > maximum growth rate).
Notes
-----
Due to numerical issues the `minimize_components` option will usually only
minimize the number of "large" import fluxes. Specifically, the detection
limit is given by ``integrality_tolerance * max_bound`` where ``max_bound``
is the largest bound on an import reaction. Thus, if you are interested
in small import fluxes as well you may have to adjust the integrality
tolerance at first with
`model.solver.configuration.tolerances.integrality = 1e-7` for instance.
However, this will be *very* slow for large models especially with GLPK.
"""
exchange_rxns = find_boundary_types(model, "exchange")
if isinstance(open_exchanges, bool):
open_bound = 1000
else:
open_bound = open_exchanges
with model as mod:
if open_exchanges:
LOGGER.debug("Opening exchanges for %d imports.",
len(exchange_rxns))
for rxn in exchange_rxns:
rxn.bounds = (-open_bound, open_bound)
LOGGER.debug("Applying objective value constraints.")
obj_const = mod.problem.Constraint(
mod.objective.expression, lb=min_objective_value,
name="medium_obj_constraint")
mod.add_cons_vars([obj_const])
mod.solver.update()
mod.objective = Zero
LOGGER.debug("Adding new media objective.")
tol = mod.solver.configuration.tolerances.feasibility
if minimize_components:
add_mip_obj(mod)
if isinstance(minimize_components, bool):
minimize_components = 1
seen = set()
best = num_components = mod.slim_optimize()
if mod.solver.status != OPTIMAL:
LOGGER.warning("Minimization of medium was infeasible.")
return None
exclusion = mod.problem.Constraint(Zero, ub=0)
mod.add_cons_vars([exclusion])
mod.solver.update()
media = []
for i in range(minimize_components):
LOGGER.info("Finding alternative medium #%d.", (i + 1))
vars = [mod.variables["ind_" + s] for s in seen]
if len(seen) > 0:
exclusion.set_linear_coefficients(
dict.fromkeys(vars, 1))
exclusion.ub = best - 1
num_components = mod.slim_optimize()
if mod.solver.status != OPTIMAL or num_components > best:
break
medium = _as_medium(exchange_rxns, tol, exports=exports)
media.append(medium)
seen.update(medium[medium > 0].index)
if len(media) > 1:
medium = pd.concat(media, axis=1, sort=True).fillna(0.0)
medium.sort_index(axis=1, inplace=True)
else:
medium = media[0]
else:
add_linear_obj(mod)
mod.slim_optimize()
if mod.solver.status != OPTIMAL:
LOGGER.warning("Minimization of medium was infeasible.")
return None
medium = _as_medium(exchange_rxns, tol, exports=exports)
return medium | def function[minimal_medium, parameter[model, min_objective_value, exports, minimize_components, open_exchanges]]:
constant[
Find the minimal growth medium for the model.
Finds the minimal growth medium for the model which allows for
model as well as individual growth. Here, a minimal medium can either
be the medium requiring the smallest total import flux or the medium
requiring the least components (ergo ingredients), which will be much
slower due to being a mixed integer problem (MIP).
Arguments
---------
model : cobra.model
The model to modify.
min_objective_value : positive float or array-like object
The minimum growth rate (objective) that has to be achieved.
exports : boolean
Whether to include export fluxes in the returned medium. Defaults to
False which will only return import fluxes.
minimize_components : boolean or positive int
Whether to minimize the number of components instead of the total
import flux. Might be more intuitive if set to True but may also be
slow to calculate for large communities. If set to a number `n` will
return up to `n` alternative solutions all with the same number of
components.
open_exchanges : boolean or number
Whether to ignore currently set bounds and make all exchange reactions
in the model possible. If set to a number all exchange reactions will
be opened with (-number, number) as bounds.
Returns
-------
pandas.Series, pandas.DataFrame or None
A series giving the import flux for each required import
reaction and (optionally) the associated export fluxes. All exchange
fluxes are oriented into the import reaction e.g. positive fluxes
denote imports and negative fluxes exports. If `minimize_components`
is a number larger 1 may return a DataFrame where each column is a
minimal medium. Returns None if the minimization is infeasible
(for instance if min_growth > maximum growth rate).
Notes
-----
Due to numerical issues the `minimize_components` option will usually only
minimize the number of "large" import fluxes. Specifically, the detection
limit is given by ``integrality_tolerance * max_bound`` where ``max_bound``
is the largest bound on an import reaction. Thus, if you are interested
in small import fluxes as well you may have to adjust the integrality
tolerance at first with
`model.solver.configuration.tolerances.integrality = 1e-7` for instance.
However, this will be *very* slow for large models especially with GLPK.
]
variable[exchange_rxns] assign[=] call[name[find_boundary_types], parameter[name[model], constant[exchange]]]
if call[name[isinstance], parameter[name[open_exchanges], name[bool]]] begin[:]
variable[open_bound] assign[=] constant[1000]
with name[model] begin[:]
if name[open_exchanges] begin[:]
call[name[LOGGER].debug, parameter[constant[Opening exchanges for %d imports.], call[name[len], parameter[name[exchange_rxns]]]]]
for taget[name[rxn]] in starred[name[exchange_rxns]] begin[:]
name[rxn].bounds assign[=] tuple[[<ast.UnaryOp object at 0x7da1b007cc70>, <ast.Name object at 0x7da1b007cd00>]]
call[name[LOGGER].debug, parameter[constant[Applying objective value constraints.]]]
variable[obj_const] assign[=] call[name[mod].problem.Constraint, parameter[name[mod].objective.expression]]
call[name[mod].add_cons_vars, parameter[list[[<ast.Name object at 0x7da1b007d210>]]]]
call[name[mod].solver.update, parameter[]]
name[mod].objective assign[=] name[Zero]
call[name[LOGGER].debug, parameter[constant[Adding new media objective.]]]
variable[tol] assign[=] name[mod].solver.configuration.tolerances.feasibility
if name[minimize_components] begin[:]
call[name[add_mip_obj], parameter[name[mod]]]
if call[name[isinstance], parameter[name[minimize_components], name[bool]]] begin[:]
variable[minimize_components] assign[=] constant[1]
variable[seen] assign[=] call[name[set], parameter[]]
variable[best] assign[=] call[name[mod].slim_optimize, parameter[]]
if compare[name[mod].solver.status not_equal[!=] name[OPTIMAL]] begin[:]
call[name[LOGGER].warning, parameter[constant[Minimization of medium was infeasible.]]]
return[constant[None]]
variable[exclusion] assign[=] call[name[mod].problem.Constraint, parameter[name[Zero]]]
call[name[mod].add_cons_vars, parameter[list[[<ast.Name object at 0x7da1b007eb30>]]]]
call[name[mod].solver.update, parameter[]]
variable[media] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[minimize_components]]]] begin[:]
call[name[LOGGER].info, parameter[constant[Finding alternative medium #%d.], binary_operation[name[i] + constant[1]]]]
variable[vars] assign[=] <ast.ListComp object at 0x7da1b007ef20>
if compare[call[name[len], parameter[name[seen]]] greater[>] constant[0]] begin[:]
call[name[exclusion].set_linear_coefficients, parameter[call[name[dict].fromkeys, parameter[name[vars], constant[1]]]]]
name[exclusion].ub assign[=] binary_operation[name[best] - constant[1]]
variable[num_components] assign[=] call[name[mod].slim_optimize, parameter[]]
if <ast.BoolOp object at 0x7da1b007f7f0> begin[:]
break
variable[medium] assign[=] call[name[_as_medium], parameter[name[exchange_rxns], name[tol]]]
call[name[media].append, parameter[name[medium]]]
call[name[seen].update, parameter[call[name[medium]][compare[name[medium] greater[>] constant[0]]].index]]
if compare[call[name[len], parameter[name[media]]] greater[>] constant[1]] begin[:]
variable[medium] assign[=] call[call[name[pd].concat, parameter[name[media]]].fillna, parameter[constant[0.0]]]
call[name[medium].sort_index, parameter[]]
return[name[medium]] | keyword[def] identifier[minimal_medium] ( identifier[model] , identifier[min_objective_value] = literal[int] , identifier[exports] = keyword[False] ,
identifier[minimize_components] = keyword[False] , identifier[open_exchanges] = keyword[False] ):
literal[string]
identifier[exchange_rxns] = identifier[find_boundary_types] ( identifier[model] , literal[string] )
keyword[if] identifier[isinstance] ( identifier[open_exchanges] , identifier[bool] ):
identifier[open_bound] = literal[int]
keyword[else] :
identifier[open_bound] = identifier[open_exchanges]
keyword[with] identifier[model] keyword[as] identifier[mod] :
keyword[if] identifier[open_exchanges] :
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[len] ( identifier[exchange_rxns] ))
keyword[for] identifier[rxn] keyword[in] identifier[exchange_rxns] :
identifier[rxn] . identifier[bounds] =(- identifier[open_bound] , identifier[open_bound] )
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[obj_const] = identifier[mod] . identifier[problem] . identifier[Constraint] (
identifier[mod] . identifier[objective] . identifier[expression] , identifier[lb] = identifier[min_objective_value] ,
identifier[name] = literal[string] )
identifier[mod] . identifier[add_cons_vars] ([ identifier[obj_const] ])
identifier[mod] . identifier[solver] . identifier[update] ()
identifier[mod] . identifier[objective] = identifier[Zero]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[tol] = identifier[mod] . identifier[solver] . identifier[configuration] . identifier[tolerances] . identifier[feasibility]
keyword[if] identifier[minimize_components] :
identifier[add_mip_obj] ( identifier[mod] )
keyword[if] identifier[isinstance] ( identifier[minimize_components] , identifier[bool] ):
identifier[minimize_components] = literal[int]
identifier[seen] = identifier[set] ()
identifier[best] = identifier[num_components] = identifier[mod] . identifier[slim_optimize] ()
keyword[if] identifier[mod] . identifier[solver] . identifier[status] != identifier[OPTIMAL] :
identifier[LOGGER] . identifier[warning] ( literal[string] )
keyword[return] keyword[None]
identifier[exclusion] = identifier[mod] . identifier[problem] . identifier[Constraint] ( identifier[Zero] , identifier[ub] = literal[int] )
identifier[mod] . identifier[add_cons_vars] ([ identifier[exclusion] ])
identifier[mod] . identifier[solver] . identifier[update] ()
identifier[media] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[minimize_components] ):
identifier[LOGGER] . identifier[info] ( literal[string] ,( identifier[i] + literal[int] ))
identifier[vars] =[ identifier[mod] . identifier[variables] [ literal[string] + identifier[s] ] keyword[for] identifier[s] keyword[in] identifier[seen] ]
keyword[if] identifier[len] ( identifier[seen] )> literal[int] :
identifier[exclusion] . identifier[set_linear_coefficients] (
identifier[dict] . identifier[fromkeys] ( identifier[vars] , literal[int] ))
identifier[exclusion] . identifier[ub] = identifier[best] - literal[int]
identifier[num_components] = identifier[mod] . identifier[slim_optimize] ()
keyword[if] identifier[mod] . identifier[solver] . identifier[status] != identifier[OPTIMAL] keyword[or] identifier[num_components] > identifier[best] :
keyword[break]
identifier[medium] = identifier[_as_medium] ( identifier[exchange_rxns] , identifier[tol] , identifier[exports] = identifier[exports] )
identifier[media] . identifier[append] ( identifier[medium] )
identifier[seen] . identifier[update] ( identifier[medium] [ identifier[medium] > literal[int] ]. identifier[index] )
keyword[if] identifier[len] ( identifier[media] )> literal[int] :
identifier[medium] = identifier[pd] . identifier[concat] ( identifier[media] , identifier[axis] = literal[int] , identifier[sort] = keyword[True] ). identifier[fillna] ( literal[int] )
identifier[medium] . identifier[sort_index] ( identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
keyword[else] :
identifier[medium] = identifier[media] [ literal[int] ]
keyword[else] :
identifier[add_linear_obj] ( identifier[mod] )
identifier[mod] . identifier[slim_optimize] ()
keyword[if] identifier[mod] . identifier[solver] . identifier[status] != identifier[OPTIMAL] :
identifier[LOGGER] . identifier[warning] ( literal[string] )
keyword[return] keyword[None]
identifier[medium] = identifier[_as_medium] ( identifier[exchange_rxns] , identifier[tol] , identifier[exports] = identifier[exports] )
keyword[return] identifier[medium] | def minimal_medium(model, min_objective_value=0.1, exports=False, minimize_components=False, open_exchanges=False):
"""
Find the minimal growth medium for the model.
Finds the minimal growth medium for the model which allows for
model as well as individual growth. Here, a minimal medium can either
be the medium requiring the smallest total import flux or the medium
requiring the least components (ergo ingredients), which will be much
slower due to being a mixed integer problem (MIP).
Arguments
---------
model : cobra.model
The model to modify.
min_objective_value : positive float or array-like object
The minimum growth rate (objective) that has to be achieved.
exports : boolean
Whether to include export fluxes in the returned medium. Defaults to
False which will only return import fluxes.
minimize_components : boolean or positive int
Whether to minimize the number of components instead of the total
import flux. Might be more intuitive if set to True but may also be
slow to calculate for large communities. If set to a number `n` will
return up to `n` alternative solutions all with the same number of
components.
open_exchanges : boolean or number
Whether to ignore currently set bounds and make all exchange reactions
in the model possible. If set to a number all exchange reactions will
be opened with (-number, number) as bounds.
Returns
-------
pandas.Series, pandas.DataFrame or None
A series giving the import flux for each required import
reaction and (optionally) the associated export fluxes. All exchange
fluxes are oriented into the import reaction e.g. positive fluxes
denote imports and negative fluxes exports. If `minimize_components`
is a number larger 1 may return a DataFrame where each column is a
minimal medium. Returns None if the minimization is infeasible
(for instance if min_growth > maximum growth rate).
Notes
-----
Due to numerical issues the `minimize_components` option will usually only
minimize the number of "large" import fluxes. Specifically, the detection
limit is given by ``integrality_tolerance * max_bound`` where ``max_bound``
is the largest bound on an import reaction. Thus, if you are interested
in small import fluxes as well you may have to adjust the integrality
tolerance at first with
`model.solver.configuration.tolerances.integrality = 1e-7` for instance.
However, this will be *very* slow for large models especially with GLPK.
"""
exchange_rxns = find_boundary_types(model, 'exchange')
if isinstance(open_exchanges, bool):
open_bound = 1000 # depends on [control=['if'], data=[]]
else:
open_bound = open_exchanges
with model as mod:
if open_exchanges:
LOGGER.debug('Opening exchanges for %d imports.', len(exchange_rxns))
for rxn in exchange_rxns:
rxn.bounds = (-open_bound, open_bound) # depends on [control=['for'], data=['rxn']] # depends on [control=['if'], data=[]]
LOGGER.debug('Applying objective value constraints.')
obj_const = mod.problem.Constraint(mod.objective.expression, lb=min_objective_value, name='medium_obj_constraint')
mod.add_cons_vars([obj_const])
mod.solver.update()
mod.objective = Zero
LOGGER.debug('Adding new media objective.')
tol = mod.solver.configuration.tolerances.feasibility
if minimize_components:
add_mip_obj(mod)
if isinstance(minimize_components, bool):
minimize_components = 1 # depends on [control=['if'], data=[]]
seen = set()
best = num_components = mod.slim_optimize()
if mod.solver.status != OPTIMAL:
LOGGER.warning('Minimization of medium was infeasible.')
return None # depends on [control=['if'], data=[]]
exclusion = mod.problem.Constraint(Zero, ub=0)
mod.add_cons_vars([exclusion])
mod.solver.update()
media = []
for i in range(minimize_components):
LOGGER.info('Finding alternative medium #%d.', i + 1)
vars = [mod.variables['ind_' + s] for s in seen]
if len(seen) > 0:
exclusion.set_linear_coefficients(dict.fromkeys(vars, 1))
exclusion.ub = best - 1 # depends on [control=['if'], data=[]]
num_components = mod.slim_optimize()
if mod.solver.status != OPTIMAL or num_components > best:
break # depends on [control=['if'], data=[]]
medium = _as_medium(exchange_rxns, tol, exports=exports)
media.append(medium)
seen.update(medium[medium > 0].index) # depends on [control=['for'], data=['i']]
if len(media) > 1:
medium = pd.concat(media, axis=1, sort=True).fillna(0.0)
medium.sort_index(axis=1, inplace=True) # depends on [control=['if'], data=[]]
else:
medium = media[0] # depends on [control=['if'], data=[]]
else:
add_linear_obj(mod)
mod.slim_optimize()
if mod.solver.status != OPTIMAL:
LOGGER.warning('Minimization of medium was infeasible.')
return None # depends on [control=['if'], data=[]]
medium = _as_medium(exchange_rxns, tol, exports=exports) # depends on [control=['with'], data=['mod']]
return medium |
def configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table):
"""Configures a dataflow pipeline for batch prediction."""
data = _util.get_sources_from_dataset(p, dataset, 'predict')
if len(dataset.schema) == 2:
output_schema = [
{'name': 'image_url', 'type': 'STRING'},
{'name': 'target', 'type': 'STRING'},
{'name': 'predicted', 'type': 'STRING'},
{'name': 'target_prob', 'type': 'FLOAT'},
{'name': 'predicted_prob', 'type': 'FLOAT'},
]
else:
output_schema = [
{'name': 'image_url', 'type': 'STRING'},
{'name': 'predicted', 'type': 'STRING'},
{'name': 'predicted_prob', 'type': 'FLOAT'},
]
results = (data |
'Load Images' >> beam.ParDo(LoadImagesDoFn()) |
'Batch Inputs' >> beam.ParDo(EmitAsBatchDoFn(20)) |
'Batch Predict' >> beam.ParDo(PredictBatchDoFn(model_dir)) |
'Unbatch' >> beam.ParDo(UnbatchDoFn()) |
'Process Results' >> beam.ParDo(ProcessResultsDoFn()))
if output_csv is not None:
schema_file = output_csv + '.schema.json'
results_save = (results |
'Prepare For Output' >> beam.ParDo(MakeCsvLineDoFn()) |
'Write Csv Results' >> beam.io.textio.WriteToText(output_csv,
shard_name_template=''))
(results_save |
'Sample One' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1) |
'Serialize Schema' >> beam.Map(lambda path: json.dumps(output_schema)) |
'Write Schema' >> beam.io.textio.WriteToText(schema_file, shard_name_template=''))
if output_bq_table is not None:
# BigQuery sink takes schema in the form of 'field1:type1,field2:type2...'
bq_schema_string = ','.join(x['name'] + ':' + x['type'] for x in output_schema)
sink = beam.io.BigQuerySink(output_bq_table, schema=bq_schema_string,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
results | 'Write BQ Results' >> beam.io.Write(sink) | def function[configure_pipeline, parameter[p, dataset, model_dir, output_csv, output_bq_table]]:
constant[Configures a dataflow pipeline for batch prediction.]
variable[data] assign[=] call[name[_util].get_sources_from_dataset, parameter[name[p], name[dataset], constant[predict]]]
if compare[call[name[len], parameter[name[dataset].schema]] equal[==] constant[2]] begin[:]
variable[output_schema] assign[=] list[[<ast.Dict object at 0x7da1b2346f50>, <ast.Dict object at 0x7da1b2345450>, <ast.Dict object at 0x7da1b2346560>, <ast.Dict object at 0x7da1b23445e0>, <ast.Dict object at 0x7da1b23461a0>]]
variable[results] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[data] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Load Images] <ast.RShift object at 0x7da2590d6a40> call[name[beam].ParDo, parameter[call[name[LoadImagesDoFn], parameter[]]]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Batch Inputs] <ast.RShift object at 0x7da2590d6a40> call[name[beam].ParDo, parameter[call[name[EmitAsBatchDoFn], parameter[constant[20]]]]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Batch Predict] <ast.RShift object at 0x7da2590d6a40> call[name[beam].ParDo, parameter[call[name[PredictBatchDoFn], parameter[name[model_dir]]]]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Unbatch] <ast.RShift object at 0x7da2590d6a40> call[name[beam].ParDo, parameter[call[name[UnbatchDoFn], parameter[]]]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Process Results] <ast.RShift object at 0x7da2590d6a40> call[name[beam].ParDo, parameter[call[name[ProcessResultsDoFn], parameter[]]]]]]
if compare[name[output_csv] is_not constant[None]] begin[:]
variable[schema_file] assign[=] binary_operation[name[output_csv] + constant[.schema.json]]
variable[results_save] assign[=] binary_operation[binary_operation[name[results] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Prepare For Output] <ast.RShift object at 0x7da2590d6a40> call[name[beam].ParDo, parameter[call[name[MakeCsvLineDoFn], parameter[]]]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Write Csv Results] <ast.RShift object at 0x7da2590d6a40> call[name[beam].io.textio.WriteToText, parameter[name[output_csv]]]]]
binary_operation[binary_operation[binary_operation[name[results_save] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Sample One] <ast.RShift object at 0x7da2590d6a40> call[name[beam].transforms.combiners.Sample.FixedSizeGlobally, parameter[constant[1]]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Serialize Schema] <ast.RShift object at 0x7da2590d6a40> call[name[beam].Map, parameter[<ast.Lambda object at 0x7da1b2346710>]]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Write Schema] <ast.RShift object at 0x7da2590d6a40> call[name[beam].io.textio.WriteToText, parameter[name[schema_file]]]]]
if compare[name[output_bq_table] is_not constant[None]] begin[:]
variable[bq_schema_string] assign[=] call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da18fe902e0>]]
variable[sink] assign[=] call[name[beam].io.BigQuerySink, parameter[name[output_bq_table]]]
binary_operation[name[results] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[Write BQ Results] <ast.RShift object at 0x7da2590d6a40> call[name[beam].io.Write, parameter[name[sink]]]]] | keyword[def] identifier[configure_pipeline] ( identifier[p] , identifier[dataset] , identifier[model_dir] , identifier[output_csv] , identifier[output_bq_table] ):
literal[string]
identifier[data] = identifier[_util] . identifier[get_sources_from_dataset] ( identifier[p] , identifier[dataset] , literal[string] )
keyword[if] identifier[len] ( identifier[dataset] . identifier[schema] )== literal[int] :
identifier[output_schema] =[
{ literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : literal[string] },
]
keyword[else] :
identifier[output_schema] =[
{ literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : literal[string] },
{ literal[string] : literal[string] , literal[string] : literal[string] },
]
identifier[results] =( identifier[data] |
literal[string] >> identifier[beam] . identifier[ParDo] ( identifier[LoadImagesDoFn] ())|
literal[string] >> identifier[beam] . identifier[ParDo] ( identifier[EmitAsBatchDoFn] ( literal[int] ))|
literal[string] >> identifier[beam] . identifier[ParDo] ( identifier[PredictBatchDoFn] ( identifier[model_dir] ))|
literal[string] >> identifier[beam] . identifier[ParDo] ( identifier[UnbatchDoFn] ())|
literal[string] >> identifier[beam] . identifier[ParDo] ( identifier[ProcessResultsDoFn] ()))
keyword[if] identifier[output_csv] keyword[is] keyword[not] keyword[None] :
identifier[schema_file] = identifier[output_csv] + literal[string]
identifier[results_save] =( identifier[results] |
literal[string] >> identifier[beam] . identifier[ParDo] ( identifier[MakeCsvLineDoFn] ())|
literal[string] >> identifier[beam] . identifier[io] . identifier[textio] . identifier[WriteToText] ( identifier[output_csv] ,
identifier[shard_name_template] = literal[string] ))
( identifier[results_save] |
literal[string] >> identifier[beam] . identifier[transforms] . identifier[combiners] . identifier[Sample] . identifier[FixedSizeGlobally] ( literal[int] )|
literal[string] >> identifier[beam] . identifier[Map] ( keyword[lambda] identifier[path] : identifier[json] . identifier[dumps] ( identifier[output_schema] ))|
literal[string] >> identifier[beam] . identifier[io] . identifier[textio] . identifier[WriteToText] ( identifier[schema_file] , identifier[shard_name_template] = literal[string] ))
keyword[if] identifier[output_bq_table] keyword[is] keyword[not] keyword[None] :
identifier[bq_schema_string] = literal[string] . identifier[join] ( identifier[x] [ literal[string] ]+ literal[string] + identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[output_schema] )
identifier[sink] = identifier[beam] . identifier[io] . identifier[BigQuerySink] ( identifier[output_bq_table] , identifier[schema] = identifier[bq_schema_string] ,
identifier[write_disposition] = identifier[beam] . identifier[io] . identifier[BigQueryDisposition] . identifier[WRITE_TRUNCATE] )
identifier[results] | literal[string] >> identifier[beam] . identifier[io] . identifier[Write] ( identifier[sink] ) | def configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table):
"""Configures a dataflow pipeline for batch prediction."""
data = _util.get_sources_from_dataset(p, dataset, 'predict')
if len(dataset.schema) == 2:
output_schema = [{'name': 'image_url', 'type': 'STRING'}, {'name': 'target', 'type': 'STRING'}, {'name': 'predicted', 'type': 'STRING'}, {'name': 'target_prob', 'type': 'FLOAT'}, {'name': 'predicted_prob', 'type': 'FLOAT'}] # depends on [control=['if'], data=[]]
else:
output_schema = [{'name': 'image_url', 'type': 'STRING'}, {'name': 'predicted', 'type': 'STRING'}, {'name': 'predicted_prob', 'type': 'FLOAT'}]
results = data | 'Load Images' >> beam.ParDo(LoadImagesDoFn()) | 'Batch Inputs' >> beam.ParDo(EmitAsBatchDoFn(20)) | 'Batch Predict' >> beam.ParDo(PredictBatchDoFn(model_dir)) | 'Unbatch' >> beam.ParDo(UnbatchDoFn()) | 'Process Results' >> beam.ParDo(ProcessResultsDoFn())
if output_csv is not None:
schema_file = output_csv + '.schema.json'
results_save = results | 'Prepare For Output' >> beam.ParDo(MakeCsvLineDoFn()) | 'Write Csv Results' >> beam.io.textio.WriteToText(output_csv, shard_name_template='')
results_save | 'Sample One' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1) | 'Serialize Schema' >> beam.Map(lambda path: json.dumps(output_schema)) | 'Write Schema' >> beam.io.textio.WriteToText(schema_file, shard_name_template='') # depends on [control=['if'], data=['output_csv']]
if output_bq_table is not None:
# BigQuery sink takes schema in the form of 'field1:type1,field2:type2...'
bq_schema_string = ','.join((x['name'] + ':' + x['type'] for x in output_schema))
sink = beam.io.BigQuerySink(output_bq_table, schema=bq_schema_string, write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
results | 'Write BQ Results' >> beam.io.Write(sink) # depends on [control=['if'], data=['output_bq_table']] |
def encode_basestring(s, _PY3=PY3, _q=u('"')):
"""Return a JSON representation of a Python string
"""
if _PY3:
if isinstance(s, binary_type):
s = s.decode('utf-8')
else:
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return _q + ESCAPE.sub(replace, s) + _q | def function[encode_basestring, parameter[s, _PY3, _q]]:
constant[Return a JSON representation of a Python string
]
if name[_PY3] begin[:]
if call[name[isinstance], parameter[name[s], name[binary_type]]] begin[:]
variable[s] assign[=] call[name[s].decode, parameter[constant[utf-8]]]
def function[replace, parameter[match]]:
return[call[name[ESCAPE_DCT]][call[name[match].group, parameter[constant[0]]]]]
return[binary_operation[binary_operation[name[_q] + call[name[ESCAPE].sub, parameter[name[replace], name[s]]]] + name[_q]]] | keyword[def] identifier[encode_basestring] ( identifier[s] , identifier[_PY3] = identifier[PY3] , identifier[_q] = identifier[u] ( literal[string] )):
literal[string]
keyword[if] identifier[_PY3] :
keyword[if] identifier[isinstance] ( identifier[s] , identifier[binary_type] ):
identifier[s] = identifier[s] . identifier[decode] ( literal[string] )
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[s] , identifier[str] ) keyword[and] identifier[HAS_UTF8] . identifier[search] ( identifier[s] ) keyword[is] keyword[not] keyword[None] :
identifier[s] = identifier[s] . identifier[decode] ( literal[string] )
keyword[def] identifier[replace] ( identifier[match] ):
keyword[return] identifier[ESCAPE_DCT] [ identifier[match] . identifier[group] ( literal[int] )]
keyword[return] identifier[_q] + identifier[ESCAPE] . identifier[sub] ( identifier[replace] , identifier[s] )+ identifier[_q] | def encode_basestring(s, _PY3=PY3, _q=u('"')):
"""Return a JSON representation of a Python string
"""
if _PY3:
if isinstance(s, binary_type):
s = s.decode('utf-8') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8') # depends on [control=['if'], data=[]]
def replace(match):
return ESCAPE_DCT[match.group(0)]
return _q + ESCAPE.sub(replace, s) + _q |
def save(self, filething=None, deleteid3=False, padding=None):
"""Save metadata blocks to a file.
Args:
filething (filething)
deleteid3 (bool): delete id3 tags while at it
padding (:obj:`mutagen.PaddingFunction`)
If no filename is given, the one most recently loaded is used.
"""
self._save(filething, self.metadata_blocks, deleteid3, padding) | def function[save, parameter[self, filething, deleteid3, padding]]:
constant[Save metadata blocks to a file.
Args:
filething (filething)
deleteid3 (bool): delete id3 tags while at it
padding (:obj:`mutagen.PaddingFunction`)
If no filename is given, the one most recently loaded is used.
]
call[name[self]._save, parameter[name[filething], name[self].metadata_blocks, name[deleteid3], name[padding]]] | keyword[def] identifier[save] ( identifier[self] , identifier[filething] = keyword[None] , identifier[deleteid3] = keyword[False] , identifier[padding] = keyword[None] ):
literal[string]
identifier[self] . identifier[_save] ( identifier[filething] , identifier[self] . identifier[metadata_blocks] , identifier[deleteid3] , identifier[padding] ) | def save(self, filething=None, deleteid3=False, padding=None):
"""Save metadata blocks to a file.
Args:
filething (filething)
deleteid3 (bool): delete id3 tags while at it
padding (:obj:`mutagen.PaddingFunction`)
If no filename is given, the one most recently loaded is used.
"""
self._save(filething, self.metadata_blocks, deleteid3, padding) |
def getReliableListeners(self):
"""
Return an iterable of the listeners which have been added to
this batch processor.
"""
for rellist in self.store.query(_ReliableListener, _ReliableListener.processor == self):
yield rellist.listener | def function[getReliableListeners, parameter[self]]:
constant[
Return an iterable of the listeners which have been added to
this batch processor.
]
for taget[name[rellist]] in starred[call[name[self].store.query, parameter[name[_ReliableListener], compare[name[_ReliableListener].processor equal[==] name[self]]]]] begin[:]
<ast.Yield object at 0x7da1b0d18400> | keyword[def] identifier[getReliableListeners] ( identifier[self] ):
literal[string]
keyword[for] identifier[rellist] keyword[in] identifier[self] . identifier[store] . identifier[query] ( identifier[_ReliableListener] , identifier[_ReliableListener] . identifier[processor] == identifier[self] ):
keyword[yield] identifier[rellist] . identifier[listener] | def getReliableListeners(self):
"""
Return an iterable of the listeners which have been added to
this batch processor.
"""
for rellist in self.store.query(_ReliableListener, _ReliableListener.processor == self):
yield rellist.listener # depends on [control=['for'], data=['rellist']] |
def restore_version(self, symbol, as_of, prune_previous_version=True):
"""
Restore the specified 'symbol' data and metadata to the state of a given version/snapshot/date.
Returns a VersionedItem object only with a metadata element.
Fast operation: Zero data/segment read/write operations.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
prune_previous_version : `bool`
Removes previous (non-snapshotted) versions from the database.
Default: True
Returns
-------
`VersionedItem`
VersionedItem named tuple containing the metadata of the written symbol's version document in the store.
"""
# TODO: This operation is tricky as it may create history branches and lead to corrupted symbols.
# To avoid this we do concat_rewrite (see Issue #579)
# Investigate how this can be optimized and maintain safety (i.e. avoid read/write with serialization
# and compression costs, but instead:
# clone segments (server-side?) / crate new (base) version / update segments' parent).
version_to_restore = self._read_metadata(symbol, as_of=as_of)
# At this point it is guaranteed that the as_of version exists and doesn't have the symbol marked as deleted.
# If we try to restore the last version, do nothing (No-Op) and return the associated VesionedItem.
if self._last_version_seqnum(symbol) == version_to_restore['version']:
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(),
version=version_to_restore['version'],
host=self._arctic_lib.arctic.mongo_host,
metadata=version_to_restore.pop('metadata', None), data=None)
# Read the existing data from as_of
item = self.read(symbol, as_of=as_of)
# Write back, creating a new base version
new_item = self.write(symbol,
data=item.data, metadata=item.metadata, prune_previous_version=prune_previous_version)
return new_item | def function[restore_version, parameter[self, symbol, as_of, prune_previous_version]]:
constant[
Restore the specified 'symbol' data and metadata to the state of a given version/snapshot/date.
Returns a VersionedItem object only with a metadata element.
Fast operation: Zero data/segment read/write operations.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
prune_previous_version : `bool`
Removes previous (non-snapshotted) versions from the database.
Default: True
Returns
-------
`VersionedItem`
VersionedItem named tuple containing the metadata of the written symbol's version document in the store.
]
variable[version_to_restore] assign[=] call[name[self]._read_metadata, parameter[name[symbol]]]
if compare[call[name[self]._last_version_seqnum, parameter[name[symbol]]] equal[==] call[name[version_to_restore]][constant[version]]] begin[:]
return[call[name[VersionedItem], parameter[]]]
variable[item] assign[=] call[name[self].read, parameter[name[symbol]]]
variable[new_item] assign[=] call[name[self].write, parameter[name[symbol]]]
return[name[new_item]] | keyword[def] identifier[restore_version] ( identifier[self] , identifier[symbol] , identifier[as_of] , identifier[prune_previous_version] = keyword[True] ):
literal[string]
identifier[version_to_restore] = identifier[self] . identifier[_read_metadata] ( identifier[symbol] , identifier[as_of] = identifier[as_of] )
keyword[if] identifier[self] . identifier[_last_version_seqnum] ( identifier[symbol] )== identifier[version_to_restore] [ literal[string] ]:
keyword[return] identifier[VersionedItem] ( identifier[symbol] = identifier[symbol] , identifier[library] = identifier[self] . identifier[_arctic_lib] . identifier[get_name] (),
identifier[version] = identifier[version_to_restore] [ literal[string] ],
identifier[host] = identifier[self] . identifier[_arctic_lib] . identifier[arctic] . identifier[mongo_host] ,
identifier[metadata] = identifier[version_to_restore] . identifier[pop] ( literal[string] , keyword[None] ), identifier[data] = keyword[None] )
identifier[item] = identifier[self] . identifier[read] ( identifier[symbol] , identifier[as_of] = identifier[as_of] )
identifier[new_item] = identifier[self] . identifier[write] ( identifier[symbol] ,
identifier[data] = identifier[item] . identifier[data] , identifier[metadata] = identifier[item] . identifier[metadata] , identifier[prune_previous_version] = identifier[prune_previous_version] )
keyword[return] identifier[new_item] | def restore_version(self, symbol, as_of, prune_previous_version=True):
"""
Restore the specified 'symbol' data and metadata to the state of a given version/snapshot/date.
Returns a VersionedItem object only with a metadata element.
Fast operation: Zero data/segment read/write operations.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
prune_previous_version : `bool`
Removes previous (non-snapshotted) versions from the database.
Default: True
Returns
-------
`VersionedItem`
VersionedItem named tuple containing the metadata of the written symbol's version document in the store.
"""
# TODO: This operation is tricky as it may create history branches and lead to corrupted symbols.
# To avoid this we do concat_rewrite (see Issue #579)
# Investigate how this can be optimized and maintain safety (i.e. avoid read/write with serialization
# and compression costs, but instead:
# clone segments (server-side?) / crate new (base) version / update segments' parent).
version_to_restore = self._read_metadata(symbol, as_of=as_of)
# At this point it is guaranteed that the as_of version exists and doesn't have the symbol marked as deleted.
# If we try to restore the last version, do nothing (No-Op) and return the associated VesionedItem.
if self._last_version_seqnum(symbol) == version_to_restore['version']:
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(), version=version_to_restore['version'], host=self._arctic_lib.arctic.mongo_host, metadata=version_to_restore.pop('metadata', None), data=None) # depends on [control=['if'], data=[]]
# Read the existing data from as_of
item = self.read(symbol, as_of=as_of)
# Write back, creating a new base version
new_item = self.write(symbol, data=item.data, metadata=item.metadata, prune_previous_version=prune_previous_version)
return new_item |
async def execute_with_timeout(self, timeout, subprocess):
"""
Execute a subprocess with timeout. If time limit exceeds, the subprocess is terminated,
and `is_timeout` is set to True; otherwise the `is_timeout` is set to False.
You can uses `execute_with_timeout` with other help functions to create time limit for them::
timeout, result = await container.execute_with_timeout(10, container.execute_all([routine1(), routine2()]))
:return: (is_timeout, result) When is_timeout = True, result = None
"""
if timeout is None:
return (False, await subprocess)
else:
th = self.scheduler.setTimer(timeout)
try:
tm = TimerEvent.createMatcher(th)
try:
r = await self.with_exception(subprocess, tm)
except RoutineException as exc:
if exc.matcher is tm:
return True, None
else:
raise
else:
return False, r
finally:
self.scheduler.cancelTimer(th) | <ast.AsyncFunctionDef object at 0x7da1b26aeb00> | keyword[async] keyword[def] identifier[execute_with_timeout] ( identifier[self] , identifier[timeout] , identifier[subprocess] ):
literal[string]
keyword[if] identifier[timeout] keyword[is] keyword[None] :
keyword[return] ( keyword[False] , keyword[await] identifier[subprocess] )
keyword[else] :
identifier[th] = identifier[self] . identifier[scheduler] . identifier[setTimer] ( identifier[timeout] )
keyword[try] :
identifier[tm] = identifier[TimerEvent] . identifier[createMatcher] ( identifier[th] )
keyword[try] :
identifier[r] = keyword[await] identifier[self] . identifier[with_exception] ( identifier[subprocess] , identifier[tm] )
keyword[except] identifier[RoutineException] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[matcher] keyword[is] identifier[tm] :
keyword[return] keyword[True] , keyword[None]
keyword[else] :
keyword[raise]
keyword[else] :
keyword[return] keyword[False] , identifier[r]
keyword[finally] :
identifier[self] . identifier[scheduler] . identifier[cancelTimer] ( identifier[th] ) | async def execute_with_timeout(self, timeout, subprocess):
"""
Execute a subprocess with timeout. If time limit exceeds, the subprocess is terminated,
and `is_timeout` is set to True; otherwise the `is_timeout` is set to False.
You can uses `execute_with_timeout` with other help functions to create time limit for them::
timeout, result = await container.execute_with_timeout(10, container.execute_all([routine1(), routine2()]))
:return: (is_timeout, result) When is_timeout = True, result = None
"""
if timeout is None:
return (False, await subprocess) # depends on [control=['if'], data=[]]
else:
th = self.scheduler.setTimer(timeout)
try:
tm = TimerEvent.createMatcher(th)
try:
r = await self.with_exception(subprocess, tm) # depends on [control=['try'], data=[]]
except RoutineException as exc:
if exc.matcher is tm:
return (True, None) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['exc']]
else:
return (False, r) # depends on [control=['try'], data=[]]
finally:
self.scheduler.cancelTimer(th) |
def fleets(self):
"""
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
if self._fleets is None:
self._fleets = FleetList(self)
return self._fleets | def function[fleets, parameter[self]]:
constant[
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
]
if compare[name[self]._fleets is constant[None]] begin[:]
name[self]._fleets assign[=] call[name[FleetList], parameter[name[self]]]
return[name[self]._fleets] | keyword[def] identifier[fleets] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_fleets] keyword[is] keyword[None] :
identifier[self] . identifier[_fleets] = identifier[FleetList] ( identifier[self] )
keyword[return] identifier[self] . identifier[_fleets] | def fleets(self):
"""
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
if self._fleets is None:
self._fleets = FleetList(self) # depends on [control=['if'], data=[]]
return self._fleets |
def soft_kill(jid, state_id=None):
'''
Set up a state run to die before executing the given state id,
this instructs a running state to safely exit at a given
state id. This needs to pass in the jid of the running state.
If a state_id is not passed then the jid referenced will be safely exited
at the beginning of the next state run.
The given state id is the id got a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `soft_kill` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.soft_kill 20171130110407769519
salt '*' state.soft_kill 20171130110407769519 vim
'''
jid = six.text_type(jid)
if state_id is None:
state_id = '__all__'
data, pause_path = _get_pause(jid, state_id)
data[state_id]['kill'] = True
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(salt.utils.msgpack.dumps(data)) | def function[soft_kill, parameter[jid, state_id]]:
constant[
Set up a state run to die before executing the given state id,
this instructs a running state to safely exit at a given
state id. This needs to pass in the jid of the running state.
If a state_id is not passed then the jid referenced will be safely exited
at the beginning of the next state run.
The given state id is the id got a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `soft_kill` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.soft_kill 20171130110407769519
salt '*' state.soft_kill 20171130110407769519 vim
]
variable[jid] assign[=] call[name[six].text_type, parameter[name[jid]]]
if compare[name[state_id] is constant[None]] begin[:]
variable[state_id] assign[=] constant[__all__]
<ast.Tuple object at 0x7da18ede5270> assign[=] call[name[_get_pause], parameter[name[jid], name[state_id]]]
call[call[name[data]][name[state_id]]][constant[kill]] assign[=] constant[True]
with call[name[salt].utils.files.fopen, parameter[name[pause_path], constant[wb]]] begin[:]
call[name[fp_].write, parameter[call[name[salt].utils.msgpack.dumps, parameter[name[data]]]]] | keyword[def] identifier[soft_kill] ( identifier[jid] , identifier[state_id] = keyword[None] ):
literal[string]
identifier[jid] = identifier[six] . identifier[text_type] ( identifier[jid] )
keyword[if] identifier[state_id] keyword[is] keyword[None] :
identifier[state_id] = literal[string]
identifier[data] , identifier[pause_path] = identifier[_get_pause] ( identifier[jid] , identifier[state_id] )
identifier[data] [ identifier[state_id] ][ literal[string] ]= keyword[True]
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[pause_path] , literal[string] ) keyword[as] identifier[fp_] :
identifier[fp_] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[msgpack] . identifier[dumps] ( identifier[data] )) | def soft_kill(jid, state_id=None):
"""
Set up a state run to die before executing the given state id,
this instructs a running state to safely exit at a given
state id. This needs to pass in the jid of the running state.
If a state_id is not passed then the jid referenced will be safely exited
at the beginning of the next state run.
The given state id is the id got a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `soft_kill` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.soft_kill 20171130110407769519
salt '*' state.soft_kill 20171130110407769519 vim
"""
jid = six.text_type(jid)
if state_id is None:
state_id = '__all__' # depends on [control=['if'], data=['state_id']]
(data, pause_path) = _get_pause(jid, state_id)
data[state_id]['kill'] = True
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(salt.utils.msgpack.dumps(data)) # depends on [control=['with'], data=['fp_']] |
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle) | def function[packexe, parameter[exefile, srcdir]]:
constant[Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs]
variable[exefile] assign[=] call[name[cygpath], parameter[call[name[os].path.abspath, parameter[name[exefile]]]]]
variable[appbundle] assign[=] binary_operation[name[exefile] + constant[.app.7z]]
if call[name[os].path.exists, parameter[name[appbundle]]] begin[:]
<ast.Raise object at 0x7da1b0a6d180>
variable[files] assign[=] call[name[os].listdir, parameter[name[srcdir]]]
variable[SEVENZIP_ARGS] assign[=] list[[<ast.Constant object at 0x7da1b0a6d300>, <ast.Constant object at 0x7da1b0a6d2a0>, <ast.Constant object at 0x7da1b0a6dff0>, <ast.Constant object at 0x7da1b0a6d930>, <ast.Constant object at 0x7da1b0a6dd50>, <ast.Constant object at 0x7da1b0a6cc70>, <ast.Constant object at 0x7da1b0a6c850>, <ast.Constant object at 0x7da1b0a6c730>, <ast.Constant object at 0x7da1b0a6cd00>, <ast.Constant object at 0x7da1b0a6d780>, <ast.Constant object at 0x7da1b0a6dd80>, <ast.Constant object at 0x7da1b0a6d3c0>]]
variable[stdout] assign[=] call[name[tempfile].TemporaryFile, parameter[]]
<ast.Try object at 0x7da1b0a6dc90>
call[name[stdout].close, parameter[]]
variable[o] assign[=] call[name[open], parameter[name[exefile], constant[wb]]]
variable[parts] assign[=] list[[<ast.Constant object at 0x7da1b0a6dde0>, <ast.Constant object at 0x7da1b0a6cd60>, <ast.Name object at 0x7da1b0a6c490>]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[i] assign[=] call[name[open], parameter[name[part]]]
while constant[True] begin[:]
variable[block] assign[=] call[name[i].read, parameter[constant[4096]]]
if <ast.UnaryOp object at 0x7da1b0b81270> begin[:]
break
call[name[o].write, parameter[name[block]]]
call[name[i].close, parameter[]]
call[name[o].close, parameter[]]
call[name[os].unlink, parameter[name[appbundle]]] | keyword[def] identifier[packexe] ( identifier[exefile] , identifier[srcdir] ):
literal[string]
identifier[exefile] = identifier[cygpath] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[exefile] ))
identifier[appbundle] = identifier[exefile] + literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[appbundle] ):
keyword[raise] identifier[OSError] ( literal[string] % identifier[appbundle] )
identifier[files] = identifier[os] . identifier[listdir] ( identifier[srcdir] )
identifier[SEVENZIP_ARGS] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
identifier[stdout] = identifier[tempfile] . identifier[TemporaryFile] ()
keyword[try] :
identifier[check_call] ([ identifier[SEVENZIP] , literal[string] ]+ identifier[SEVENZIP_ARGS] +[ identifier[appbundle] ]+ identifier[files] ,
identifier[cwd] = identifier[srcdir] , identifier[stdout] = identifier[stdout] , identifier[preexec_fn] = identifier[_noumask] )
keyword[except] identifier[Exception] :
identifier[stdout] . identifier[seek] ( literal[int] )
identifier[data] = identifier[stdout] . identifier[read] ()
identifier[log] . identifier[error] ( identifier[data] )
identifier[log] . identifier[exception] ( literal[string] , identifier[exefile] , identifier[srcdir] )
keyword[raise]
identifier[stdout] . identifier[close] ()
identifier[o] = identifier[open] ( identifier[exefile] , literal[string] )
identifier[parts] =[
literal[string] ,
literal[string] ,
identifier[appbundle]
]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[i] = identifier[open] ( identifier[part] )
keyword[while] keyword[True] :
identifier[block] = identifier[i] . identifier[read] ( literal[int] )
keyword[if] keyword[not] identifier[block] :
keyword[break]
identifier[o] . identifier[write] ( identifier[block] )
identifier[i] . identifier[close] ()
identifier[o] . identifier[close] ()
identifier[os] . identifier[unlink] ( identifier[appbundle] ) | def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + '.app.7z'
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError('%s already exists' % appbundle) # depends on [control=['if'], data=[]]
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27', '-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2', '-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files, cwd=srcdir, stdout=stdout, preexec_fn=_noumask) # depends on [control=['try'], data=[]]
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception('Error packing exe %s from %s', exefile, srcdir)
raise # depends on [control=['except'], data=[]]
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, 'wb')
parts = ['checkouts/stubs/7z/7zSD.sfx.compressed', 'checkouts/stubs/tagfile/app.tag', appbundle]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break # depends on [control=['if'], data=[]]
o.write(block) # depends on [control=['while'], data=[]]
i.close() # depends on [control=['for'], data=['part']]
o.close()
os.unlink(appbundle) |
def _zip(self) -> ArrayValue:
"""Zip the receiver into an array and return it."""
res = list(self.before)
res.reverse()
res.append(self.value)
res.extend(list(self.after))
return ArrayValue(res, self.timestamp) | def function[_zip, parameter[self]]:
constant[Zip the receiver into an array and return it.]
variable[res] assign[=] call[name[list], parameter[name[self].before]]
call[name[res].reverse, parameter[]]
call[name[res].append, parameter[name[self].value]]
call[name[res].extend, parameter[call[name[list], parameter[name[self].after]]]]
return[call[name[ArrayValue], parameter[name[res], name[self].timestamp]]] | keyword[def] identifier[_zip] ( identifier[self] )-> identifier[ArrayValue] :
literal[string]
identifier[res] = identifier[list] ( identifier[self] . identifier[before] )
identifier[res] . identifier[reverse] ()
identifier[res] . identifier[append] ( identifier[self] . identifier[value] )
identifier[res] . identifier[extend] ( identifier[list] ( identifier[self] . identifier[after] ))
keyword[return] identifier[ArrayValue] ( identifier[res] , identifier[self] . identifier[timestamp] ) | def _zip(self) -> ArrayValue:
"""Zip the receiver into an array and return it."""
res = list(self.before)
res.reverse()
res.append(self.value)
res.extend(list(self.after))
return ArrayValue(res, self.timestamp) |
def update_gradebook(self, *args, **kwargs):
"""Pass through to provider GradebookAdminSession.update_gradebook"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
# OSID spec does not require returning updated catalog
return Gradebook(
self._provider_manager,
self._get_provider_session('gradebook_admin_session').update_gradebook(*args, **kwargs),
self._runtime,
self._proxy) | def function[update_gradebook, parameter[self]]:
constant[Pass through to provider GradebookAdminSession.update_gradebook]
return[call[name[Gradebook], parameter[name[self]._provider_manager, call[call[name[self]._get_provider_session, parameter[constant[gradebook_admin_session]]].update_gradebook, parameter[<ast.Starred object at 0x7da20c7cbf70>]], name[self]._runtime, name[self]._proxy]]] | keyword[def] identifier[update_gradebook] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[Gradebook] (
identifier[self] . identifier[_provider_manager] ,
identifier[self] . identifier[_get_provider_session] ( literal[string] ). identifier[update_gradebook] (* identifier[args] ,** identifier[kwargs] ),
identifier[self] . identifier[_runtime] ,
identifier[self] . identifier[_proxy] ) | def update_gradebook(self, *args, **kwargs):
"""Pass through to provider GradebookAdminSession.update_gradebook"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
# OSID spec does not require returning updated catalog
return Gradebook(self._provider_manager, self._get_provider_session('gradebook_admin_session').update_gradebook(*args, **kwargs), self._runtime, self._proxy) |
def signing_keys(self):
"""
Access the signing_keys
:returns: twilio.rest.api.v2010.account.signing_key.SigningKeyList
:rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyList
"""
if self._signing_keys is None:
self._signing_keys = SigningKeyList(self._version, account_sid=self._solution['sid'], )
return self._signing_keys | def function[signing_keys, parameter[self]]:
constant[
Access the signing_keys
:returns: twilio.rest.api.v2010.account.signing_key.SigningKeyList
:rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyList
]
if compare[name[self]._signing_keys is constant[None]] begin[:]
name[self]._signing_keys assign[=] call[name[SigningKeyList], parameter[name[self]._version]]
return[name[self]._signing_keys] | keyword[def] identifier[signing_keys] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_signing_keys] keyword[is] keyword[None] :
identifier[self] . identifier[_signing_keys] = identifier[SigningKeyList] ( identifier[self] . identifier[_version] , identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
keyword[return] identifier[self] . identifier[_signing_keys] | def signing_keys(self):
"""
Access the signing_keys
:returns: twilio.rest.api.v2010.account.signing_key.SigningKeyList
:rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyList
"""
if self._signing_keys is None:
self._signing_keys = SigningKeyList(self._version, account_sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._signing_keys |
def configure(default=None, dev=None):
"""
The inner control loops for user interaction during quickstart
configuration.
"""
cache_loc = openaccess_epub.utils.cache_location()
config_loc = openaccess_epub.utils.config_location()
#Make the cache directory
openaccess_epub.utils.mkdir_p(cache_loc)
defaults = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc),
'input-relative-images': 'images-*',
'use-input-relative-images': 'y',
'image-cache': os.path.join(cache_loc, 'img_cache'),
'use-image-cache': 'n',
'use-image-fetching': 'y',
'default-output': '.',
'input-relative-css': '.',
'epubcheck-jarfile': os.path.join(cache_loc,
'epubcheck-3.0',
'epubcheck-3.0.jar')}
if default or dev: # Skip interactive and apply defaults
#Pass through the validation/modification steps
if dev: # The only current difference between dev and default
defaults['use-image-cache'] = 'y'
defaults['input-relative-images'] = list_opts(defaults['input-relative-images'])
defaults['use-input-relative-images'] = boolean(defaults['use-input-relative-images'])
defaults['image-cache'] = absolute_path(defaults['image-cache'])
defaults['use-image-cache'] = boolean(defaults['use-image-cache'])
defaults['use-image-fetching'] = boolean(defaults['use-image-fetching'])
defaults['default-output'] = nonempty(defaults['default-output'])
defaults['input-relative-css'] = nonempty(defaults['input-relative-css'])
defaults['epubcheck-jarfile'] = absolute_path(defaults['epubcheck-jarfile'])
config = config_formatter(CONFIG_TEXT, defaults)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('The config file has been written to {0}'.format(config_loc))
return
config_dict = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc)}
print('''\nWelcome to the interactive configuration for OpenAccess_EPUB''')
print('''
Please enter values for the following settings. To accept the default value
for the settings, shown in brackets, just push Enter.
-------------------------------------------------------------------------------\
''')
print('''
OpenAccess_EPUB defines a default cache location for the storage of various
data (and the global config.py file), this location is:\n\n{0}
'''.format(cache_loc))
input('Press Enter to start...')
#Image Configuration
print('''
-- Configure Image Behavior --
When OpenAccess_EPUB is executed using the oaepub script, it can find the
images for the input articles using the following strategies (in order of
preference):
Input-Relative: a path relative to the input file
Cached Images: locate the images in a cache
Fetched Online: attempts to download from the Internet (may fail)
We'll configure some values for each of these, and you\'ll also have the option
to turn them off.''')
#Input-relative image details
print('''
Where should OpenAccess_EPUB look for images relative to the input file?
A star "*" may be used as a wildcard to match the name of the input file.
Multiple path values may be specified if separated by commas.''')
user_prompt(config_dict, 'input-relative-images', 'Input-relative images?:',
default=defaults['input-relative-images'], validator=list_opts)
print('''
Should OpenAccess_EPUB look for images relative to the input file by default?\
''')
user_prompt(config_dict, 'use-input-relative-images',
'Use input-relative images?: (Y/n)',
default=defaults['use-input-relative-images'],
validator=boolean)
#Image cache details
print('''
Where should OpenAccess_EPUB place the image cache?''')
user_prompt(config_dict, 'image-cache', 'Image cache?:',
default=defaults['image-cache'],
validator=absolute_path)
print('''
Should OpenAccess_EPUB use the image cache by default? This feature is intended
for developers and testers without local access to the image files and will
consume extra disk space for storage.''')
user_prompt(config_dict, 'use-image-cache', 'Use image cache?: (y/N)',
default=defaults['use-image-cache'],
validator=boolean)
#Image fetching online details
print('''
Should OpenAccess_EPUB attempt to download the images from the Internet? This
is not supported for all publishers and not 100% guaranteed to succeed, you may
need to download them manually if this does not work.''')
user_prompt(config_dict, 'use-image-fetching', 'Attempt image download?: (Y/n)',
default=defaults['use-image-fetching'],
validator=boolean)
#Output configuration
print('''
-- Configure Output Behavior --
OpenAccess_EPUB produces ePub and log files as output. The following options
will determine what is done with these.
Where should OpenAccess_EPUB place the output ePub and log files? If you supply
a relative path, the output path will be relative to the input; if you supply
an absolute path, the output will always be placed there. The default behavior
is to place them in the same directory as the input.''')
user_prompt(config_dict, 'default-output', 'Output path?:',
default=defaults['default-output'],
validator=nonempty)
print('''
-- Configure CSS Behavior --
ePub files use CSS for improved styling, and ePub-readers must support a basic
subset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a
manual one may be supplied, relative to the input. Please define an
appropriate input-relative path.''')
user_prompt(config_dict, 'input-relative-css', 'Input-relative CSS path?:',
default=defaults['input-relative-css'],
validator=nonempty)
print('''
-- Configure EpubCheck --
EpubCheck is a program written and maintained by the IDPF as a tool to validate
ePub. In order to use it, your system must have Java installed and it is
recommended to use the latest version. Downloads of this program are found here:
https://github.com/IDPF/epubcheck/releases
Once you have downloaded the zip file for the program, unzip the archive and
write a path to the .jar file here.''')
user_prompt(config_dict, 'epubcheck-jarfile', 'Absolute path to epubcheck?:',
default=defaults['epubcheck-jarfile'], validator=absolute_path)
#Write the config.py file
config = config_formatter(CONFIG_TEXT, config_dict)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('''
Done configuring OpenAccess_EPUB!''') | def function[configure, parameter[default, dev]]:
constant[
The inner control loops for user interaction during quickstart
configuration.
]
variable[cache_loc] assign[=] call[name[openaccess_epub].utils.cache_location, parameter[]]
variable[config_loc] assign[=] call[name[openaccess_epub].utils.config_location, parameter[]]
call[name[openaccess_epub].utils.mkdir_p, parameter[name[cache_loc]]]
variable[defaults] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a8a00>, <ast.Constant object at 0x7da20c6aa5f0>, <ast.Constant object at 0x7da20c6a93c0>, <ast.Constant object at 0x7da20c6abfd0>, <ast.Constant object at 0x7da20c6a9e40>, <ast.Constant object at 0x7da20c6abb50>, <ast.Constant object at 0x7da20c6a9ae0>, <ast.Constant object at 0x7da20c6a99f0>, <ast.Constant object at 0x7da20c6abbe0>, <ast.Constant object at 0x7da20c6a8550>, <ast.Constant object at 0x7da20c6a80a0>], [<ast.Call object at 0x7da20c6aa470>, <ast.Attribute object at 0x7da20c6a9480>, <ast.Call object at 0x7da20c6abc70>, <ast.Constant object at 0x7da20c6a84f0>, <ast.Constant object at 0x7da20c6a8e20>, <ast.Call object at 0x7da20c6a9a50>, <ast.Constant object at 0x7da20c6aa1a0>, <ast.Constant object at 0x7da20c6ab640>, <ast.Constant object at 0x7da20c6a8220>, <ast.Constant object at 0x7da20c6abe20>, <ast.Call object at 0x7da20c6ab700>]]
if <ast.BoolOp object at 0x7da20c6ab2b0> begin[:]
if name[dev] begin[:]
call[name[defaults]][constant[use-image-cache]] assign[=] constant[y]
call[name[defaults]][constant[input-relative-images]] assign[=] call[name[list_opts], parameter[call[name[defaults]][constant[input-relative-images]]]]
call[name[defaults]][constant[use-input-relative-images]] assign[=] call[name[boolean], parameter[call[name[defaults]][constant[use-input-relative-images]]]]
call[name[defaults]][constant[image-cache]] assign[=] call[name[absolute_path], parameter[call[name[defaults]][constant[image-cache]]]]
call[name[defaults]][constant[use-image-cache]] assign[=] call[name[boolean], parameter[call[name[defaults]][constant[use-image-cache]]]]
call[name[defaults]][constant[use-image-fetching]] assign[=] call[name[boolean], parameter[call[name[defaults]][constant[use-image-fetching]]]]
call[name[defaults]][constant[default-output]] assign[=] call[name[nonempty], parameter[call[name[defaults]][constant[default-output]]]]
call[name[defaults]][constant[input-relative-css]] assign[=] call[name[nonempty], parameter[call[name[defaults]][constant[input-relative-css]]]]
call[name[defaults]][constant[epubcheck-jarfile]] assign[=] call[name[absolute_path], parameter[call[name[defaults]][constant[epubcheck-jarfile]]]]
variable[config] assign[=] call[name[config_formatter], parameter[name[CONFIG_TEXT], name[defaults]]]
with call[name[open], parameter[name[config_loc], constant[wb]]] begin[:]
call[name[conf_out].write, parameter[call[name[bytes], parameter[name[config], constant[UTF-8]]]]]
call[name[print], parameter[call[constant[The config file has been written to {0}].format, parameter[name[config_loc]]]]]
return[None]
variable[config_dict] assign[=] dictionary[[<ast.Constant object at 0x7da20c6aaa40>, <ast.Constant object at 0x7da20c6a9cf0>, <ast.Constant object at 0x7da20c6ab1f0>], [<ast.Call object at 0x7da20c6a9120>, <ast.Attribute object at 0x7da20c6a8d90>, <ast.Call object at 0x7da20c6aac50>]]
call[name[print], parameter[constant[
Welcome to the interactive configuration for OpenAccess_EPUB]]]
call[name[print], parameter[constant[
Please enter values for the following settings. To accept the default value
for the settings, shown in brackets, just push Enter.
-------------------------------------------------------------------------------]]]
call[name[print], parameter[call[constant[
OpenAccess_EPUB defines a default cache location for the storage of various
data (and the global config.py file), this location is:
{0}
].format, parameter[name[cache_loc]]]]]
call[name[input], parameter[constant[Press Enter to start...]]]
call[name[print], parameter[constant[
-- Configure Image Behavior --
When OpenAccess_EPUB is executed using the oaepub script, it can find the
images for the input articles using the following strategies (in order of
preference):
Input-Relative: a path relative to the input file
Cached Images: locate the images in a cache
Fetched Online: attempts to download from the Internet (may fail)
We'll configure some values for each of these, and you'll also have the option
to turn them off.]]]
call[name[print], parameter[constant[
Where should OpenAccess_EPUB look for images relative to the input file?
A star "*" may be used as a wildcard to match the name of the input file.
Multiple path values may be specified if separated by commas.]]]
call[name[user_prompt], parameter[name[config_dict], constant[input-relative-images], constant[Input-relative images?:]]]
call[name[print], parameter[constant[
Should OpenAccess_EPUB look for images relative to the input file by default?]]]
call[name[user_prompt], parameter[name[config_dict], constant[use-input-relative-images], constant[Use input-relative images?: (Y/n)]]]
call[name[print], parameter[constant[
Where should OpenAccess_EPUB place the image cache?]]]
call[name[user_prompt], parameter[name[config_dict], constant[image-cache], constant[Image cache?:]]]
call[name[print], parameter[constant[
Should OpenAccess_EPUB use the image cache by default? This feature is intended
for developers and testers without local access to the image files and will
consume extra disk space for storage.]]]
call[name[user_prompt], parameter[name[config_dict], constant[use-image-cache], constant[Use image cache?: (y/N)]]]
call[name[print], parameter[constant[
Should OpenAccess_EPUB attempt to download the images from the Internet? This
is not supported for all publishers and not 100% guaranteed to succeed, you may
need to download them manually if this does not work.]]]
call[name[user_prompt], parameter[name[config_dict], constant[use-image-fetching], constant[Attempt image download?: (Y/n)]]]
call[name[print], parameter[constant[
-- Configure Output Behavior --
OpenAccess_EPUB produces ePub and log files as output. The following options
will determine what is done with these.
Where should OpenAccess_EPUB place the output ePub and log files? If you supply
a relative path, the output path will be relative to the input; if you supply
an absolute path, the output will always be placed there. The default behavior
is to place them in the same directory as the input.]]]
call[name[user_prompt], parameter[name[config_dict], constant[default-output], constant[Output path?:]]]
call[name[print], parameter[constant[
-- Configure CSS Behavior --
ePub files use CSS for improved styling, and ePub-readers must support a basic
subset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a
manual one may be supplied, relative to the input. Please define an
appropriate input-relative path.]]]
call[name[user_prompt], parameter[name[config_dict], constant[input-relative-css], constant[Input-relative CSS path?:]]]
call[name[print], parameter[constant[
-- Configure EpubCheck --
EpubCheck is a program written and maintained by the IDPF as a tool to validate
ePub. In order to use it, your system must have Java installed and it is
recommended to use the latest version. Downloads of this program are found here:
https://github.com/IDPF/epubcheck/releases
Once you have downloaded the zip file for the program, unzip the archive and
write a path to the .jar file here.]]]
call[name[user_prompt], parameter[name[config_dict], constant[epubcheck-jarfile], constant[Absolute path to epubcheck?:]]]
variable[config] assign[=] call[name[config_formatter], parameter[name[CONFIG_TEXT], name[config_dict]]]
with call[name[open], parameter[name[config_loc], constant[wb]]] begin[:]
call[name[conf_out].write, parameter[call[name[bytes], parameter[name[config], constant[UTF-8]]]]]
call[name[print], parameter[constant[
Done configuring OpenAccess_EPUB!]]] | keyword[def] identifier[configure] ( identifier[default] = keyword[None] , identifier[dev] = keyword[None] ):
literal[string]
identifier[cache_loc] = identifier[openaccess_epub] . identifier[utils] . identifier[cache_location] ()
identifier[config_loc] = identifier[openaccess_epub] . identifier[utils] . identifier[config_location] ()
identifier[openaccess_epub] . identifier[utils] . identifier[mkdir_p] ( identifier[cache_loc] )
identifier[defaults] ={ literal[string] : identifier[time] . identifier[asctime] (),
literal[string] : identifier[openaccess_epub] . identifier[__version__] ,
literal[string] : identifier[unix_path_coercion] ( identifier[cache_loc] ),
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[cache_loc] , literal[string] ),
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[cache_loc] ,
literal[string] ,
literal[string] )}
keyword[if] identifier[default] keyword[or] identifier[dev] :
keyword[if] identifier[dev] :
identifier[defaults] [ literal[string] ]= literal[string]
identifier[defaults] [ literal[string] ]= identifier[list_opts] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[boolean] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[absolute_path] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[boolean] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[boolean] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[nonempty] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[nonempty] ( identifier[defaults] [ literal[string] ])
identifier[defaults] [ literal[string] ]= identifier[absolute_path] ( identifier[defaults] [ literal[string] ])
identifier[config] = identifier[config_formatter] ( identifier[CONFIG_TEXT] , identifier[defaults] )
keyword[with] identifier[open] ( identifier[config_loc] , literal[string] ) keyword[as] identifier[conf_out] :
identifier[conf_out] . identifier[write] ( identifier[bytes] ( identifier[config] , literal[string] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[config_loc] ))
keyword[return]
identifier[config_dict] ={ literal[string] : identifier[time] . identifier[asctime] (),
literal[string] : identifier[openaccess_epub] . identifier[__version__] ,
literal[string] : identifier[unix_path_coercion] ( identifier[cache_loc] )}
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[cache_loc] ))
identifier[input] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ], identifier[validator] = identifier[list_opts] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] ,
literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ],
identifier[validator] = identifier[boolean] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ],
identifier[validator] = identifier[absolute_path] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ],
identifier[validator] = identifier[boolean] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ],
identifier[validator] = identifier[boolean] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ],
identifier[validator] = identifier[nonempty] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ],
identifier[validator] = identifier[nonempty] )
identifier[print] ( literal[string] )
identifier[user_prompt] ( identifier[config_dict] , literal[string] , literal[string] ,
identifier[default] = identifier[defaults] [ literal[string] ], identifier[validator] = identifier[absolute_path] )
identifier[config] = identifier[config_formatter] ( identifier[CONFIG_TEXT] , identifier[config_dict] )
keyword[with] identifier[open] ( identifier[config_loc] , literal[string] ) keyword[as] identifier[conf_out] :
identifier[conf_out] . identifier[write] ( identifier[bytes] ( identifier[config] , literal[string] ))
identifier[print] ( literal[string] ) | def configure(default=None, dev=None):
"""
The inner control loops for user interaction during quickstart
configuration.
"""
cache_loc = openaccess_epub.utils.cache_location()
config_loc = openaccess_epub.utils.config_location()
#Make the cache directory
openaccess_epub.utils.mkdir_p(cache_loc)
defaults = {'now': time.asctime(), 'oae-version': openaccess_epub.__version__, 'cache-location': unix_path_coercion(cache_loc), 'input-relative-images': 'images-*', 'use-input-relative-images': 'y', 'image-cache': os.path.join(cache_loc, 'img_cache'), 'use-image-cache': 'n', 'use-image-fetching': 'y', 'default-output': '.', 'input-relative-css': '.', 'epubcheck-jarfile': os.path.join(cache_loc, 'epubcheck-3.0', 'epubcheck-3.0.jar')}
if default or dev: # Skip interactive and apply defaults
#Pass through the validation/modification steps
if dev: # The only current difference between dev and default
defaults['use-image-cache'] = 'y' # depends on [control=['if'], data=[]]
defaults['input-relative-images'] = list_opts(defaults['input-relative-images'])
defaults['use-input-relative-images'] = boolean(defaults['use-input-relative-images'])
defaults['image-cache'] = absolute_path(defaults['image-cache'])
defaults['use-image-cache'] = boolean(defaults['use-image-cache'])
defaults['use-image-fetching'] = boolean(defaults['use-image-fetching'])
defaults['default-output'] = nonempty(defaults['default-output'])
defaults['input-relative-css'] = nonempty(defaults['input-relative-css'])
defaults['epubcheck-jarfile'] = absolute_path(defaults['epubcheck-jarfile'])
config = config_formatter(CONFIG_TEXT, defaults)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8')) # depends on [control=['with'], data=['conf_out']]
print('The config file has been written to {0}'.format(config_loc))
return # depends on [control=['if'], data=[]]
config_dict = {'now': time.asctime(), 'oae-version': openaccess_epub.__version__, 'cache-location': unix_path_coercion(cache_loc)}
print('\nWelcome to the interactive configuration for OpenAccess_EPUB')
print('\nPlease enter values for the following settings. To accept the default value\nfor the settings, shown in brackets, just push Enter.\n\n-------------------------------------------------------------------------------')
print('\nOpenAccess_EPUB defines a default cache location for the storage of various\ndata (and the global config.py file), this location is:\n\n{0}\n'.format(cache_loc))
input('Press Enter to start...')
#Image Configuration
print("\n -- Configure Image Behavior --\n\nWhen OpenAccess_EPUB is executed using the oaepub script, it can find the\nimages for the input articles using the following strategies (in order of\npreference):\n\n Input-Relative: a path relative to the input file\n Cached Images: locate the images in a cache\n Fetched Online: attempts to download from the Internet (may fail)\n\nWe'll configure some values for each of these, and you'll also have the option\nto turn them off.")
#Input-relative image details
print('\nWhere should OpenAccess_EPUB look for images relative to the input file?\nA star "*" may be used as a wildcard to match the name of the input file.\nMultiple path values may be specified if separated by commas.')
user_prompt(config_dict, 'input-relative-images', 'Input-relative images?:', default=defaults['input-relative-images'], validator=list_opts)
print('\nShould OpenAccess_EPUB look for images relative to the input file by default?')
user_prompt(config_dict, 'use-input-relative-images', 'Use input-relative images?: (Y/n)', default=defaults['use-input-relative-images'], validator=boolean)
#Image cache details
print('\nWhere should OpenAccess_EPUB place the image cache?')
user_prompt(config_dict, 'image-cache', 'Image cache?:', default=defaults['image-cache'], validator=absolute_path)
print('\nShould OpenAccess_EPUB use the image cache by default? This feature is intended\nfor developers and testers without local access to the image files and will\nconsume extra disk space for storage.')
user_prompt(config_dict, 'use-image-cache', 'Use image cache?: (y/N)', default=defaults['use-image-cache'], validator=boolean)
#Image fetching online details
print('\nShould OpenAccess_EPUB attempt to download the images from the Internet? This\nis not supported for all publishers and not 100% guaranteed to succeed, you may\nneed to download them manually if this does not work.')
user_prompt(config_dict, 'use-image-fetching', 'Attempt image download?: (Y/n)', default=defaults['use-image-fetching'], validator=boolean)
#Output configuration
print('\n -- Configure Output Behavior --\n\nOpenAccess_EPUB produces ePub and log files as output. The following options\nwill determine what is done with these.\n\nWhere should OpenAccess_EPUB place the output ePub and log files? If you supply\na relative path, the output path will be relative to the input; if you supply\nan absolute path, the output will always be placed there. The default behavior\nis to place them in the same directory as the input.')
user_prompt(config_dict, 'default-output', 'Output path?:', default=defaults['default-output'], validator=nonempty)
print('\n -- Configure CSS Behavior --\n\nePub files use CSS for improved styling, and ePub-readers must support a basic\nsubset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a\nmanual one may be supplied, relative to the input. Please define an\nappropriate input-relative path.')
user_prompt(config_dict, 'input-relative-css', 'Input-relative CSS path?:', default=defaults['input-relative-css'], validator=nonempty)
print('\n -- Configure EpubCheck --\n\nEpubCheck is a program written and maintained by the IDPF as a tool to validate\nePub. In order to use it, your system must have Java installed and it is\nrecommended to use the latest version. Downloads of this program are found here:\n\nhttps://github.com/IDPF/epubcheck/releases\n\nOnce you have downloaded the zip file for the program, unzip the archive and\nwrite a path to the .jar file here.')
user_prompt(config_dict, 'epubcheck-jarfile', 'Absolute path to epubcheck?:', default=defaults['epubcheck-jarfile'], validator=absolute_path)
#Write the config.py file
config = config_formatter(CONFIG_TEXT, config_dict)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8')) # depends on [control=['with'], data=['conf_out']]
print('\nDone configuring OpenAccess_EPUB!') |
def get_resources_by_ids(self, resource_ids):
"""Gets a ``ResourceList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the resources
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Resources`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: resource_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.resource.ResourceList) - the returned ``Resource``
list
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``resource_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_ids
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('resource',
collection='Resource',
runtime=self._runtime)
object_id_list = []
for i in resource_ids:
object_id_list.append(ObjectId(self._get_id(i, 'resource').get_identifier()))
result = collection.find(
dict({'_id': {'$in': object_id_list}},
**self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break
return objects.ResourceList(sorted_result, runtime=self._runtime, proxy=self._proxy) | def function[get_resources_by_ids, parameter[self, resource_ids]]:
constant[Gets a ``ResourceList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the resources
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Resources`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: resource_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.resource.ResourceList) - the returned ``Resource``
list
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``resource_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[resource]]]
variable[object_id_list] assign[=] list[[]]
for taget[name[i]] in starred[name[resource_ids]] begin[:]
call[name[object_id_list].append, parameter[call[name[ObjectId], parameter[call[call[name[self]._get_id, parameter[name[i], constant[resource]]].get_identifier, parameter[]]]]]]
variable[result] assign[=] call[name[collection].find, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da204344a00>], [<ast.Dict object at 0x7da204344eb0>]]]]]]
variable[result] assign[=] call[name[list], parameter[name[result]]]
variable[sorted_result] assign[=] list[[]]
for taget[name[object_id]] in starred[name[object_id_list]] begin[:]
for taget[name[object_map]] in starred[name[result]] begin[:]
if compare[call[name[object_map]][constant[_id]] equal[==] name[object_id]] begin[:]
call[name[sorted_result].append, parameter[name[object_map]]]
break
return[call[name[objects].ResourceList, parameter[name[sorted_result]]]] | keyword[def] identifier[get_resources_by_ids] ( identifier[self] , identifier[resource_ids] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[object_id_list] =[]
keyword[for] identifier[i] keyword[in] identifier[resource_ids] :
identifier[object_id_list] . identifier[append] ( identifier[ObjectId] ( identifier[self] . identifier[_get_id] ( identifier[i] , literal[string] ). identifier[get_identifier] ()))
identifier[result] = identifier[collection] . identifier[find] (
identifier[dict] ({ literal[string] :{ literal[string] : identifier[object_id_list] }},
** identifier[self] . identifier[_view_filter] ()))
identifier[result] = identifier[list] ( identifier[result] )
identifier[sorted_result] =[]
keyword[for] identifier[object_id] keyword[in] identifier[object_id_list] :
keyword[for] identifier[object_map] keyword[in] identifier[result] :
keyword[if] identifier[object_map] [ literal[string] ]== identifier[object_id] :
identifier[sorted_result] . identifier[append] ( identifier[object_map] )
keyword[break]
keyword[return] identifier[objects] . identifier[ResourceList] ( identifier[sorted_result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ) | def get_resources_by_ids(self, resource_ids):
"""Gets a ``ResourceList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the resources
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Resources`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: resource_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.resource.ResourceList) - the returned ``Resource``
list
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``resource_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_ids
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('resource', collection='Resource', runtime=self._runtime)
object_id_list = []
for i in resource_ids:
object_id_list.append(ObjectId(self._get_id(i, 'resource').get_identifier())) # depends on [control=['for'], data=['i']]
result = collection.find(dict({'_id': {'$in': object_id_list}}, **self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['object_map']] # depends on [control=['for'], data=['object_id']]
return objects.ResourceList(sorted_result, runtime=self._runtime, proxy=self._proxy) |
def lmom_ratios(self, *args, nmom=5, **kwds):
"""
Compute the distribution's L-moment ratios, e.g. l1, l2, t3, t4, ..
:param args: Distribution parameters in order of shape(s), loc, scale
:type args: float
:param nmom: Number of moments to calculate
:type nmom: int
:param kwds: Distribution parameters as named arguments. See :attr:`rv_continous.shapes` for names of shape
parameters
:type kwds: float
:returns: List of L-moment ratios
:rtype: list
"""
if nmom > 20:
return ValueError("Parameter nmom too large. Max of 20.")
shapes, loc, scale = self._parse_args(*args, **kwds)
if scale <= 0:
return ValueError("Invalid scale parameter.")
return self._lmom_ratios(*shapes, loc=loc, scale=scale, nmom=nmom) | def function[lmom_ratios, parameter[self]]:
constant[
Compute the distribution's L-moment ratios, e.g. l1, l2, t3, t4, ..
:param args: Distribution parameters in order of shape(s), loc, scale
:type args: float
:param nmom: Number of moments to calculate
:type nmom: int
:param kwds: Distribution parameters as named arguments. See :attr:`rv_continous.shapes` for names of shape
parameters
:type kwds: float
:returns: List of L-moment ratios
:rtype: list
]
if compare[name[nmom] greater[>] constant[20]] begin[:]
return[call[name[ValueError], parameter[constant[Parameter nmom too large. Max of 20.]]]]
<ast.Tuple object at 0x7da18f58de70> assign[=] call[name[self]._parse_args, parameter[<ast.Starred object at 0x7da18f58d330>]]
if compare[name[scale] less_or_equal[<=] constant[0]] begin[:]
return[call[name[ValueError], parameter[constant[Invalid scale parameter.]]]]
return[call[name[self]._lmom_ratios, parameter[<ast.Starred object at 0x7da18f58e1a0>]]] | keyword[def] identifier[lmom_ratios] ( identifier[self] ,* identifier[args] , identifier[nmom] = literal[int] ,** identifier[kwds] ):
literal[string]
keyword[if] identifier[nmom] > literal[int] :
keyword[return] identifier[ValueError] ( literal[string] )
identifier[shapes] , identifier[loc] , identifier[scale] = identifier[self] . identifier[_parse_args] (* identifier[args] ,** identifier[kwds] )
keyword[if] identifier[scale] <= literal[int] :
keyword[return] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[_lmom_ratios] (* identifier[shapes] , identifier[loc] = identifier[loc] , identifier[scale] = identifier[scale] , identifier[nmom] = identifier[nmom] ) | def lmom_ratios(self, *args, nmom=5, **kwds):
"""
Compute the distribution's L-moment ratios, e.g. l1, l2, t3, t4, ..
:param args: Distribution parameters in order of shape(s), loc, scale
:type args: float
:param nmom: Number of moments to calculate
:type nmom: int
:param kwds: Distribution parameters as named arguments. See :attr:`rv_continous.shapes` for names of shape
parameters
:type kwds: float
:returns: List of L-moment ratios
:rtype: list
"""
if nmom > 20:
return ValueError('Parameter nmom too large. Max of 20.') # depends on [control=['if'], data=[]]
(shapes, loc, scale) = self._parse_args(*args, **kwds)
if scale <= 0:
return ValueError('Invalid scale parameter.') # depends on [control=['if'], data=[]]
return self._lmom_ratios(*shapes, loc=loc, scale=scale, nmom=nmom) |
def show_top(minion=None, saltenv='base'):
'''
Returns the compiled top data for pillar for a specific minion. If no
minion is specified, we use the first minion we find.
CLI Example:
.. code-block:: bash
salt-run pillar.show_top
'''
id_, grains, _ = salt.utils.minions.get_minion_data(minion, __opts__)
if not grains and minion == __opts__['id']:
grains = salt.loader.grains(__opts__)
pillar = salt.pillar.Pillar(
__opts__,
grains,
id_,
saltenv)
top, errors = pillar.get_top()
if errors:
__jid_event__.fire_event({'data': errors, 'outputter': 'nested'}, 'progress')
return errors
# needed because pillar compilation clobbers grains etc via lazyLoader
# this resets the masterminion back to known state
__salt__['salt.cmd']('sys.reload_modules')
return top | def function[show_top, parameter[minion, saltenv]]:
constant[
Returns the compiled top data for pillar for a specific minion. If no
minion is specified, we use the first minion we find.
CLI Example:
.. code-block:: bash
salt-run pillar.show_top
]
<ast.Tuple object at 0x7da1b2345720> assign[=] call[name[salt].utils.minions.get_minion_data, parameter[name[minion], name[__opts__]]]
if <ast.BoolOp object at 0x7da1b2345e40> begin[:]
variable[grains] assign[=] call[name[salt].loader.grains, parameter[name[__opts__]]]
variable[pillar] assign[=] call[name[salt].pillar.Pillar, parameter[name[__opts__], name[grains], name[id_], name[saltenv]]]
<ast.Tuple object at 0x7da1b2344460> assign[=] call[name[pillar].get_top, parameter[]]
if name[errors] begin[:]
call[name[__jid_event__].fire_event, parameter[dictionary[[<ast.Constant object at 0x7da1b23467a0>, <ast.Constant object at 0x7da18bcc8e80>], [<ast.Name object at 0x7da18bcc86d0>, <ast.Constant object at 0x7da18bcc9e40>]], constant[progress]]]
return[name[errors]]
call[call[name[__salt__]][constant[salt.cmd]], parameter[constant[sys.reload_modules]]]
return[name[top]] | keyword[def] identifier[show_top] ( identifier[minion] = keyword[None] , identifier[saltenv] = literal[string] ):
literal[string]
identifier[id_] , identifier[grains] , identifier[_] = identifier[salt] . identifier[utils] . identifier[minions] . identifier[get_minion_data] ( identifier[minion] , identifier[__opts__] )
keyword[if] keyword[not] identifier[grains] keyword[and] identifier[minion] == identifier[__opts__] [ literal[string] ]:
identifier[grains] = identifier[salt] . identifier[loader] . identifier[grains] ( identifier[__opts__] )
identifier[pillar] = identifier[salt] . identifier[pillar] . identifier[Pillar] (
identifier[__opts__] ,
identifier[grains] ,
identifier[id_] ,
identifier[saltenv] )
identifier[top] , identifier[errors] = identifier[pillar] . identifier[get_top] ()
keyword[if] identifier[errors] :
identifier[__jid_event__] . identifier[fire_event] ({ literal[string] : identifier[errors] , literal[string] : literal[string] }, literal[string] )
keyword[return] identifier[errors]
identifier[__salt__] [ literal[string] ]( literal[string] )
keyword[return] identifier[top] | def show_top(minion=None, saltenv='base'):
"""
Returns the compiled top data for pillar for a specific minion. If no
minion is specified, we use the first minion we find.
CLI Example:
.. code-block:: bash
salt-run pillar.show_top
"""
(id_, grains, _) = salt.utils.minions.get_minion_data(minion, __opts__)
if not grains and minion == __opts__['id']:
grains = salt.loader.grains(__opts__) # depends on [control=['if'], data=[]]
pillar = salt.pillar.Pillar(__opts__, grains, id_, saltenv)
(top, errors) = pillar.get_top()
if errors:
__jid_event__.fire_event({'data': errors, 'outputter': 'nested'}, 'progress')
return errors # depends on [control=['if'], data=[]]
# needed because pillar compilation clobbers grains etc via lazyLoader
# this resets the masterminion back to known state
__salt__['salt.cmd']('sys.reload_modules')
return top |
def get_authorize_url(self, redirect_uri, **kw):
'''
return the authorization url that the user should be redirected to.
'''
redirect = redirect_uri if redirect_uri else self._redirect_uri
if not redirect:
raise APIError('21305', 'Parameter absent: redirect_uri', 'OAuth2 request')
response_type = kw.pop('response_type', 'code')
return 'https://api.weibo.com/oauth2/authorize?%s' % \
_encode_params(client_id=self._client_id,
response_type=response_type,
redirect_uri=redirect, **kw) | def function[get_authorize_url, parameter[self, redirect_uri]]:
constant[
return the authorization url that the user should be redirected to.
]
variable[redirect] assign[=] <ast.IfExp object at 0x7da20c6a89a0>
if <ast.UnaryOp object at 0x7da20c6a9c00> begin[:]
<ast.Raise object at 0x7da20c6a8b80>
variable[response_type] assign[=] call[name[kw].pop, parameter[constant[response_type], constant[code]]]
return[binary_operation[constant[https://api.weibo.com/oauth2/authorize?%s] <ast.Mod object at 0x7da2590d6920> call[name[_encode_params], parameter[]]]] | keyword[def] identifier[get_authorize_url] ( identifier[self] , identifier[redirect_uri] ,** identifier[kw] ):
literal[string]
identifier[redirect] = identifier[redirect_uri] keyword[if] identifier[redirect_uri] keyword[else] identifier[self] . identifier[_redirect_uri]
keyword[if] keyword[not] identifier[redirect] :
keyword[raise] identifier[APIError] ( literal[string] , literal[string] , literal[string] )
identifier[response_type] = identifier[kw] . identifier[pop] ( literal[string] , literal[string] )
keyword[return] literal[string] % identifier[_encode_params] ( identifier[client_id] = identifier[self] . identifier[_client_id] ,
identifier[response_type] = identifier[response_type] ,
identifier[redirect_uri] = identifier[redirect] ,** identifier[kw] ) | def get_authorize_url(self, redirect_uri, **kw):
"""
return the authorization url that the user should be redirected to.
"""
redirect = redirect_uri if redirect_uri else self._redirect_uri
if not redirect:
raise APIError('21305', 'Parameter absent: redirect_uri', 'OAuth2 request') # depends on [control=['if'], data=[]]
response_type = kw.pop('response_type', 'code')
return 'https://api.weibo.com/oauth2/authorize?%s' % _encode_params(client_id=self._client_id, response_type=response_type, redirect_uri=redirect, **kw) |
def write(self):
""" write all needed state info to filesystem """
dumped = self._fax.codec.dump(self.__state, open(self.state_file, 'w')) | def function[write, parameter[self]]:
constant[ write all needed state info to filesystem ]
variable[dumped] assign[=] call[name[self]._fax.codec.dump, parameter[name[self].__state, call[name[open], parameter[name[self].state_file, constant[w]]]]] | keyword[def] identifier[write] ( identifier[self] ):
literal[string]
identifier[dumped] = identifier[self] . identifier[_fax] . identifier[codec] . identifier[dump] ( identifier[self] . identifier[__state] , identifier[open] ( identifier[self] . identifier[state_file] , literal[string] )) | def write(self):
""" write all needed state info to filesystem """
dumped = self._fax.codec.dump(self.__state, open(self.state_file, 'w')) |
def RegisterMethod(cls, *args, **kwargs):
"""
**RegisterMethod**
RegisterMethod(f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True)
`classmethod` for registering functions as methods of this class.
**Arguments**
* **f** : the particular function being registered as a method
* **library_path** : library from where `f` comes from, unless you pass an empty string, put a period `"."` at the end of the library name.
* `alias=None` : alias for the name/method being registered
* `original_name=None` : name of the original function, used for documentation purposes.
* `doc=None` : complete documentation of the method being registered
* `wrapped=None` : if you are registering a function which wraps around another function, pass this other function through `wrapped` to get better documentation, this is specially useful is you register a bunch of functions in a for loop. Please include an `explanation` to tell how the actual function differs from the wrapped one.
* `explanation=""` : especify any additional information for the documentation of the method being registered, you can use any of the following format tags within this string and they will be replace latter on: `{original_name}`, `{name}`, `{fn_docs}`, `{library_path}`, `{builder_class}`.
* `method_type=identity` : by default its applied but does nothing, you might also want to register functions as `property`, `classmethod`, `staticmethod`
* `explain=True` : decide whether or not to show any kind of explanation, its useful to set it to `False` if you are using a `Register*` decorator and will only use the function as a registered method.
A main feature of `phi` is that it enables you to integrate your library or even an existing library with the DSL. You can achieve three levels of integration
1. Passing your functions to the DSL. This a very general machanism -since you could actually do everything with python lamdas- but in practice functions often receive multiple parameters.
2. Creating partials with the `Then*` method family. Using this you could integrate any function, but it will add a lot of noise if you use heavily on it.
3. Registering functions as methods of a `Builder` derived class. This produces the most readable code and its the approach you should take if you want to create a Phi-based library or a helper class.
While point 3 is the most desirable it has a cost: you need to create your own `phi.builder.Builder`-derived class. This is because SHOULD NOT register functions to existing builders e.g. the `phi.builder.Builder` or [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) provided by phi because that would pollute the `P` object. Instead you should create a custom class that derives from `phi.builder.Builder`, [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) or another custom builder depending on your needs and register your functions to that class.
**Examples**
Say you have a function on a library called `"my_lib"`
def some_fun(obj, arg1, arg2):
# code
You could use it with the dsl like this
from phi import P, Then
P.Pipe(
input,
...
Then(some_fun, arg1, arg2)
...
)
assuming the first parameter `obj` is being piped down. However if you do this very often or you are creating a library, you are better off creating a custom class derived from `Builder` or `PythonBuilder`
from phi import Builder #or PythonBuilder
class MyBuilder(Builder): # or PythonBuilder
pass
and registering your function as a method. The first way you could do this is by creating a wrapper function for `some_fun` and registering it as a method
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
MyBuilder.RegisterMethod(some_fun_wrapper, "my_lib.", wrapped=some_fun)
Here we basically created a shortcut for the original expression `Then(some_fun, arg1, arg2)`. You could also do this using a decorator
@MyBuilder.RegisterMethod("my_lib.", wrapped=some_fun)
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
However, this is such a common task that we've created the method `Register` to avoid you from having to create the wrapper. With it you could register the function `some_fun` directly as a method like this
MyBuilder.Register(some_fun, "my_lib.")
or by using a decorator over the original function definition
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
Once done you've done any of the previous approaches you can create a custom global object e.g. `M` and use it instead of/along with `P`
M = MyBuilder(lambda x: x)
M.Pipe(
input,
...
M.some_fun(arg1, args)
...
)
**Argument position**
`phi.builder.Builder.Register` internally uses `phi.builder.Builder.Then`, this is only useful if the object being piped is intended to be passed as the first argument of the function being registered, if this is not the case you could use `phi.builder.Builder.Register2`, `phi.builder.Builder.Register3`, ..., `phi.builder.Builder.Register5` or `phi.builder.Builder.RegisterAt` to set an arbitrary position, these functions will internally use `phi.builder.Builder.Then2`, `phi.builder.Builder.Then3`, ..., `phi.builder.Builder.Then5` or `phi.builder.Builder.ThenAt` respectively.
**Wrapping functions**
Sometimes you have an existing function that you would like to modify slightly so it plays nicely with the DSL, what you normally do is create a function that wraps around it and passes the arguments to it in a way that is convenient
import some_lib
@MyBuilder.Register("some_lib.")
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
When you do this -as a side effect- you loose the original documentation, to avoid this you can use the Registers `wrapped` argument along with the `explanation` argument to clarity the situation
import some_lib
some_fun_explanation = "However, it differs in that `n` is automatically subtracted `1`"
@MyBuilder.Register("some_lib.", wrapped=some_lib.some_fun, explanation=some_fun_explanation)
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
Now the documentation for `MyBuilder.some_fun` will be a little bit nicer since it includes the original documentation from `some_lib.some_fun`. This behaviour is specially useful if you are wrapping an entire 3rd party library, you usually automate the process iterating over all the funcitions in a for loop. The `phi.builder.Builder.PatchAt` method lets you register and entire module using a few lines of code, however, something you have to do thing more manually and do the iteration yourself.
**See Also**
* `phi.builder.Builder.PatchAt`
* `phi.builder.Builder.RegisterAt`
"""
unpack_error = True
try:
f, library_path = args
unpack_error = False
cls._RegisterMethod(f, library_path, **kwargs)
except:
if not unpack_error:
raise
def register_decorator(f):
library_path, = args
cls._RegisterMethod(f, library_path, **kwargs)
return f
return register_decorator | def function[RegisterMethod, parameter[cls]]:
constant[
**RegisterMethod**
RegisterMethod(f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True)
`classmethod` for registering functions as methods of this class.
**Arguments**
* **f** : the particular function being registered as a method
* **library_path** : library from where `f` comes from, unless you pass an empty string, put a period `"."` at the end of the library name.
* `alias=None` : alias for the name/method being registered
* `original_name=None` : name of the original function, used for documentation purposes.
* `doc=None` : complete documentation of the method being registered
* `wrapped=None` : if you are registering a function which wraps around another function, pass this other function through `wrapped` to get better documentation, this is specially useful is you register a bunch of functions in a for loop. Please include an `explanation` to tell how the actual function differs from the wrapped one.
* `explanation=""` : especify any additional information for the documentation of the method being registered, you can use any of the following format tags within this string and they will be replace latter on: `{original_name}`, `{name}`, `{fn_docs}`, `{library_path}`, `{builder_class}`.
* `method_type=identity` : by default its applied but does nothing, you might also want to register functions as `property`, `classmethod`, `staticmethod`
* `explain=True` : decide whether or not to show any kind of explanation, its useful to set it to `False` if you are using a `Register*` decorator and will only use the function as a registered method.
A main feature of `phi` is that it enables you to integrate your library or even an existing library with the DSL. You can achieve three levels of integration
1. Passing your functions to the DSL. This a very general machanism -since you could actually do everything with python lamdas- but in practice functions often receive multiple parameters.
2. Creating partials with the `Then*` method family. Using this you could integrate any function, but it will add a lot of noise if you use heavily on it.
3. Registering functions as methods of a `Builder` derived class. This produces the most readable code and its the approach you should take if you want to create a Phi-based library or a helper class.
While point 3 is the most desirable it has a cost: you need to create your own `phi.builder.Builder`-derived class. This is because SHOULD NOT register functions to existing builders e.g. the `phi.builder.Builder` or [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) provided by phi because that would pollute the `P` object. Instead you should create a custom class that derives from `phi.builder.Builder`, [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) or another custom builder depending on your needs and register your functions to that class.
**Examples**
Say you have a function on a library called `"my_lib"`
def some_fun(obj, arg1, arg2):
# code
You could use it with the dsl like this
from phi import P, Then
P.Pipe(
input,
...
Then(some_fun, arg1, arg2)
...
)
assuming the first parameter `obj` is being piped down. However if you do this very often or you are creating a library, you are better off creating a custom class derived from `Builder` or `PythonBuilder`
from phi import Builder #or PythonBuilder
class MyBuilder(Builder): # or PythonBuilder
pass
and registering your function as a method. The first way you could do this is by creating a wrapper function for `some_fun` and registering it as a method
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
MyBuilder.RegisterMethod(some_fun_wrapper, "my_lib.", wrapped=some_fun)
Here we basically created a shortcut for the original expression `Then(some_fun, arg1, arg2)`. You could also do this using a decorator
@MyBuilder.RegisterMethod("my_lib.", wrapped=some_fun)
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
However, this is such a common task that we've created the method `Register` to avoid you from having to create the wrapper. With it you could register the function `some_fun` directly as a method like this
MyBuilder.Register(some_fun, "my_lib.")
or by using a decorator over the original function definition
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
Once done you've done any of the previous approaches you can create a custom global object e.g. `M` and use it instead of/along with `P`
M = MyBuilder(lambda x: x)
M.Pipe(
input,
...
M.some_fun(arg1, args)
...
)
**Argument position**
`phi.builder.Builder.Register` internally uses `phi.builder.Builder.Then`, this is only useful if the object being piped is intended to be passed as the first argument of the function being registered, if this is not the case you could use `phi.builder.Builder.Register2`, `phi.builder.Builder.Register3`, ..., `phi.builder.Builder.Register5` or `phi.builder.Builder.RegisterAt` to set an arbitrary position, these functions will internally use `phi.builder.Builder.Then2`, `phi.builder.Builder.Then3`, ..., `phi.builder.Builder.Then5` or `phi.builder.Builder.ThenAt` respectively.
**Wrapping functions**
Sometimes you have an existing function that you would like to modify slightly so it plays nicely with the DSL, what you normally do is create a function that wraps around it and passes the arguments to it in a way that is convenient
import some_lib
@MyBuilder.Register("some_lib.")
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
When you do this -as a side effect- you loose the original documentation, to avoid this you can use the Registers `wrapped` argument along with the `explanation` argument to clarity the situation
import some_lib
some_fun_explanation = "However, it differs in that `n` is automatically subtracted `1`"
@MyBuilder.Register("some_lib.", wrapped=some_lib.some_fun, explanation=some_fun_explanation)
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
Now the documentation for `MyBuilder.some_fun` will be a little bit nicer since it includes the original documentation from `some_lib.some_fun`. This behaviour is specially useful if you are wrapping an entire 3rd party library, you usually automate the process iterating over all the funcitions in a for loop. The `phi.builder.Builder.PatchAt` method lets you register and entire module using a few lines of code, however, something you have to do thing more manually and do the iteration yourself.
**See Also**
* `phi.builder.Builder.PatchAt`
* `phi.builder.Builder.RegisterAt`
]
variable[unpack_error] assign[=] constant[True]
<ast.Try object at 0x7da207f01330> | keyword[def] identifier[RegisterMethod] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[unpack_error] = keyword[True]
keyword[try] :
identifier[f] , identifier[library_path] = identifier[args]
identifier[unpack_error] = keyword[False]
identifier[cls] . identifier[_RegisterMethod] ( identifier[f] , identifier[library_path] ,** identifier[kwargs] )
keyword[except] :
keyword[if] keyword[not] identifier[unpack_error] :
keyword[raise]
keyword[def] identifier[register_decorator] ( identifier[f] ):
identifier[library_path] ,= identifier[args]
identifier[cls] . identifier[_RegisterMethod] ( identifier[f] , identifier[library_path] ,** identifier[kwargs] )
keyword[return] identifier[f]
keyword[return] identifier[register_decorator] | def RegisterMethod(cls, *args, **kwargs):
"""
**RegisterMethod**
RegisterMethod(f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True)
`classmethod` for registering functions as methods of this class.
**Arguments**
* **f** : the particular function being registered as a method
* **library_path** : library from where `f` comes from, unless you pass an empty string, put a period `"."` at the end of the library name.
* `alias=None` : alias for the name/method being registered
* `original_name=None` : name of the original function, used for documentation purposes.
* `doc=None` : complete documentation of the method being registered
* `wrapped=None` : if you are registering a function which wraps around another function, pass this other function through `wrapped` to get better documentation, this is specially useful is you register a bunch of functions in a for loop. Please include an `explanation` to tell how the actual function differs from the wrapped one.
* `explanation=""` : especify any additional information for the documentation of the method being registered, you can use any of the following format tags within this string and they will be replace latter on: `{original_name}`, `{name}`, `{fn_docs}`, `{library_path}`, `{builder_class}`.
* `method_type=identity` : by default its applied but does nothing, you might also want to register functions as `property`, `classmethod`, `staticmethod`
* `explain=True` : decide whether or not to show any kind of explanation, its useful to set it to `False` if you are using a `Register*` decorator and will only use the function as a registered method.
A main feature of `phi` is that it enables you to integrate your library or even an existing library with the DSL. You can achieve three levels of integration
1. Passing your functions to the DSL. This a very general machanism -since you could actually do everything with python lamdas- but in practice functions often receive multiple parameters.
2. Creating partials with the `Then*` method family. Using this you could integrate any function, but it will add a lot of noise if you use heavily on it.
3. Registering functions as methods of a `Builder` derived class. This produces the most readable code and its the approach you should take if you want to create a Phi-based library or a helper class.
While point 3 is the most desirable it has a cost: you need to create your own `phi.builder.Builder`-derived class. This is because SHOULD NOT register functions to existing builders e.g. the `phi.builder.Builder` or [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) provided by phi because that would pollute the `P` object. Instead you should create a custom class that derives from `phi.builder.Builder`, [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) or another custom builder depending on your needs and register your functions to that class.
**Examples**
Say you have a function on a library called `"my_lib"`
def some_fun(obj, arg1, arg2):
# code
You could use it with the dsl like this
from phi import P, Then
P.Pipe(
input,
...
Then(some_fun, arg1, arg2)
...
)
assuming the first parameter `obj` is being piped down. However if you do this very often or you are creating a library, you are better off creating a custom class derived from `Builder` or `PythonBuilder`
from phi import Builder #or PythonBuilder
class MyBuilder(Builder): # or PythonBuilder
pass
and registering your function as a method. The first way you could do this is by creating a wrapper function for `some_fun` and registering it as a method
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
MyBuilder.RegisterMethod(some_fun_wrapper, "my_lib.", wrapped=some_fun)
Here we basically created a shortcut for the original expression `Then(some_fun, arg1, arg2)`. You could also do this using a decorator
@MyBuilder.RegisterMethod("my_lib.", wrapped=some_fun)
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
However, this is such a common task that we've created the method `Register` to avoid you from having to create the wrapper. With it you could register the function `some_fun` directly as a method like this
MyBuilder.Register(some_fun, "my_lib.")
or by using a decorator over the original function definition
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
Once done you've done any of the previous approaches you can create a custom global object e.g. `M` and use it instead of/along with `P`
M = MyBuilder(lambda x: x)
M.Pipe(
input,
...
M.some_fun(arg1, args)
...
)
**Argument position**
`phi.builder.Builder.Register` internally uses `phi.builder.Builder.Then`, this is only useful if the object being piped is intended to be passed as the first argument of the function being registered, if this is not the case you could use `phi.builder.Builder.Register2`, `phi.builder.Builder.Register3`, ..., `phi.builder.Builder.Register5` or `phi.builder.Builder.RegisterAt` to set an arbitrary position, these functions will internally use `phi.builder.Builder.Then2`, `phi.builder.Builder.Then3`, ..., `phi.builder.Builder.Then5` or `phi.builder.Builder.ThenAt` respectively.
**Wrapping functions**
Sometimes you have an existing function that you would like to modify slightly so it plays nicely with the DSL, what you normally do is create a function that wraps around it and passes the arguments to it in a way that is convenient
import some_lib
@MyBuilder.Register("some_lib.")
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
When you do this -as a side effect- you loose the original documentation, to avoid this you can use the Registers `wrapped` argument along with the `explanation` argument to clarity the situation
import some_lib
some_fun_explanation = "However, it differs in that `n` is automatically subtracted `1`"
@MyBuilder.Register("some_lib.", wrapped=some_lib.some_fun, explanation=some_fun_explanation)
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
Now the documentation for `MyBuilder.some_fun` will be a little bit nicer since it includes the original documentation from `some_lib.some_fun`. This behaviour is specially useful if you are wrapping an entire 3rd party library, you usually automate the process iterating over all the funcitions in a for loop. The `phi.builder.Builder.PatchAt` method lets you register and entire module using a few lines of code, however, something you have to do thing more manually and do the iteration yourself.
**See Also**
* `phi.builder.Builder.PatchAt`
* `phi.builder.Builder.RegisterAt`
"""
unpack_error = True
try:
(f, library_path) = args
unpack_error = False
cls._RegisterMethod(f, library_path, **kwargs) # depends on [control=['try'], data=[]]
except:
if not unpack_error:
raise # depends on [control=['if'], data=[]]
def register_decorator(f):
(library_path,) = args
cls._RegisterMethod(f, library_path, **kwargs)
return f
return register_decorator # depends on [control=['except'], data=[]] |
def compute_mean_returns_spread(mean_returns,
upper_quant,
lower_quant,
std_err=None):
"""
Computes the difference between the mean returns of
two quantiles. Optionally, computes the standard error
of this difference.
Parameters
----------
mean_returns : pd.DataFrame
DataFrame of mean period wise returns by quantile.
MultiIndex containing date and quantile.
See mean_return_by_quantile.
upper_quant : int
Quantile of mean return from which we
wish to subtract lower quantile mean return.
lower_quant : int
Quantile of mean return we wish to subtract
from upper quantile mean return.
std_err : pd.DataFrame, optional
Period wise standard error in mean return by quantile.
Takes the same form as mean_returns.
Returns
-------
mean_return_difference : pd.Series
Period wise difference in quantile returns.
joint_std_err : pd.Series
Period wise standard error of the difference in quantile returns.
if std_err is None, this will be None
"""
mean_return_difference = mean_returns.xs(upper_quant,
level='factor_quantile') \
- mean_returns.xs(lower_quant, level='factor_quantile')
if std_err is None:
joint_std_err = None
else:
std1 = std_err.xs(upper_quant, level='factor_quantile')
std2 = std_err.xs(lower_quant, level='factor_quantile')
joint_std_err = np.sqrt(std1**2 + std2**2)
return mean_return_difference, joint_std_err | def function[compute_mean_returns_spread, parameter[mean_returns, upper_quant, lower_quant, std_err]]:
constant[
Computes the difference between the mean returns of
two quantiles. Optionally, computes the standard error
of this difference.
Parameters
----------
mean_returns : pd.DataFrame
DataFrame of mean period wise returns by quantile.
MultiIndex containing date and quantile.
See mean_return_by_quantile.
upper_quant : int
Quantile of mean return from which we
wish to subtract lower quantile mean return.
lower_quant : int
Quantile of mean return we wish to subtract
from upper quantile mean return.
std_err : pd.DataFrame, optional
Period wise standard error in mean return by quantile.
Takes the same form as mean_returns.
Returns
-------
mean_return_difference : pd.Series
Period wise difference in quantile returns.
joint_std_err : pd.Series
Period wise standard error of the difference in quantile returns.
if std_err is None, this will be None
]
variable[mean_return_difference] assign[=] binary_operation[call[name[mean_returns].xs, parameter[name[upper_quant]]] - call[name[mean_returns].xs, parameter[name[lower_quant]]]]
if compare[name[std_err] is constant[None]] begin[:]
variable[joint_std_err] assign[=] constant[None]
return[tuple[[<ast.Name object at 0x7da18f00d750>, <ast.Name object at 0x7da18f00f7f0>]]] | keyword[def] identifier[compute_mean_returns_spread] ( identifier[mean_returns] ,
identifier[upper_quant] ,
identifier[lower_quant] ,
identifier[std_err] = keyword[None] ):
literal[string]
identifier[mean_return_difference] = identifier[mean_returns] . identifier[xs] ( identifier[upper_quant] ,
identifier[level] = literal[string] )- identifier[mean_returns] . identifier[xs] ( identifier[lower_quant] , identifier[level] = literal[string] )
keyword[if] identifier[std_err] keyword[is] keyword[None] :
identifier[joint_std_err] = keyword[None]
keyword[else] :
identifier[std1] = identifier[std_err] . identifier[xs] ( identifier[upper_quant] , identifier[level] = literal[string] )
identifier[std2] = identifier[std_err] . identifier[xs] ( identifier[lower_quant] , identifier[level] = literal[string] )
identifier[joint_std_err] = identifier[np] . identifier[sqrt] ( identifier[std1] ** literal[int] + identifier[std2] ** literal[int] )
keyword[return] identifier[mean_return_difference] , identifier[joint_std_err] | def compute_mean_returns_spread(mean_returns, upper_quant, lower_quant, std_err=None):
"""
Computes the difference between the mean returns of
two quantiles. Optionally, computes the standard error
of this difference.
Parameters
----------
mean_returns : pd.DataFrame
DataFrame of mean period wise returns by quantile.
MultiIndex containing date and quantile.
See mean_return_by_quantile.
upper_quant : int
Quantile of mean return from which we
wish to subtract lower quantile mean return.
lower_quant : int
Quantile of mean return we wish to subtract
from upper quantile mean return.
std_err : pd.DataFrame, optional
Period wise standard error in mean return by quantile.
Takes the same form as mean_returns.
Returns
-------
mean_return_difference : pd.Series
Period wise difference in quantile returns.
joint_std_err : pd.Series
Period wise standard error of the difference in quantile returns.
if std_err is None, this will be None
"""
mean_return_difference = mean_returns.xs(upper_quant, level='factor_quantile') - mean_returns.xs(lower_quant, level='factor_quantile')
if std_err is None:
joint_std_err = None # depends on [control=['if'], data=[]]
else:
std1 = std_err.xs(upper_quant, level='factor_quantile')
std2 = std_err.xs(lower_quant, level='factor_quantile')
joint_std_err = np.sqrt(std1 ** 2 + std2 ** 2)
return (mean_return_difference, joint_std_err) |
def last_metric_eval(multiplexer, session_name, metric_name):
"""Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuples, of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError if the given session does not have the metric.
"""
try:
run, tag = run_tag_from_session_and_metric(session_name, metric_name)
tensor_events = multiplexer.Tensors(run=run, tag=tag)
except KeyError as e:
raise KeyError(
'Can\'t find metric %s for session: %s. Underlying error message: %s'
% (metric_name, session_name, e))
last_event = tensor_events[-1]
# TODO(erez): Raise HParamsError if the tensor is not a 0-D real scalar.
return (last_event.wall_time,
last_event.step,
tf.make_ndarray(last_event.tensor_proto).item()) | def function[last_metric_eval, parameter[multiplexer, session_name, metric_name]]:
constant[Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuples, of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError if the given session does not have the metric.
]
<ast.Try object at 0x7da20e9b2500>
variable[last_event] assign[=] call[name[tensor_events]][<ast.UnaryOp object at 0x7da20e9b1000>]
return[tuple[[<ast.Attribute object at 0x7da20e9b02e0>, <ast.Attribute object at 0x7da20e9b2590>, <ast.Call object at 0x7da20e9b0be0>]]] | keyword[def] identifier[last_metric_eval] ( identifier[multiplexer] , identifier[session_name] , identifier[metric_name] ):
literal[string]
keyword[try] :
identifier[run] , identifier[tag] = identifier[run_tag_from_session_and_metric] ( identifier[session_name] , identifier[metric_name] )
identifier[tensor_events] = identifier[multiplexer] . identifier[Tensors] ( identifier[run] = identifier[run] , identifier[tag] = identifier[tag] )
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
keyword[raise] identifier[KeyError] (
literal[string]
%( identifier[metric_name] , identifier[session_name] , identifier[e] ))
identifier[last_event] = identifier[tensor_events] [- literal[int] ]
keyword[return] ( identifier[last_event] . identifier[wall_time] ,
identifier[last_event] . identifier[step] ,
identifier[tf] . identifier[make_ndarray] ( identifier[last_event] . identifier[tensor_proto] ). identifier[item] ()) | def last_metric_eval(multiplexer, session_name, metric_name):
"""Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuples, of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError if the given session does not have the metric.
"""
try:
(run, tag) = run_tag_from_session_and_metric(session_name, metric_name)
tensor_events = multiplexer.Tensors(run=run, tag=tag) # depends on [control=['try'], data=[]]
except KeyError as e:
raise KeyError("Can't find metric %s for session: %s. Underlying error message: %s" % (metric_name, session_name, e)) # depends on [control=['except'], data=['e']]
last_event = tensor_events[-1]
# TODO(erez): Raise HParamsError if the tensor is not a 0-D real scalar.
return (last_event.wall_time, last_event.step, tf.make_ndarray(last_event.tensor_proto).item()) |
def getrange(self, key, start, end):
"""Returns the bit value at offset in the string value stored at key.
When offset is beyond the string length, the string is assumed to be a
contiguous space with 0 bits. When key does not exist it is assumed to
be an empty string, so offset is always out of range and the value is
also assumed to be a contiguous space with 0 bits.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the length of
the returned string. The complexity is ultimately determined by the
returned length, but because creating a substring from an existing
string is very cheap, it can be considered ``O(1)`` for small
strings.
:param key: The key to get the bit from
:type key: :class:`str`, :class:`bytes`
:param int start: The start position to evaluate in the string
:param int end: The end position to evaluate in the string
:rtype: bytes|None
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'GETRANGE', key, ascii(start), ascii(end)]) | def function[getrange, parameter[self, key, start, end]]:
constant[Returns the bit value at offset in the string value stored at key.
When offset is beyond the string length, the string is assumed to be a
contiguous space with 0 bits. When key does not exist it is assumed to
be an empty string, so offset is always out of range and the value is
also assumed to be a contiguous space with 0 bits.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the length of
the returned string. The complexity is ultimately determined by the
returned length, but because creating a substring from an existing
string is very cheap, it can be considered ``O(1)`` for small
strings.
:param key: The key to get the bit from
:type key: :class:`str`, :class:`bytes`
:param int start: The start position to evaluate in the string
:param int end: The end position to evaluate in the string
:rtype: bytes|None
:raises: :exc:`~tredis.exceptions.RedisError`
]
return[call[name[self]._execute, parameter[list[[<ast.Constant object at 0x7da1b0cfe290>, <ast.Name object at 0x7da1b0cff3a0>, <ast.Call object at 0x7da1b0cfd7e0>, <ast.Call object at 0x7da1b0cfc820>]]]]] | keyword[def] identifier[getrange] ( identifier[self] , identifier[key] , identifier[start] , identifier[end] ):
literal[string]
keyword[return] identifier[self] . identifier[_execute] ([ literal[string] , identifier[key] , identifier[ascii] ( identifier[start] ), identifier[ascii] ( identifier[end] )]) | def getrange(self, key, start, end):
"""Returns the bit value at offset in the string value stored at key.
When offset is beyond the string length, the string is assumed to be a
contiguous space with 0 bits. When key does not exist it is assumed to
be an empty string, so offset is always out of range and the value is
also assumed to be a contiguous space with 0 bits.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)`` where ``N`` is the length of
the returned string. The complexity is ultimately determined by the
returned length, but because creating a substring from an existing
string is very cheap, it can be considered ``O(1)`` for small
strings.
:param key: The key to get the bit from
:type key: :class:`str`, :class:`bytes`
:param int start: The start position to evaluate in the string
:param int end: The end position to evaluate in the string
:rtype: bytes|None
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'GETRANGE', key, ascii(start), ascii(end)]) |
def _PrintAnalysisStatusHeader(self, processing_status):
"""Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Storage file\t\t: {0:s}\n'.format(self._storage_file_path))
self._PrintProcessingTime(processing_status)
if processing_status and processing_status.events_status:
self._PrintEventsStatus(processing_status.events_status)
self._output_writer.Write('\n') | def function[_PrintAnalysisStatusHeader, parameter[self, processing_status]]:
constant[Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
]
call[name[self]._output_writer.Write, parameter[call[constant[Storage file : {0:s}
].format, parameter[name[self]._storage_file_path]]]]
call[name[self]._PrintProcessingTime, parameter[name[processing_status]]]
if <ast.BoolOp object at 0x7da1b1d38310> begin[:]
call[name[self]._PrintEventsStatus, parameter[name[processing_status].events_status]]
call[name[self]._output_writer.Write, parameter[constant[
]]] | keyword[def] identifier[_PrintAnalysisStatusHeader] ( identifier[self] , identifier[processing_status] ):
literal[string]
identifier[self] . identifier[_output_writer] . identifier[Write] (
literal[string] . identifier[format] ( identifier[self] . identifier[_storage_file_path] ))
identifier[self] . identifier[_PrintProcessingTime] ( identifier[processing_status] )
keyword[if] identifier[processing_status] keyword[and] identifier[processing_status] . identifier[events_status] :
identifier[self] . identifier[_PrintEventsStatus] ( identifier[processing_status] . identifier[events_status] )
identifier[self] . identifier[_output_writer] . identifier[Write] ( literal[string] ) | def _PrintAnalysisStatusHeader(self, processing_status):
"""Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write('Storage file\t\t: {0:s}\n'.format(self._storage_file_path))
self._PrintProcessingTime(processing_status)
if processing_status and processing_status.events_status:
self._PrintEventsStatus(processing_status.events_status) # depends on [control=['if'], data=[]]
self._output_writer.Write('\n') |
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(r'^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+'
r'([\d+.\-Ee]+)')
# Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [float(i) for i in lines[4].split()]
except ValueError:
pass
return Kpoints.gamma_automatic(kpts, kpts_shift) if style == "g" \
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
# Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, kpts_shift=kpts_shift)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \
else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(r'([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)'
r'\s*!*\s*(.*)')
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
labels.append(m.group(4).strip())
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, coord_type=coord_type, labels=labels)
# Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]),
[int(toks[j])
for j in range(1, 5)]))
except IndexError:
pass
return Kpoints(comment=comment, num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts, kpts_weights=kpts_weights,
tet_number=tet_number, tet_weight=tet_weight,
tet_connections=tet_connections, labels=labels) | def function[from_string, parameter[string]]:
constant[
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b2187610>
variable[comment] assign[=] call[name[lines]][constant[0]]
variable[num_kpts] assign[=] call[name[int], parameter[call[call[call[call[name[lines]][constant[1]].split, parameter[]]][constant[0]].strip, parameter[]]]]
variable[style] assign[=] call[call[call[name[lines]][constant[2]].lower, parameter[]]][constant[0]]
if compare[name[style] equal[==] constant[a]] begin[:]
return[call[name[Kpoints].automatic, parameter[call[name[int], parameter[call[name[lines]][constant[3]]]]]]]
variable[coord_pattern] assign[=] call[name[re].compile, parameter[constant[^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+([\d+.\-Ee]+)]]]
if <ast.BoolOp object at 0x7da1b2186b30> begin[:]
variable[kpts] assign[=] <ast.ListComp object at 0x7da1b2186980>
variable[kpts_shift] assign[=] tuple[[<ast.Constant object at 0x7da1b21866b0>, <ast.Constant object at 0x7da1b2186680>, <ast.Constant object at 0x7da1b2186650>]]
if <ast.BoolOp object at 0x7da1b21865f0> begin[:]
<ast.Try object at 0x7da1b21863b0>
return[<ast.IfExp object at 0x7da1b2185ff0>]
if compare[name[num_kpts] less_or_equal[<=] constant[0]] begin[:]
variable[style] assign[=] <ast.IfExp object at 0x7da1b2185c00>
variable[kpts] assign[=] <ast.ListComp object at 0x7da1b21859c0>
variable[kpts_shift] assign[=] <ast.ListComp object at 0x7da1b21855a0>
return[call[name[Kpoints], parameter[]]]
if compare[name[style] equal[==] constant[l]] begin[:]
variable[coord_type] assign[=] <ast.IfExp object at 0x7da1b2184f70>
variable[style] assign[=] name[Kpoints].supported_modes.Line_mode
variable[kpts] assign[=] list[[]]
variable[labels] assign[=] list[[]]
variable[patt] assign[=] call[name[re].compile, parameter[constant[([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)\s*!*\s*(.*)]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[4], call[name[len], parameter[name[lines]]]]]] begin[:]
variable[line] assign[=] call[name[lines]][name[i]]
variable[m] assign[=] call[name[patt].match, parameter[name[line]]]
if name[m] begin[:]
call[name[kpts].append, parameter[list[[<ast.Call object at 0x7da1b2184520>, <ast.Call object at 0x7da1b2184400>, <ast.Call object at 0x7da1b21842e0>]]]]
call[name[labels].append, parameter[call[call[name[m].group, parameter[constant[4]]].strip, parameter[]]]]
return[call[name[Kpoints], parameter[]]]
variable[style] assign[=] <ast.IfExp object at 0x7da1b21afbb0>
variable[kpts] assign[=] list[[]]
variable[kpts_weights] assign[=] list[[]]
variable[labels] assign[=] list[[]]
variable[tet_number] assign[=] constant[0]
variable[tet_weight] assign[=] constant[0]
variable[tet_connections] assign[=] constant[None]
for taget[name[i]] in starred[call[name[range], parameter[constant[3], binary_operation[constant[3] + name[num_kpts]]]]] begin[:]
variable[toks] assign[=] call[call[name[lines]][name[i]].split, parameter[]]
call[name[kpts].append, parameter[<ast.ListComp object at 0x7da18ede4850>]]
call[name[kpts_weights].append, parameter[call[name[float], parameter[call[name[toks]][constant[3]]]]]]
if compare[call[name[len], parameter[name[toks]]] greater[>] constant[4]] begin[:]
call[name[labels].append, parameter[call[name[toks]][constant[4]]]]
<ast.Try object at 0x7da18ede6350>
return[call[name[Kpoints], parameter[]]] | keyword[def] identifier[from_string] ( identifier[string] ):
literal[string]
identifier[lines] =[ identifier[line] . identifier[strip] () keyword[for] identifier[line] keyword[in] identifier[string] . identifier[splitlines] ()]
identifier[comment] = identifier[lines] [ literal[int] ]
identifier[num_kpts] = identifier[int] ( identifier[lines] [ literal[int] ]. identifier[split] ()[ literal[int] ]. identifier[strip] ())
identifier[style] = identifier[lines] [ literal[int] ]. identifier[lower] ()[ literal[int] ]
keyword[if] identifier[style] == literal[string] :
keyword[return] identifier[Kpoints] . identifier[automatic] ( identifier[int] ( identifier[lines] [ literal[int] ]))
identifier[coord_pattern] = identifier[re] . identifier[compile] ( literal[string]
literal[string] )
keyword[if] identifier[style] == literal[string] keyword[or] identifier[style] == literal[string] :
identifier[kpts] =[ identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[lines] [ literal[int] ]. identifier[split] ()]
identifier[kpts_shift] =( literal[int] , literal[int] , literal[int] )
keyword[if] identifier[len] ( identifier[lines] )> literal[int] keyword[and] identifier[coord_pattern] . identifier[match] ( identifier[lines] [ literal[int] ]):
keyword[try] :
identifier[kpts_shift] =[ identifier[float] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[lines] [ literal[int] ]. identifier[split] ()]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[Kpoints] . identifier[gamma_automatic] ( identifier[kpts] , identifier[kpts_shift] ) keyword[if] identifier[style] == literal[string] keyword[else] identifier[Kpoints] . identifier[monkhorst_automatic] ( identifier[kpts] , identifier[kpts_shift] )
keyword[if] identifier[num_kpts] <= literal[int] :
identifier[style] = identifier[Kpoints] . identifier[supported_modes] . identifier[Cartesian] keyword[if] identifier[style] keyword[in] literal[string] keyword[else] identifier[Kpoints] . identifier[supported_modes] . identifier[Reciprocal]
identifier[kpts] =[[ identifier[float] ( identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[lines] [ identifier[i] ]. identifier[split] ()] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )]
identifier[kpts_shift] =[ identifier[float] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[lines] [ literal[int] ]. identifier[split] ()]
keyword[return] identifier[Kpoints] ( identifier[comment] = identifier[comment] , identifier[num_kpts] = identifier[num_kpts] , identifier[style] = identifier[style] ,
identifier[kpts] = identifier[kpts] , identifier[kpts_shift] = identifier[kpts_shift] )
keyword[if] identifier[style] == literal[string] :
identifier[coord_type] = literal[string] keyword[if] identifier[lines] [ literal[int] ]. identifier[lower] ()[ literal[int] ] keyword[in] literal[string] keyword[else] literal[string]
identifier[style] = identifier[Kpoints] . identifier[supported_modes] . identifier[Line_mode]
identifier[kpts] =[]
identifier[labels] =[]
identifier[patt] = identifier[re] . identifier[compile] ( literal[string]
literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[lines] )):
identifier[line] = identifier[lines] [ identifier[i] ]
identifier[m] = identifier[patt] . identifier[match] ( identifier[line] )
keyword[if] identifier[m] :
identifier[kpts] . identifier[append] ([ identifier[float] ( identifier[m] . identifier[group] ( literal[int] )), identifier[float] ( identifier[m] . identifier[group] ( literal[int] )),
identifier[float] ( identifier[m] . identifier[group] ( literal[int] ))])
identifier[labels] . identifier[append] ( identifier[m] . identifier[group] ( literal[int] ). identifier[strip] ())
keyword[return] identifier[Kpoints] ( identifier[comment] = identifier[comment] , identifier[num_kpts] = identifier[num_kpts] , identifier[style] = identifier[style] ,
identifier[kpts] = identifier[kpts] , identifier[coord_type] = identifier[coord_type] , identifier[labels] = identifier[labels] )
identifier[style] = identifier[Kpoints] . identifier[supported_modes] . identifier[Cartesian] keyword[if] identifier[style] keyword[in] literal[string] keyword[else] identifier[Kpoints] . identifier[supported_modes] . identifier[Reciprocal]
identifier[kpts] =[]
identifier[kpts_weights] =[]
identifier[labels] =[]
identifier[tet_number] = literal[int]
identifier[tet_weight] = literal[int]
identifier[tet_connections] = keyword[None]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] + identifier[num_kpts] ):
identifier[toks] = identifier[lines] [ identifier[i] ]. identifier[split] ()
identifier[kpts] . identifier[append] ([ identifier[float] ( identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[toks] [ literal[int] : literal[int] ]])
identifier[kpts_weights] . identifier[append] ( identifier[float] ( identifier[toks] [ literal[int] ]))
keyword[if] identifier[len] ( identifier[toks] )> literal[int] :
identifier[labels] . identifier[append] ( identifier[toks] [ literal[int] ])
keyword[else] :
identifier[labels] . identifier[append] ( keyword[None] )
keyword[try] :
keyword[if] identifier[lines] [ literal[int] + identifier[num_kpts] ]. identifier[strip] (). identifier[lower] ()[ literal[int] ]== literal[string] :
identifier[toks] = identifier[lines] [ literal[int] + identifier[num_kpts] ]. identifier[split] ()
identifier[tet_number] = identifier[int] ( identifier[toks] [ literal[int] ])
identifier[tet_weight] = identifier[float] ( identifier[toks] [ literal[int] ])
identifier[tet_connections] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] + identifier[num_kpts] , literal[int] + identifier[num_kpts] + identifier[tet_number] ):
identifier[toks] = identifier[lines] [ identifier[i] ]. identifier[split] ()
identifier[tet_connections] . identifier[append] (( identifier[int] ( identifier[toks] [ literal[int] ]),
[ identifier[int] ( identifier[toks] [ identifier[j] ])
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , literal[int] )]))
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[return] identifier[Kpoints] ( identifier[comment] = identifier[comment] , identifier[num_kpts] = identifier[num_kpts] ,
identifier[style] = identifier[Kpoints] . identifier[supported_modes] [ identifier[str] ( identifier[style] )],
identifier[kpts] = identifier[kpts] , identifier[kpts_weights] = identifier[kpts_weights] ,
identifier[tet_number] = identifier[tet_number] , identifier[tet_weight] = identifier[tet_weight] ,
identifier[tet_connections] = identifier[tet_connections] , identifier[labels] = identifier[labels] ) | def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == 'a':
return Kpoints.automatic(int(lines[3])) # depends on [control=['if'], data=[]]
coord_pattern = re.compile('^\\s*([\\d+.\\-Ee]+)\\s+([\\d+.\\-Ee]+)\\s+([\\d+.\\-Ee]+)')
# Automatic gamma and Monk KPOINTS, with optional shift
if style == 'g' or style == 'm':
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [float(i) for i in lines[4].split()] # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return Kpoints.gamma_automatic(kpts, kpts_shift) if style == 'g' else Kpoints.monkhorst_automatic(kpts, kpts_shift) # depends on [control=['if'], data=[]]
# Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in 'ck' else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style, kpts=kpts, kpts_shift=kpts_shift) # depends on [control=['if'], data=['num_kpts']]
# Line-mode KPOINTS, usually used with band structures
if style == 'l':
coord_type = 'Cartesian' if lines[3].lower()[0] in 'ck' else 'Reciprocal'
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile('([e0-9.\\-]+)\\s+([e0-9.\\-]+)\\s+([e0-9.\\-]+)\\s*!*\\s*(.*)')
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)), float(m.group(3))])
labels.append(m.group(4).strip()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style, kpts=kpts, coord_type=coord_type, labels=labels) # depends on [control=['if'], data=['style']]
# Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in 'ck' else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4]) # depends on [control=['if'], data=[]]
else:
labels.append(None) # depends on [control=['for'], data=['i']]
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == 't':
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]), [int(toks[j]) for j in range(1, 5)])) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]]
return Kpoints(comment=comment, num_kpts=num_kpts, style=Kpoints.supported_modes[str(style)], kpts=kpts, kpts_weights=kpts_weights, tet_number=tet_number, tet_weight=tet_weight, tet_connections=tet_connections, labels=labels) |
def add_cell(preso, pos, width, height, padding=1, top_margin=4, left_margin=2):
""" Add a text frame to current slide """
available_width = SLIDE_WIDTH
available_width -= left_margin * 2
available_width -= padding * (width - 1)
column_width = available_width / width
avail_height = SLIDE_HEIGHT
avail_height -= top_margin
avail_height -= padding * (height - 1)
column_height = avail_height / height
col_pos = int((pos - 1) % width)
row_pos = int((pos - 1) / width)
w = "{}cm".format(column_width)
h = "{}cm".format(column_height)
x = "{}cm".format(left_margin + (col_pos * column_width + (col_pos) * padding))
y = "{}cm".format(top_margin + (row_pos * column_height + (row_pos) * padding))
attr = {
"presentation:class": "outline",
"presentation:style-name": "Default-outline1",
"svg:width": w,
"svg:height": h,
"svg:x": x,
"svg:y": y,
}
preso.slides[-1].add_text_frame(attr)
preso.slides[-1].grid_w_h_x_y = (w, h, x, y) | def function[add_cell, parameter[preso, pos, width, height, padding, top_margin, left_margin]]:
constant[ Add a text frame to current slide ]
variable[available_width] assign[=] name[SLIDE_WIDTH]
<ast.AugAssign object at 0x7da20c7c8d00>
<ast.AugAssign object at 0x7da20c7ca800>
variable[column_width] assign[=] binary_operation[name[available_width] / name[width]]
variable[avail_height] assign[=] name[SLIDE_HEIGHT]
<ast.AugAssign object at 0x7da2041d9a50>
<ast.AugAssign object at 0x7da2041d94b0>
variable[column_height] assign[=] binary_operation[name[avail_height] / name[height]]
variable[col_pos] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[pos] - constant[1]] <ast.Mod object at 0x7da2590d6920> name[width]]]]
variable[row_pos] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[pos] - constant[1]] / name[width]]]]
variable[w] assign[=] call[constant[{}cm].format, parameter[name[column_width]]]
variable[h] assign[=] call[constant[{}cm].format, parameter[name[column_height]]]
variable[x] assign[=] call[constant[{}cm].format, parameter[binary_operation[name[left_margin] + binary_operation[binary_operation[name[col_pos] * name[column_width]] + binary_operation[name[col_pos] * name[padding]]]]]]
variable[y] assign[=] call[constant[{}cm].format, parameter[binary_operation[name[top_margin] + binary_operation[binary_operation[name[row_pos] * name[column_height]] + binary_operation[name[row_pos] * name[padding]]]]]]
variable[attr] assign[=] dictionary[[<ast.Constant object at 0x7da2041dbc10>, <ast.Constant object at 0x7da2041da8c0>, <ast.Constant object at 0x7da2041da230>, <ast.Constant object at 0x7da2041d85e0>, <ast.Constant object at 0x7da2041d8a30>, <ast.Constant object at 0x7da2041d9e40>], [<ast.Constant object at 0x7da1b0a69f00>, <ast.Constant object at 0x7da1b0a68160>, <ast.Name object at 0x7da1b0a6a410>, <ast.Name object at 0x7da1b0a6a1d0>, <ast.Name object at 0x7da1b0a6a0e0>, <ast.Name object at 0x7da20c794c70>]]
call[call[name[preso].slides][<ast.UnaryOp object at 0x7da20c7cafe0>].add_text_frame, parameter[name[attr]]]
call[name[preso].slides][<ast.UnaryOp object at 0x7da20c7c98a0>].grid_w_h_x_y assign[=] tuple[[<ast.Name object at 0x7da20c7cace0>, <ast.Name object at 0x7da20c7c8430>, <ast.Name object at 0x7da20c7caec0>, <ast.Name object at 0x7da20c7cb280>]] | keyword[def] identifier[add_cell] ( identifier[preso] , identifier[pos] , identifier[width] , identifier[height] , identifier[padding] = literal[int] , identifier[top_margin] = literal[int] , identifier[left_margin] = literal[int] ):
literal[string]
identifier[available_width] = identifier[SLIDE_WIDTH]
identifier[available_width] -= identifier[left_margin] * literal[int]
identifier[available_width] -= identifier[padding] *( identifier[width] - literal[int] )
identifier[column_width] = identifier[available_width] / identifier[width]
identifier[avail_height] = identifier[SLIDE_HEIGHT]
identifier[avail_height] -= identifier[top_margin]
identifier[avail_height] -= identifier[padding] *( identifier[height] - literal[int] )
identifier[column_height] = identifier[avail_height] / identifier[height]
identifier[col_pos] = identifier[int] (( identifier[pos] - literal[int] )% identifier[width] )
identifier[row_pos] = identifier[int] (( identifier[pos] - literal[int] )/ identifier[width] )
identifier[w] = literal[string] . identifier[format] ( identifier[column_width] )
identifier[h] = literal[string] . identifier[format] ( identifier[column_height] )
identifier[x] = literal[string] . identifier[format] ( identifier[left_margin] +( identifier[col_pos] * identifier[column_width] +( identifier[col_pos] )* identifier[padding] ))
identifier[y] = literal[string] . identifier[format] ( identifier[top_margin] +( identifier[row_pos] * identifier[column_height] +( identifier[row_pos] )* identifier[padding] ))
identifier[attr] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[w] ,
literal[string] : identifier[h] ,
literal[string] : identifier[x] ,
literal[string] : identifier[y] ,
}
identifier[preso] . identifier[slides] [- literal[int] ]. identifier[add_text_frame] ( identifier[attr] )
identifier[preso] . identifier[slides] [- literal[int] ]. identifier[grid_w_h_x_y] =( identifier[w] , identifier[h] , identifier[x] , identifier[y] ) | def add_cell(preso, pos, width, height, padding=1, top_margin=4, left_margin=2):
""" Add a text frame to current slide """
available_width = SLIDE_WIDTH
available_width -= left_margin * 2
available_width -= padding * (width - 1)
column_width = available_width / width
avail_height = SLIDE_HEIGHT
avail_height -= top_margin
avail_height -= padding * (height - 1)
column_height = avail_height / height
col_pos = int((pos - 1) % width)
row_pos = int((pos - 1) / width)
w = '{}cm'.format(column_width)
h = '{}cm'.format(column_height)
x = '{}cm'.format(left_margin + (col_pos * column_width + col_pos * padding))
y = '{}cm'.format(top_margin + (row_pos * column_height + row_pos * padding))
attr = {'presentation:class': 'outline', 'presentation:style-name': 'Default-outline1', 'svg:width': w, 'svg:height': h, 'svg:x': x, 'svg:y': y}
preso.slides[-1].add_text_frame(attr)
preso.slides[-1].grid_w_h_x_y = (w, h, x, y) |
def get_course_registered_users(self, course, with_admins=True):
"""
Get all the users registered to a course
:param course: a Course object
:param with_admins: include admins?
:return: a list of usernames that are registered to the course
"""
l = [entry['students'] for entry in list(self._database.aggregations.aggregate([
{"$match": {"courseid": course.get_id()}},
{"$unwind": "$students"},
{"$project": {"_id": 0, "students": 1}}
]))]
if with_admins:
return list(set(l + course.get_staff()))
else:
return l | def function[get_course_registered_users, parameter[self, course, with_admins]]:
constant[
Get all the users registered to a course
:param course: a Course object
:param with_admins: include admins?
:return: a list of usernames that are registered to the course
]
variable[l] assign[=] <ast.ListComp object at 0x7da18c4cc160>
if name[with_admins] begin[:]
return[call[name[list], parameter[call[name[set], parameter[binary_operation[name[l] + call[name[course].get_staff, parameter[]]]]]]]] | keyword[def] identifier[get_course_registered_users] ( identifier[self] , identifier[course] , identifier[with_admins] = keyword[True] ):
literal[string]
identifier[l] =[ identifier[entry] [ literal[string] ] keyword[for] identifier[entry] keyword[in] identifier[list] ( identifier[self] . identifier[_database] . identifier[aggregations] . identifier[aggregate] ([
{ literal[string] :{ literal[string] : identifier[course] . identifier[get_id] ()}},
{ literal[string] : literal[string] },
{ literal[string] :{ literal[string] : literal[int] , literal[string] : literal[int] }}
]))]
keyword[if] identifier[with_admins] :
keyword[return] identifier[list] ( identifier[set] ( identifier[l] + identifier[course] . identifier[get_staff] ()))
keyword[else] :
keyword[return] identifier[l] | def get_course_registered_users(self, course, with_admins=True):
"""
Get all the users registered to a course
:param course: a Course object
:param with_admins: include admins?
:return: a list of usernames that are registered to the course
"""
l = [entry['students'] for entry in list(self._database.aggregations.aggregate([{'$match': {'courseid': course.get_id()}}, {'$unwind': '$students'}, {'$project': {'_id': 0, 'students': 1}}]))]
if with_admins:
return list(set(l + course.get_staff())) # depends on [control=['if'], data=[]]
else:
return l |
def get_trades(self, max_id=None, count=None, instrument=None, ids=None):
""" Get a list of open trades
Parameters
----------
max_id : int
The server will return trades with id less than or equal
to this, in descending order (for pagination)
count : int
Maximum number of open trades to return. Default: 50 Max
value: 500
instrument : str
Retrieve open trades for a specific instrument only
Default: all
ids : list
A list of trades to retrieve. Maximum number of ids: 50.
No other parameter may be specified with the ids
parameter.
See more:
http://developer.oanda.com/rest-live/trades/#getListOpenTrades
"""
url = "{0}/{1}/accounts/{2}/trades".format(
self.domain,
self.API_VERSION,
self.account_id
)
params = {
"maxId": int(max_id) if max_id and max_id > 0 else None,
"count": int(count) if count and count > 0 else None,
"instrument": instrument,
"ids": ','.join(ids) if ids else None
}
try:
return self._Client__call(uri=url, params=params, method="get")
except RequestException:
return False
except AssertionError:
return False | def function[get_trades, parameter[self, max_id, count, instrument, ids]]:
constant[ Get a list of open trades
Parameters
----------
max_id : int
The server will return trades with id less than or equal
to this, in descending order (for pagination)
count : int
Maximum number of open trades to return. Default: 50 Max
value: 500
instrument : str
Retrieve open trades for a specific instrument only
Default: all
ids : list
A list of trades to retrieve. Maximum number of ids: 50.
No other parameter may be specified with the ids
parameter.
See more:
http://developer.oanda.com/rest-live/trades/#getListOpenTrades
]
variable[url] assign[=] call[constant[{0}/{1}/accounts/{2}/trades].format, parameter[name[self].domain, name[self].API_VERSION, name[self].account_id]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f00fee0>, <ast.Constant object at 0x7da18f00eef0>, <ast.Constant object at 0x7da18f00d4e0>, <ast.Constant object at 0x7da18f00f790>], [<ast.IfExp object at 0x7da18f00cd30>, <ast.IfExp object at 0x7da18f00ec80>, <ast.Name object at 0x7da2047e8b20>, <ast.IfExp object at 0x7da212db5030>]]
<ast.Try object at 0x7da20e960d00> | keyword[def] identifier[get_trades] ( identifier[self] , identifier[max_id] = keyword[None] , identifier[count] = keyword[None] , identifier[instrument] = keyword[None] , identifier[ids] = keyword[None] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[self] . identifier[domain] ,
identifier[self] . identifier[API_VERSION] ,
identifier[self] . identifier[account_id]
)
identifier[params] ={
literal[string] : identifier[int] ( identifier[max_id] ) keyword[if] identifier[max_id] keyword[and] identifier[max_id] > literal[int] keyword[else] keyword[None] ,
literal[string] : identifier[int] ( identifier[count] ) keyword[if] identifier[count] keyword[and] identifier[count] > literal[int] keyword[else] keyword[None] ,
literal[string] : identifier[instrument] ,
literal[string] : literal[string] . identifier[join] ( identifier[ids] ) keyword[if] identifier[ids] keyword[else] keyword[None]
}
keyword[try] :
keyword[return] identifier[self] . identifier[_Client__call] ( identifier[uri] = identifier[url] , identifier[params] = identifier[params] , identifier[method] = literal[string] )
keyword[except] identifier[RequestException] :
keyword[return] keyword[False]
keyword[except] identifier[AssertionError] :
keyword[return] keyword[False] | def get_trades(self, max_id=None, count=None, instrument=None, ids=None):
""" Get a list of open trades
Parameters
----------
max_id : int
The server will return trades with id less than or equal
to this, in descending order (for pagination)
count : int
Maximum number of open trades to return. Default: 50 Max
value: 500
instrument : str
Retrieve open trades for a specific instrument only
Default: all
ids : list
A list of trades to retrieve. Maximum number of ids: 50.
No other parameter may be specified with the ids
parameter.
See more:
http://developer.oanda.com/rest-live/trades/#getListOpenTrades
"""
url = '{0}/{1}/accounts/{2}/trades'.format(self.domain, self.API_VERSION, self.account_id)
params = {'maxId': int(max_id) if max_id and max_id > 0 else None, 'count': int(count) if count and count > 0 else None, 'instrument': instrument, 'ids': ','.join(ids) if ids else None}
try:
return self._Client__call(uri=url, params=params, method='get') # depends on [control=['try'], data=[]]
except RequestException:
return False # depends on [control=['except'], data=[]]
except AssertionError:
return False # depends on [control=['except'], data=[]] |
def _backup_bytes(target, offset, length):
"""
Read bytes from one file and write it to a
backup file with the .bytes_backup suffix
"""
click.echo('Backup {l} byes at position {offset} on file {file} to .bytes_backup'.format(
l=length, offset=offset, file=target))
with open(target, 'r+b') as f:
f.seek(offset)
with open(target + '.bytes_backup', 'w+b') as b:
for _ in xrange(length):
byte = f.read(1)
b.write(byte)
b.flush()
f.flush() | def function[_backup_bytes, parameter[target, offset, length]]:
constant[
Read bytes from one file and write it to a
backup file with the .bytes_backup suffix
]
call[name[click].echo, parameter[call[constant[Backup {l} byes at position {offset} on file {file} to .bytes_backup].format, parameter[]]]]
with call[name[open], parameter[name[target], constant[r+b]]] begin[:]
call[name[f].seek, parameter[name[offset]]]
with call[name[open], parameter[binary_operation[name[target] + constant[.bytes_backup]], constant[w+b]]] begin[:]
for taget[name[_]] in starred[call[name[xrange], parameter[name[length]]]] begin[:]
variable[byte] assign[=] call[name[f].read, parameter[constant[1]]]
call[name[b].write, parameter[name[byte]]]
call[name[b].flush, parameter[]]
call[name[f].flush, parameter[]] | keyword[def] identifier[_backup_bytes] ( identifier[target] , identifier[offset] , identifier[length] ):
literal[string]
identifier[click] . identifier[echo] ( literal[string] . identifier[format] (
identifier[l] = identifier[length] , identifier[offset] = identifier[offset] , identifier[file] = identifier[target] ))
keyword[with] identifier[open] ( identifier[target] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[seek] ( identifier[offset] )
keyword[with] identifier[open] ( identifier[target] + literal[string] , literal[string] ) keyword[as] identifier[b] :
keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[length] ):
identifier[byte] = identifier[f] . identifier[read] ( literal[int] )
identifier[b] . identifier[write] ( identifier[byte] )
identifier[b] . identifier[flush] ()
identifier[f] . identifier[flush] () | def _backup_bytes(target, offset, length):
"""
Read bytes from one file and write it to a
backup file with the .bytes_backup suffix
"""
click.echo('Backup {l} byes at position {offset} on file {file} to .bytes_backup'.format(l=length, offset=offset, file=target))
with open(target, 'r+b') as f:
f.seek(offset)
with open(target + '.bytes_backup', 'w+b') as b:
for _ in xrange(length):
byte = f.read(1)
b.write(byte) # depends on [control=['for'], data=[]]
b.flush() # depends on [control=['with'], data=['b']]
f.flush() # depends on [control=['with'], data=['open', 'f']] |
def _read_n_samples(channel_file):
"""Calculate the number of samples based on the file size
Parameters
----------
channel_file : Path
path to single filename with the header
Returns
-------
int
number of blocks (i.e. records, in which the data is cut)
int
number of samples
"""
n_blocks = int((channel_file.stat().st_size - HDR_LENGTH) / BLK_SIZE)
n_samples = n_blocks * BLK_LENGTH
return n_blocks, n_samples | def function[_read_n_samples, parameter[channel_file]]:
constant[Calculate the number of samples based on the file size
Parameters
----------
channel_file : Path
path to single filename with the header
Returns
-------
int
number of blocks (i.e. records, in which the data is cut)
int
number of samples
]
variable[n_blocks] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[channel_file].stat, parameter[]].st_size - name[HDR_LENGTH]] / name[BLK_SIZE]]]]
variable[n_samples] assign[=] binary_operation[name[n_blocks] * name[BLK_LENGTH]]
return[tuple[[<ast.Name object at 0x7da18fe92260>, <ast.Name object at 0x7da18fe91c60>]]] | keyword[def] identifier[_read_n_samples] ( identifier[channel_file] ):
literal[string]
identifier[n_blocks] = identifier[int] (( identifier[channel_file] . identifier[stat] (). identifier[st_size] - identifier[HDR_LENGTH] )/ identifier[BLK_SIZE] )
identifier[n_samples] = identifier[n_blocks] * identifier[BLK_LENGTH]
keyword[return] identifier[n_blocks] , identifier[n_samples] | def _read_n_samples(channel_file):
"""Calculate the number of samples based on the file size
Parameters
----------
channel_file : Path
path to single filename with the header
Returns
-------
int
number of blocks (i.e. records, in which the data is cut)
int
number of samples
"""
n_blocks = int((channel_file.stat().st_size - HDR_LENGTH) / BLK_SIZE)
n_samples = n_blocks * BLK_LENGTH
return (n_blocks, n_samples) |
def compute_diff(test_case, test_case_result, output_file, base_file_path):
"""Associate the diff (if exists) with the TestCaseResult.
Return whether or not the outputs match.
"""
with open(File.file_path(base_file_path, test_case.expected.sha1)) as fp:
expected_output = fp.read()
actual_output = ''
if os.path.isfile(output_file):
with open('tc_{0}'.format(test_case.id)) as fp:
actual_output = fp.read()
unit = Diff(expected_output, actual_output)
if not unit.outputs_match():
test_case_result.diff = File.fetch_or_create(pickle.dumps(unit),
base_file_path)
return False
return True | def function[compute_diff, parameter[test_case, test_case_result, output_file, base_file_path]]:
constant[Associate the diff (if exists) with the TestCaseResult.
Return whether or not the outputs match.
]
with call[name[open], parameter[call[name[File].file_path, parameter[name[base_file_path], name[test_case].expected.sha1]]]] begin[:]
variable[expected_output] assign[=] call[name[fp].read, parameter[]]
variable[actual_output] assign[=] constant[]
if call[name[os].path.isfile, parameter[name[output_file]]] begin[:]
with call[name[open], parameter[call[constant[tc_{0}].format, parameter[name[test_case].id]]]] begin[:]
variable[actual_output] assign[=] call[name[fp].read, parameter[]]
variable[unit] assign[=] call[name[Diff], parameter[name[expected_output], name[actual_output]]]
if <ast.UnaryOp object at 0x7da18c4cfe80> begin[:]
name[test_case_result].diff assign[=] call[name[File].fetch_or_create, parameter[call[name[pickle].dumps, parameter[name[unit]]], name[base_file_path]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[compute_diff] ( identifier[test_case] , identifier[test_case_result] , identifier[output_file] , identifier[base_file_path] ):
literal[string]
keyword[with] identifier[open] ( identifier[File] . identifier[file_path] ( identifier[base_file_path] , identifier[test_case] . identifier[expected] . identifier[sha1] )) keyword[as] identifier[fp] :
identifier[expected_output] = identifier[fp] . identifier[read] ()
identifier[actual_output] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[output_file] ):
keyword[with] identifier[open] ( literal[string] . identifier[format] ( identifier[test_case] . identifier[id] )) keyword[as] identifier[fp] :
identifier[actual_output] = identifier[fp] . identifier[read] ()
identifier[unit] = identifier[Diff] ( identifier[expected_output] , identifier[actual_output] )
keyword[if] keyword[not] identifier[unit] . identifier[outputs_match] ():
identifier[test_case_result] . identifier[diff] = identifier[File] . identifier[fetch_or_create] ( identifier[pickle] . identifier[dumps] ( identifier[unit] ),
identifier[base_file_path] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def compute_diff(test_case, test_case_result, output_file, base_file_path):
"""Associate the diff (if exists) with the TestCaseResult.
Return whether or not the outputs match.
"""
with open(File.file_path(base_file_path, test_case.expected.sha1)) as fp:
expected_output = fp.read() # depends on [control=['with'], data=['fp']]
actual_output = ''
if os.path.isfile(output_file):
with open('tc_{0}'.format(test_case.id)) as fp:
actual_output = fp.read() # depends on [control=['with'], data=['fp']] # depends on [control=['if'], data=[]]
unit = Diff(expected_output, actual_output)
if not unit.outputs_match():
test_case_result.diff = File.fetch_or_create(pickle.dumps(unit), base_file_path)
return False # depends on [control=['if'], data=[]]
return True |
def _return_base_data(self, url, container, container_object=None,
container_headers=None, object_headers=None):
"""Return headers and a parsed url.
:param url:
:param container:
:param container_object:
:param container_headers:
:return: ``tuple``
"""
headers = self.job_args['base_headers']
headers.update({'X-Auth-Token': self.job_args['os_token']})
_container_uri = url.geturl().rstrip('/')
if container:
_container_uri = '%s/%s' % (
_container_uri, cloud_utils.quoter(container)
)
if container_object:
_container_uri = '%s/%s' % (
_container_uri, cloud_utils.quoter(container_object)
)
if object_headers:
headers.update(object_headers)
if container_headers:
headers.update(container_headers)
return headers, urlparse.urlparse(_container_uri) | def function[_return_base_data, parameter[self, url, container, container_object, container_headers, object_headers]]:
constant[Return headers and a parsed url.
:param url:
:param container:
:param container_object:
:param container_headers:
:return: ``tuple``
]
variable[headers] assign[=] call[name[self].job_args][constant[base_headers]]
call[name[headers].update, parameter[dictionary[[<ast.Constant object at 0x7da1b2767550>], [<ast.Subscript object at 0x7da1b2766080>]]]]
variable[_container_uri] assign[=] call[call[name[url].geturl, parameter[]].rstrip, parameter[constant[/]]]
if name[container] begin[:]
variable[_container_uri] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2767310>, <ast.Call object at 0x7da1b27670a0>]]]
if name[container_object] begin[:]
variable[_container_uri] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2767820>, <ast.Call object at 0x7da1b2766e30>]]]
if name[object_headers] begin[:]
call[name[headers].update, parameter[name[object_headers]]]
if name[container_headers] begin[:]
call[name[headers].update, parameter[name[container_headers]]]
return[tuple[[<ast.Name object at 0x7da1b2764460>, <ast.Call object at 0x7da1b2767940>]]] | keyword[def] identifier[_return_base_data] ( identifier[self] , identifier[url] , identifier[container] , identifier[container_object] = keyword[None] ,
identifier[container_headers] = keyword[None] , identifier[object_headers] = keyword[None] ):
literal[string]
identifier[headers] = identifier[self] . identifier[job_args] [ literal[string] ]
identifier[headers] . identifier[update] ({ literal[string] : identifier[self] . identifier[job_args] [ literal[string] ]})
identifier[_container_uri] = identifier[url] . identifier[geturl] (). identifier[rstrip] ( literal[string] )
keyword[if] identifier[container] :
identifier[_container_uri] = literal[string] %(
identifier[_container_uri] , identifier[cloud_utils] . identifier[quoter] ( identifier[container] )
)
keyword[if] identifier[container_object] :
identifier[_container_uri] = literal[string] %(
identifier[_container_uri] , identifier[cloud_utils] . identifier[quoter] ( identifier[container_object] )
)
keyword[if] identifier[object_headers] :
identifier[headers] . identifier[update] ( identifier[object_headers] )
keyword[if] identifier[container_headers] :
identifier[headers] . identifier[update] ( identifier[container_headers] )
keyword[return] identifier[headers] , identifier[urlparse] . identifier[urlparse] ( identifier[_container_uri] ) | def _return_base_data(self, url, container, container_object=None, container_headers=None, object_headers=None):
"""Return headers and a parsed url.
:param url:
:param container:
:param container_object:
:param container_headers:
:return: ``tuple``
"""
headers = self.job_args['base_headers']
headers.update({'X-Auth-Token': self.job_args['os_token']})
_container_uri = url.geturl().rstrip('/')
if container:
_container_uri = '%s/%s' % (_container_uri, cloud_utils.quoter(container)) # depends on [control=['if'], data=[]]
if container_object:
_container_uri = '%s/%s' % (_container_uri, cloud_utils.quoter(container_object)) # depends on [control=['if'], data=[]]
if object_headers:
headers.update(object_headers) # depends on [control=['if'], data=[]]
if container_headers:
headers.update(container_headers) # depends on [control=['if'], data=[]]
return (headers, urlparse.urlparse(_container_uri)) |
def _calculateRegisterAddress( registertype, patternnumber, stepnumber = None):
"""Calculate the register address for pattern related parameters.
Args:
* registertype (string): The type of parameter, for example 'cycles'.
Allowed are the keys from :data:`REGISTER_START`.
* patternnumber (int): The pattern number.
* stepnumber (int): The step number. Use None if it not should affect the calculation.
Returns:
The register address (int).
Raises:
TypeError, ValueError
"""
if stepnumber is None:
stepnumber = 0
# Argument checking
_checkPatternNumber( patternnumber )
_checkStepNumber( stepnumber )
if not registertype in list(REGISTER_START.keys()): # To comply with both Python2 and Python3
raise ValueError('Wrong register type: {0}. Allowed values: {1}'.format(
repr(registertype), repr( list(REGISTER_START.keys()) )))
# Calculate register address
address = REGISTER_START[registertype] + \
patternnumber * REGISTER_OFFSET_PER_PATTERN[registertype] + \
stepnumber * REGISTER_OFFSET_PER_STEP[registertype]
return address | def function[_calculateRegisterAddress, parameter[registertype, patternnumber, stepnumber]]:
constant[Calculate the register address for pattern related parameters.
Args:
* registertype (string): The type of parameter, for example 'cycles'.
Allowed are the keys from :data:`REGISTER_START`.
* patternnumber (int): The pattern number.
* stepnumber (int): The step number. Use None if it not should affect the calculation.
Returns:
The register address (int).
Raises:
TypeError, ValueError
]
if compare[name[stepnumber] is constant[None]] begin[:]
variable[stepnumber] assign[=] constant[0]
call[name[_checkPatternNumber], parameter[name[patternnumber]]]
call[name[_checkStepNumber], parameter[name[stepnumber]]]
if <ast.UnaryOp object at 0x7da20e954ee0> begin[:]
<ast.Raise object at 0x7da20e9540a0>
variable[address] assign[=] binary_operation[binary_operation[call[name[REGISTER_START]][name[registertype]] + binary_operation[name[patternnumber] * call[name[REGISTER_OFFSET_PER_PATTERN]][name[registertype]]]] + binary_operation[name[stepnumber] * call[name[REGISTER_OFFSET_PER_STEP]][name[registertype]]]]
return[name[address]] | keyword[def] identifier[_calculateRegisterAddress] ( identifier[registertype] , identifier[patternnumber] , identifier[stepnumber] = keyword[None] ):
literal[string]
keyword[if] identifier[stepnumber] keyword[is] keyword[None] :
identifier[stepnumber] = literal[int]
identifier[_checkPatternNumber] ( identifier[patternnumber] )
identifier[_checkStepNumber] ( identifier[stepnumber] )
keyword[if] keyword[not] identifier[registertype] keyword[in] identifier[list] ( identifier[REGISTER_START] . identifier[keys] ()):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[repr] ( identifier[registertype] ), identifier[repr] ( identifier[list] ( identifier[REGISTER_START] . identifier[keys] ()))))
identifier[address] = identifier[REGISTER_START] [ identifier[registertype] ]+ identifier[patternnumber] * identifier[REGISTER_OFFSET_PER_PATTERN] [ identifier[registertype] ]+ identifier[stepnumber] * identifier[REGISTER_OFFSET_PER_STEP] [ identifier[registertype] ]
keyword[return] identifier[address] | def _calculateRegisterAddress(registertype, patternnumber, stepnumber=None):
"""Calculate the register address for pattern related parameters.
Args:
* registertype (string): The type of parameter, for example 'cycles'.
Allowed are the keys from :data:`REGISTER_START`.
* patternnumber (int): The pattern number.
* stepnumber (int): The step number. Use None if it not should affect the calculation.
Returns:
The register address (int).
Raises:
TypeError, ValueError
"""
if stepnumber is None:
stepnumber = 0 # depends on [control=['if'], data=['stepnumber']]
# Argument checking
_checkPatternNumber(patternnumber)
_checkStepNumber(stepnumber)
if not registertype in list(REGISTER_START.keys()): # To comply with both Python2 and Python3
raise ValueError('Wrong register type: {0}. Allowed values: {1}'.format(repr(registertype), repr(list(REGISTER_START.keys())))) # depends on [control=['if'], data=[]]
# Calculate register address
address = REGISTER_START[registertype] + patternnumber * REGISTER_OFFSET_PER_PATTERN[registertype] + stepnumber * REGISTER_OFFSET_PER_STEP[registertype]
return address |
def send_request_message(self, request_id, meta, body, _=None):
"""
Receives a request from the client and handles and dispatches in in-thread. `message_expiry_in_seconds` is not
supported. Messages do not expire, as the server handles the request immediately in the same thread before
this method returns. This method blocks until the server has completed handling the request.
"""
self._current_request = (request_id, meta, body)
try:
self.server.handle_next_request()
finally:
self._current_request = None | def function[send_request_message, parameter[self, request_id, meta, body, _]]:
constant[
Receives a request from the client and handles and dispatches in in-thread. `message_expiry_in_seconds` is not
supported. Messages do not expire, as the server handles the request immediately in the same thread before
this method returns. This method blocks until the server has completed handling the request.
]
name[self]._current_request assign[=] tuple[[<ast.Name object at 0x7da20c6e4d00>, <ast.Name object at 0x7da20c6e66b0>, <ast.Name object at 0x7da20c6e5db0>]]
<ast.Try object at 0x7da20c6e4b50> | keyword[def] identifier[send_request_message] ( identifier[self] , identifier[request_id] , identifier[meta] , identifier[body] , identifier[_] = keyword[None] ):
literal[string]
identifier[self] . identifier[_current_request] =( identifier[request_id] , identifier[meta] , identifier[body] )
keyword[try] :
identifier[self] . identifier[server] . identifier[handle_next_request] ()
keyword[finally] :
identifier[self] . identifier[_current_request] = keyword[None] | def send_request_message(self, request_id, meta, body, _=None):
"""
Receives a request from the client and handles and dispatches in in-thread. `message_expiry_in_seconds` is not
supported. Messages do not expire, as the server handles the request immediately in the same thread before
this method returns. This method blocks until the server has completed handling the request.
"""
self._current_request = (request_id, meta, body)
try:
self.server.handle_next_request() # depends on [control=['try'], data=[]]
finally:
self._current_request = None |
def wrap_get_stream(cls, response):
"""Wrap the response from getting a stream into an instance
and return it
:param response: The response from getting a stream
:type response: :class:`requests.Response`
:returns: the new stream instance
:rtype: :class:`list` of :class:`stream`
:raises: None
"""
json = response.json()
s = cls.wrap_json(json['stream'])
return s | def function[wrap_get_stream, parameter[cls, response]]:
constant[Wrap the response from getting a stream into an instance
and return it
:param response: The response from getting a stream
:type response: :class:`requests.Response`
:returns: the new stream instance
:rtype: :class:`list` of :class:`stream`
:raises: None
]
variable[json] assign[=] call[name[response].json, parameter[]]
variable[s] assign[=] call[name[cls].wrap_json, parameter[call[name[json]][constant[stream]]]]
return[name[s]] | keyword[def] identifier[wrap_get_stream] ( identifier[cls] , identifier[response] ):
literal[string]
identifier[json] = identifier[response] . identifier[json] ()
identifier[s] = identifier[cls] . identifier[wrap_json] ( identifier[json] [ literal[string] ])
keyword[return] identifier[s] | def wrap_get_stream(cls, response):
"""Wrap the response from getting a stream into an instance
and return it
:param response: The response from getting a stream
:type response: :class:`requests.Response`
:returns: the new stream instance
:rtype: :class:`list` of :class:`stream`
:raises: None
"""
json = response.json()
s = cls.wrap_json(json['stream'])
return s |
def get_reference_fields(self, exclude_models=None):
"""
Get all Django model fields which reference the Item model.
"""
if exclude_models is None:
exclude_models = []
result = []
for django_model in django.apps.apps.get_models():
if any([issubclass(django_model, m) for m in exclude_models]):
continue
for django_field in django_model._meta.fields:
if isinstance(django_field, models.ForeignKey) and django_field.related.to == Item:
result = [(m, f) for (m, f) in result if not issubclass(django_model, m)]
result.append((django_model, django_field))
return result | def function[get_reference_fields, parameter[self, exclude_models]]:
constant[
Get all Django model fields which reference the Item model.
]
if compare[name[exclude_models] is constant[None]] begin[:]
variable[exclude_models] assign[=] list[[]]
variable[result] assign[=] list[[]]
for taget[name[django_model]] in starred[call[name[django].apps.apps.get_models, parameter[]]] begin[:]
if call[name[any], parameter[<ast.ListComp object at 0x7da204623910>]] begin[:]
continue
for taget[name[django_field]] in starred[name[django_model]._meta.fields] begin[:]
if <ast.BoolOp object at 0x7da204622bf0> begin[:]
variable[result] assign[=] <ast.ListComp object at 0x7da20c76e410>
call[name[result].append, parameter[tuple[[<ast.Name object at 0x7da207f00370>, <ast.Name object at 0x7da207f014b0>]]]]
return[name[result]] | keyword[def] identifier[get_reference_fields] ( identifier[self] , identifier[exclude_models] = keyword[None] ):
literal[string]
keyword[if] identifier[exclude_models] keyword[is] keyword[None] :
identifier[exclude_models] =[]
identifier[result] =[]
keyword[for] identifier[django_model] keyword[in] identifier[django] . identifier[apps] . identifier[apps] . identifier[get_models] ():
keyword[if] identifier[any] ([ identifier[issubclass] ( identifier[django_model] , identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[exclude_models] ]):
keyword[continue]
keyword[for] identifier[django_field] keyword[in] identifier[django_model] . identifier[_meta] . identifier[fields] :
keyword[if] identifier[isinstance] ( identifier[django_field] , identifier[models] . identifier[ForeignKey] ) keyword[and] identifier[django_field] . identifier[related] . identifier[to] == identifier[Item] :
identifier[result] =[( identifier[m] , identifier[f] ) keyword[for] ( identifier[m] , identifier[f] ) keyword[in] identifier[result] keyword[if] keyword[not] identifier[issubclass] ( identifier[django_model] , identifier[m] )]
identifier[result] . identifier[append] (( identifier[django_model] , identifier[django_field] ))
keyword[return] identifier[result] | def get_reference_fields(self, exclude_models=None):
"""
Get all Django model fields which reference the Item model.
"""
if exclude_models is None:
exclude_models = [] # depends on [control=['if'], data=['exclude_models']]
result = []
for django_model in django.apps.apps.get_models():
if any([issubclass(django_model, m) for m in exclude_models]):
continue # depends on [control=['if'], data=[]]
for django_field in django_model._meta.fields:
if isinstance(django_field, models.ForeignKey) and django_field.related.to == Item:
result = [(m, f) for (m, f) in result if not issubclass(django_model, m)]
result.append((django_model, django_field)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['django_field']] # depends on [control=['for'], data=['django_model']]
return result |
def from_xml(cls, xml, service, parent_id):
"""Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
"""
# Add a few extra pieces of information
content = {'description': service.description,
'service_id': service.service_id,
'parent_id': parent_id}
# Extract values from the XML
all_text_elements = tags_with_text(xml)
for item in all_text_elements:
tag = item.tag[len(NAMESPACES['ms']) + 2:] # Strip namespace
tag = camel_to_underscore(tag) # Convert to nice names
if tag not in cls.valid_fields:
message = 'The info tag \'{}\' is not allowed for this item'.\
format(tag)
raise ValueError(message)
content[tag] = item.text
# Convert values for known types
for key, value in content.items():
if key == 'duration':
content[key] = int(value)
if key in ['can_play', 'can_skip', 'can_add_to_favorites',
'can_enumerate']:
content[key] = True if value == 'true' else False
# Rename a single item
content['item_id'] = content.pop('id')
# And get the extended id
content['extended_id'] = service.id_to_extended_id(content['item_id'],
cls)
# Add URI if there is one for the relevant class
uri = service.form_uri(content, cls)
if uri:
content['uri'] = uri
# Check for all required values
for key in cls.required_fields:
if key not in content:
message = 'An XML field that correspond to the key \'{}\' '\
'is required. See the docstring for help.'.format(key)
return cls.from_dict(content) | def function[from_xml, parameter[cls, xml, service, parent_id]]:
constant[Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
]
variable[content] assign[=] dictionary[[<ast.Constant object at 0x7da207f00e80>, <ast.Constant object at 0x7da207f01ed0>, <ast.Constant object at 0x7da207f01960>], [<ast.Attribute object at 0x7da207f027a0>, <ast.Attribute object at 0x7da207f03640>, <ast.Name object at 0x7da207f00b80>]]
variable[all_text_elements] assign[=] call[name[tags_with_text], parameter[name[xml]]]
for taget[name[item]] in starred[name[all_text_elements]] begin[:]
variable[tag] assign[=] call[name[item].tag][<ast.Slice object at 0x7da207f03df0>]
variable[tag] assign[=] call[name[camel_to_underscore], parameter[name[tag]]]
if compare[name[tag] <ast.NotIn object at 0x7da2590d7190> name[cls].valid_fields] begin[:]
variable[message] assign[=] call[constant[The info tag '{}' is not allowed for this item].format, parameter[name[tag]]]
<ast.Raise object at 0x7da207f00a30>
call[name[content]][name[tag]] assign[=] name[item].text
for taget[tuple[[<ast.Name object at 0x7da207f01c60>, <ast.Name object at 0x7da207f03130>]]] in starred[call[name[content].items, parameter[]]] begin[:]
if compare[name[key] equal[==] constant[duration]] begin[:]
call[name[content]][name[key]] assign[=] call[name[int], parameter[name[value]]]
if compare[name[key] in list[[<ast.Constant object at 0x7da207f01c30>, <ast.Constant object at 0x7da207f03fa0>, <ast.Constant object at 0x7da207f01c90>, <ast.Constant object at 0x7da207f03f10>]]] begin[:]
call[name[content]][name[key]] assign[=] <ast.IfExp object at 0x7da207f00160>
call[name[content]][constant[item_id]] assign[=] call[name[content].pop, parameter[constant[id]]]
call[name[content]][constant[extended_id]] assign[=] call[name[service].id_to_extended_id, parameter[call[name[content]][constant[item_id]], name[cls]]]
variable[uri] assign[=] call[name[service].form_uri, parameter[name[content], name[cls]]]
if name[uri] begin[:]
call[name[content]][constant[uri]] assign[=] name[uri]
for taget[name[key]] in starred[name[cls].required_fields] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[content]] begin[:]
variable[message] assign[=] call[constant[An XML field that correspond to the key '{}' is required. See the docstring for help.].format, parameter[name[key]]]
return[call[name[cls].from_dict, parameter[name[content]]]] | keyword[def] identifier[from_xml] ( identifier[cls] , identifier[xml] , identifier[service] , identifier[parent_id] ):
literal[string]
identifier[content] ={ literal[string] : identifier[service] . identifier[description] ,
literal[string] : identifier[service] . identifier[service_id] ,
literal[string] : identifier[parent_id] }
identifier[all_text_elements] = identifier[tags_with_text] ( identifier[xml] )
keyword[for] identifier[item] keyword[in] identifier[all_text_elements] :
identifier[tag] = identifier[item] . identifier[tag] [ identifier[len] ( identifier[NAMESPACES] [ literal[string] ])+ literal[int] :]
identifier[tag] = identifier[camel_to_underscore] ( identifier[tag] )
keyword[if] identifier[tag] keyword[not] keyword[in] identifier[cls] . identifier[valid_fields] :
identifier[message] = literal[string] . identifier[format] ( identifier[tag] )
keyword[raise] identifier[ValueError] ( identifier[message] )
identifier[content] [ identifier[tag] ]= identifier[item] . identifier[text]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[content] . identifier[items] ():
keyword[if] identifier[key] == literal[string] :
identifier[content] [ identifier[key] ]= identifier[int] ( identifier[value] )
keyword[if] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] ,
literal[string] ]:
identifier[content] [ identifier[key] ]= keyword[True] keyword[if] identifier[value] == literal[string] keyword[else] keyword[False]
identifier[content] [ literal[string] ]= identifier[content] . identifier[pop] ( literal[string] )
identifier[content] [ literal[string] ]= identifier[service] . identifier[id_to_extended_id] ( identifier[content] [ literal[string] ],
identifier[cls] )
identifier[uri] = identifier[service] . identifier[form_uri] ( identifier[content] , identifier[cls] )
keyword[if] identifier[uri] :
identifier[content] [ literal[string] ]= identifier[uri]
keyword[for] identifier[key] keyword[in] identifier[cls] . identifier[required_fields] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[content] :
identifier[message] = literal[string] literal[string] . identifier[format] ( identifier[key] )
keyword[return] identifier[cls] . identifier[from_dict] ( identifier[content] ) | def from_xml(cls, xml, service, parent_id):
"""Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
"""
# Add a few extra pieces of information
content = {'description': service.description, 'service_id': service.service_id, 'parent_id': parent_id}
# Extract values from the XML
all_text_elements = tags_with_text(xml)
for item in all_text_elements:
tag = item.tag[len(NAMESPACES['ms']) + 2:] # Strip namespace
tag = camel_to_underscore(tag) # Convert to nice names
if tag not in cls.valid_fields:
message = "The info tag '{}' is not allowed for this item".format(tag)
raise ValueError(message) # depends on [control=['if'], data=['tag']]
content[tag] = item.text # depends on [control=['for'], data=['item']]
# Convert values for known types
for (key, value) in content.items():
if key == 'duration':
content[key] = int(value) # depends on [control=['if'], data=['key']]
if key in ['can_play', 'can_skip', 'can_add_to_favorites', 'can_enumerate']:
content[key] = True if value == 'true' else False # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=[]]
# Rename a single item
content['item_id'] = content.pop('id')
# And get the extended id
content['extended_id'] = service.id_to_extended_id(content['item_id'], cls)
# Add URI if there is one for the relevant class
uri = service.form_uri(content, cls)
if uri:
content['uri'] = uri # depends on [control=['if'], data=[]]
# Check for all required values
for key in cls.required_fields:
if key not in content:
message = "An XML field that correspond to the key '{}' is required. See the docstring for help.".format(key) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
return cls.from_dict(content) |
def wrap(self, methodName, types, skip=2):
"""
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
"""
def handler(fields):
try:
args = [
field if typ is str else
int(field or 0) if typ is int else
float(field or 0) if typ is float else
bool(int(field or 0))
for (typ, field) in zip(types, fields[skip:])]
method(*args)
except Exception:
self.logger.exception(f'Error for {methodName}:')
method = getattr(self.wrapper, methodName, None)
return handler if method else lambda *args: None | def function[wrap, parameter[self, methodName, types, skip]]:
constant[
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
]
def function[handler, parameter[fields]]:
<ast.Try object at 0x7da18f812620>
variable[method] assign[=] call[name[getattr], parameter[name[self].wrapper, name[methodName], constant[None]]]
return[<ast.IfExp object at 0x7da20c796140>] | keyword[def] identifier[wrap] ( identifier[self] , identifier[methodName] , identifier[types] , identifier[skip] = literal[int] ):
literal[string]
keyword[def] identifier[handler] ( identifier[fields] ):
keyword[try] :
identifier[args] =[
identifier[field] keyword[if] identifier[typ] keyword[is] identifier[str] keyword[else]
identifier[int] ( identifier[field] keyword[or] literal[int] ) keyword[if] identifier[typ] keyword[is] identifier[int] keyword[else]
identifier[float] ( identifier[field] keyword[or] literal[int] ) keyword[if] identifier[typ] keyword[is] identifier[float] keyword[else]
identifier[bool] ( identifier[int] ( identifier[field] keyword[or] literal[int] ))
keyword[for] ( identifier[typ] , identifier[field] ) keyword[in] identifier[zip] ( identifier[types] , identifier[fields] [ identifier[skip] :])]
identifier[method] (* identifier[args] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[logger] . identifier[exception] ( literal[string] )
identifier[method] = identifier[getattr] ( identifier[self] . identifier[wrapper] , identifier[methodName] , keyword[None] )
keyword[return] identifier[handler] keyword[if] identifier[method] keyword[else] keyword[lambda] * identifier[args] : keyword[None] | def wrap(self, methodName, types, skip=2):
"""
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
"""
def handler(fields):
try:
args = [field if typ is str else int(field or 0) if typ is int else float(field or 0) if typ is float else bool(int(field or 0)) for (typ, field) in zip(types, fields[skip:])]
method(*args) # depends on [control=['try'], data=[]]
except Exception:
self.logger.exception(f'Error for {methodName}:') # depends on [control=['except'], data=[]]
method = getattr(self.wrapper, methodName, None)
return handler if method else lambda *args: None |
def tostr(self):
"""Export SVG as a string"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr | def function[tostr, parameter[self]]:
constant[Export SVG as a string]
variable[element] assign[=] call[name[_transform].SVGFigure, parameter[name[self].width, name[self].height]]
call[name[element].append, parameter[name[self]]]
variable[svgstr] assign[=] call[name[element].to_str, parameter[]]
return[name[svgstr]] | keyword[def] identifier[tostr] ( identifier[self] ):
literal[string]
identifier[element] = identifier[_transform] . identifier[SVGFigure] ( identifier[self] . identifier[width] , identifier[self] . identifier[height] )
identifier[element] . identifier[append] ( identifier[self] )
identifier[svgstr] = identifier[element] . identifier[to_str] ()
keyword[return] identifier[svgstr] | def tostr(self):
"""Export SVG as a string"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr |
def get_payment_card_by_id(cls, payment_card_id, **kwargs):
"""Find PaymentCard
Return single instance of PaymentCard by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_card_by_id(payment_card_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to return (required)
:return: PaymentCard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_payment_card_by_id_with_http_info(payment_card_id, **kwargs)
else:
(data) = cls._get_payment_card_by_id_with_http_info(payment_card_id, **kwargs)
return data | def function[get_payment_card_by_id, parameter[cls, payment_card_id]]:
constant[Find PaymentCard
Return single instance of PaymentCard by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_card_by_id(payment_card_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to return (required)
:return: PaymentCard
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._get_payment_card_by_id_with_http_info, parameter[name[payment_card_id]]]] | keyword[def] identifier[get_payment_card_by_id] ( identifier[cls] , identifier[payment_card_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_get_payment_card_by_id_with_http_info] ( identifier[payment_card_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_get_payment_card_by_id_with_http_info] ( identifier[payment_card_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def get_payment_card_by_id(cls, payment_card_id, **kwargs):
"""Find PaymentCard
Return single instance of PaymentCard by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_card_by_id(payment_card_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to return (required)
:return: PaymentCard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_payment_card_by_id_with_http_info(payment_card_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._get_payment_card_by_id_with_http_info(payment_card_id, **kwargs)
return data |
def get_successors(self, node, excluding_fakeret=True, jumpkind=None):
"""
Get successors of a node in the control flow graph.
:param CFGNode node: The node.
:param bool excluding_fakeret: True if you want to exclude all successors that is connected to the node
with a fakeret edge.
:param str or None jumpkind: Only return successors with the specified jumpkind. This argument will be
ignored if set to None.
:return: A list of successors
:rtype: list
"""
if jumpkind is not None:
if excluding_fakeret and jumpkind == 'Ijk_FakeRet':
return [ ]
if not excluding_fakeret and jumpkind is None:
# fast path
if node in self.graph:
return list(self.graph.successors(node))
return [ ]
successors = []
for _, suc, data in self.graph.out_edges([node], data=True):
jk = data['jumpkind']
if jumpkind is not None:
if jumpkind == jk:
successors.append(suc)
elif excluding_fakeret:
if jk != 'Ijk_FakeRet':
successors.append(suc)
else:
successors.append(suc)
return successors | def function[get_successors, parameter[self, node, excluding_fakeret, jumpkind]]:
constant[
Get successors of a node in the control flow graph.
:param CFGNode node: The node.
:param bool excluding_fakeret: True if you want to exclude all successors that is connected to the node
with a fakeret edge.
:param str or None jumpkind: Only return successors with the specified jumpkind. This argument will be
ignored if set to None.
:return: A list of successors
:rtype: list
]
if compare[name[jumpkind] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da20c7cb550> begin[:]
return[list[[]]]
if <ast.BoolOp object at 0x7da20c7caf80> begin[:]
if compare[name[node] in name[self].graph] begin[:]
return[call[name[list], parameter[call[name[self].graph.successors, parameter[name[node]]]]]]
return[list[[]]]
variable[successors] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c7cb460>, <ast.Name object at 0x7da20c7cba30>, <ast.Name object at 0x7da20c7cb040>]]] in starred[call[name[self].graph.out_edges, parameter[list[[<ast.Name object at 0x7da20c7c9b10>]]]]] begin[:]
variable[jk] assign[=] call[name[data]][constant[jumpkind]]
if compare[name[jumpkind] is_not constant[None]] begin[:]
if compare[name[jumpkind] equal[==] name[jk]] begin[:]
call[name[successors].append, parameter[name[suc]]]
return[name[successors]] | keyword[def] identifier[get_successors] ( identifier[self] , identifier[node] , identifier[excluding_fakeret] = keyword[True] , identifier[jumpkind] = keyword[None] ):
literal[string]
keyword[if] identifier[jumpkind] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[excluding_fakeret] keyword[and] identifier[jumpkind] == literal[string] :
keyword[return] []
keyword[if] keyword[not] identifier[excluding_fakeret] keyword[and] identifier[jumpkind] keyword[is] keyword[None] :
keyword[if] identifier[node] keyword[in] identifier[self] . identifier[graph] :
keyword[return] identifier[list] ( identifier[self] . identifier[graph] . identifier[successors] ( identifier[node] ))
keyword[return] []
identifier[successors] =[]
keyword[for] identifier[_] , identifier[suc] , identifier[data] keyword[in] identifier[self] . identifier[graph] . identifier[out_edges] ([ identifier[node] ], identifier[data] = keyword[True] ):
identifier[jk] = identifier[data] [ literal[string] ]
keyword[if] identifier[jumpkind] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[jumpkind] == identifier[jk] :
identifier[successors] . identifier[append] ( identifier[suc] )
keyword[elif] identifier[excluding_fakeret] :
keyword[if] identifier[jk] != literal[string] :
identifier[successors] . identifier[append] ( identifier[suc] )
keyword[else] :
identifier[successors] . identifier[append] ( identifier[suc] )
keyword[return] identifier[successors] | def get_successors(self, node, excluding_fakeret=True, jumpkind=None):
"""
Get successors of a node in the control flow graph.
:param CFGNode node: The node.
:param bool excluding_fakeret: True if you want to exclude all successors that is connected to the node
with a fakeret edge.
:param str or None jumpkind: Only return successors with the specified jumpkind. This argument will be
ignored if set to None.
:return: A list of successors
:rtype: list
"""
if jumpkind is not None:
if excluding_fakeret and jumpkind == 'Ijk_FakeRet':
return [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['jumpkind']]
if not excluding_fakeret and jumpkind is None:
# fast path
if node in self.graph:
return list(self.graph.successors(node)) # depends on [control=['if'], data=['node']]
return [] # depends on [control=['if'], data=[]]
successors = []
for (_, suc, data) in self.graph.out_edges([node], data=True):
jk = data['jumpkind']
if jumpkind is not None:
if jumpkind == jk:
successors.append(suc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['jumpkind']]
elif excluding_fakeret:
if jk != 'Ijk_FakeRet':
successors.append(suc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
successors.append(suc) # depends on [control=['for'], data=[]]
return successors |
def get_builds(self, id, **kwargs):
"""
Get all BuildRecords (running and archived) associated with this Build Configuration, returns empty list if no build records are found
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_builds(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_builds_with_http_info(id, **kwargs)
else:
(data) = self.get_builds_with_http_info(id, **kwargs)
return data | def function[get_builds, parameter[self, id]]:
constant[
Get all BuildRecords (running and archived) associated with this Build Configuration, returns empty list if no build records are found
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_builds(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[callback]]] begin[:]
return[call[name[self].get_builds_with_http_info, parameter[name[id]]]] | keyword[def] identifier[get_builds] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_builds_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[get_builds_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def get_builds(self, id, **kwargs):
"""
Get all BuildRecords (running and archived) associated with this Build Configuration, returns empty list if no build records are found
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_builds(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_builds_with_http_info(id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.get_builds_with_http_info(id, **kwargs)
return data |
def rot_from_vectors(start_vec, end_vec):
"""Return the rotation matrix to rotate from one vector to another."""
dot = start_vec.dot(end_vec)
# TODO: check if dot is a valid number
angle = math.acos(dot)
# TODO: check if angle is a valid number
cross = start_vec.cross(end_vec)
cross.normalize
rot_matrix = Matrix44.from_axis_angle(cross, angle)
# TODO: catch exception and return identity for invalid numbers
return rot_matrix | def function[rot_from_vectors, parameter[start_vec, end_vec]]:
constant[Return the rotation matrix to rotate from one vector to another.]
variable[dot] assign[=] call[name[start_vec].dot, parameter[name[end_vec]]]
variable[angle] assign[=] call[name[math].acos, parameter[name[dot]]]
variable[cross] assign[=] call[name[start_vec].cross, parameter[name[end_vec]]]
name[cross].normalize
variable[rot_matrix] assign[=] call[name[Matrix44].from_axis_angle, parameter[name[cross], name[angle]]]
return[name[rot_matrix]] | keyword[def] identifier[rot_from_vectors] ( identifier[start_vec] , identifier[end_vec] ):
literal[string]
identifier[dot] = identifier[start_vec] . identifier[dot] ( identifier[end_vec] )
identifier[angle] = identifier[math] . identifier[acos] ( identifier[dot] )
identifier[cross] = identifier[start_vec] . identifier[cross] ( identifier[end_vec] )
identifier[cross] . identifier[normalize]
identifier[rot_matrix] = identifier[Matrix44] . identifier[from_axis_angle] ( identifier[cross] , identifier[angle] )
keyword[return] identifier[rot_matrix] | def rot_from_vectors(start_vec, end_vec):
"""Return the rotation matrix to rotate from one vector to another."""
dot = start_vec.dot(end_vec)
# TODO: check if dot is a valid number
angle = math.acos(dot)
# TODO: check if angle is a valid number
cross = start_vec.cross(end_vec)
cross.normalize
rot_matrix = Matrix44.from_axis_angle(cross, angle)
# TODO: catch exception and return identity for invalid numbers
return rot_matrix |
def connect(host='localhost', port=21050, database=None, timeout=None,
use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None,
password=None, kerberos_service_name='impala', use_ldap=None,
ldap_user=None, ldap_password=None, use_kerberos=None,
protocol=None, krb_host=None):
"""Get a connection to HiveServer2 (HS2).
These options are largely compatible with the impala-shell command line
arguments. See those docs for more information.
Parameters
----------
host : str
The hostname for HS2. For Impala, this can be any of the `impalad`s.
port : int, optional
The port number for HS2. The Impala default is 21050. The Hive port is
likely different.
database : str, optional
The default database. If `None`, the result is
implementation-dependent.
timeout : int, optional
Connection timeout in seconds. Default is no timeout.
use_ssl : bool, optional
Enable SSL.
ca_cert : str, optional
Local path to the the third-party CA certificate. If SSL is enabled but
the certificate is not specified, the server certificate will not be
validated.
auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'}
Specify the authentication mechanism. `'NOSASL'` for unsecured Impala.
`'PLAIN'` for unsecured Hive (because Hive requires the SASL
transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with
LDAP.
user : str, optional
LDAP user, if applicable.
password : str, optional
LDAP password, if applicable.
kerberos_service_name : str, optional
Authenticate to a particular `impalad` service principal. Uses
`'impala'` by default.
use_ldap : bool, optional
Specify `auth_mechanism='LDAP'` instead.
.. deprecated:: 0.11.0
ldap_user : str, optional
Use `user` parameter instead.
.. deprecated:: 0.11.0
ldap_password : str, optional
Use `password` parameter instead.
.. deprecated:: 0.11.0
use_kerberos : bool, optional
Specify `auth_mechanism='GSSAPI'` instead.
.. deprecated:: 0.11.0
protocol : str, optional
Do not use. HiveServer2 is the only protocol currently supported.
.. deprecated:: 0.11.0
Returns
-------
HiveServer2Connection
A `Connection` object (DB API 2.0-compliant).
"""
# pylint: disable=too-many-locals
if use_kerberos is not None:
warn_deprecate('use_kerberos', 'auth_mechanism="GSSAPI"')
if use_kerberos:
auth_mechanism = 'GSSAPI'
if use_ldap is not None:
warn_deprecate('use_ldap', 'auth_mechanism="LDAP"')
if use_ldap:
auth_mechanism = 'LDAP'
if auth_mechanism:
auth_mechanism = auth_mechanism.upper()
else:
auth_mechanism = 'NOSASL'
if auth_mechanism not in AUTH_MECHANISMS:
raise NotSupportedError(
'Unsupported authentication mechanism: {0}'.format(auth_mechanism))
if ldap_user is not None:
warn_deprecate('ldap_user', 'user')
user = ldap_user
if ldap_password is not None:
warn_deprecate('ldap_password', 'password')
password = ldap_password
if protocol is not None:
if protocol.lower() == 'hiveserver2':
warn_protocol_param()
else:
raise NotSupportedError(
"'{0}' is not a supported protocol; only HiveServer2 is "
"supported".format(protocol))
service = hs2.connect(host=host, port=port,
timeout=timeout, use_ssl=use_ssl,
ca_cert=ca_cert, user=user, password=password,
kerberos_service_name=kerberos_service_name,
auth_mechanism=auth_mechanism, krb_host=krb_host)
return hs2.HiveServer2Connection(service, default_db=database) | def function[connect, parameter[host, port, database, timeout, use_ssl, ca_cert, auth_mechanism, user, password, kerberos_service_name, use_ldap, ldap_user, ldap_password, use_kerberos, protocol, krb_host]]:
constant[Get a connection to HiveServer2 (HS2).
These options are largely compatible with the impala-shell command line
arguments. See those docs for more information.
Parameters
----------
host : str
The hostname for HS2. For Impala, this can be any of the `impalad`s.
port : int, optional
The port number for HS2. The Impala default is 21050. The Hive port is
likely different.
database : str, optional
The default database. If `None`, the result is
implementation-dependent.
timeout : int, optional
Connection timeout in seconds. Default is no timeout.
use_ssl : bool, optional
Enable SSL.
ca_cert : str, optional
Local path to the the third-party CA certificate. If SSL is enabled but
the certificate is not specified, the server certificate will not be
validated.
auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'}
Specify the authentication mechanism. `'NOSASL'` for unsecured Impala.
`'PLAIN'` for unsecured Hive (because Hive requires the SASL
transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with
LDAP.
user : str, optional
LDAP user, if applicable.
password : str, optional
LDAP password, if applicable.
kerberos_service_name : str, optional
Authenticate to a particular `impalad` service principal. Uses
`'impala'` by default.
use_ldap : bool, optional
Specify `auth_mechanism='LDAP'` instead.
.. deprecated:: 0.11.0
ldap_user : str, optional
Use `user` parameter instead.
.. deprecated:: 0.11.0
ldap_password : str, optional
Use `password` parameter instead.
.. deprecated:: 0.11.0
use_kerberos : bool, optional
Specify `auth_mechanism='GSSAPI'` instead.
.. deprecated:: 0.11.0
protocol : str, optional
Do not use. HiveServer2 is the only protocol currently supported.
.. deprecated:: 0.11.0
Returns
-------
HiveServer2Connection
A `Connection` object (DB API 2.0-compliant).
]
if compare[name[use_kerberos] is_not constant[None]] begin[:]
call[name[warn_deprecate], parameter[constant[use_kerberos], constant[auth_mechanism="GSSAPI"]]]
if name[use_kerberos] begin[:]
variable[auth_mechanism] assign[=] constant[GSSAPI]
if compare[name[use_ldap] is_not constant[None]] begin[:]
call[name[warn_deprecate], parameter[constant[use_ldap], constant[auth_mechanism="LDAP"]]]
if name[use_ldap] begin[:]
variable[auth_mechanism] assign[=] constant[LDAP]
if name[auth_mechanism] begin[:]
variable[auth_mechanism] assign[=] call[name[auth_mechanism].upper, parameter[]]
if compare[name[auth_mechanism] <ast.NotIn object at 0x7da2590d7190> name[AUTH_MECHANISMS]] begin[:]
<ast.Raise object at 0x7da1b1dda290>
if compare[name[ldap_user] is_not constant[None]] begin[:]
call[name[warn_deprecate], parameter[constant[ldap_user], constant[user]]]
variable[user] assign[=] name[ldap_user]
if compare[name[ldap_password] is_not constant[None]] begin[:]
call[name[warn_deprecate], parameter[constant[ldap_password], constant[password]]]
variable[password] assign[=] name[ldap_password]
if compare[name[protocol] is_not constant[None]] begin[:]
if compare[call[name[protocol].lower, parameter[]] equal[==] constant[hiveserver2]] begin[:]
call[name[warn_protocol_param], parameter[]]
variable[service] assign[=] call[name[hs2].connect, parameter[]]
return[call[name[hs2].HiveServer2Connection, parameter[name[service]]]] | keyword[def] identifier[connect] ( identifier[host] = literal[string] , identifier[port] = literal[int] , identifier[database] = keyword[None] , identifier[timeout] = keyword[None] ,
identifier[use_ssl] = keyword[False] , identifier[ca_cert] = keyword[None] , identifier[auth_mechanism] = literal[string] , identifier[user] = keyword[None] ,
identifier[password] = keyword[None] , identifier[kerberos_service_name] = literal[string] , identifier[use_ldap] = keyword[None] ,
identifier[ldap_user] = keyword[None] , identifier[ldap_password] = keyword[None] , identifier[use_kerberos] = keyword[None] ,
identifier[protocol] = keyword[None] , identifier[krb_host] = keyword[None] ):
literal[string]
keyword[if] identifier[use_kerberos] keyword[is] keyword[not] keyword[None] :
identifier[warn_deprecate] ( literal[string] , literal[string] )
keyword[if] identifier[use_kerberos] :
identifier[auth_mechanism] = literal[string]
keyword[if] identifier[use_ldap] keyword[is] keyword[not] keyword[None] :
identifier[warn_deprecate] ( literal[string] , literal[string] )
keyword[if] identifier[use_ldap] :
identifier[auth_mechanism] = literal[string]
keyword[if] identifier[auth_mechanism] :
identifier[auth_mechanism] = identifier[auth_mechanism] . identifier[upper] ()
keyword[else] :
identifier[auth_mechanism] = literal[string]
keyword[if] identifier[auth_mechanism] keyword[not] keyword[in] identifier[AUTH_MECHANISMS] :
keyword[raise] identifier[NotSupportedError] (
literal[string] . identifier[format] ( identifier[auth_mechanism] ))
keyword[if] identifier[ldap_user] keyword[is] keyword[not] keyword[None] :
identifier[warn_deprecate] ( literal[string] , literal[string] )
identifier[user] = identifier[ldap_user]
keyword[if] identifier[ldap_password] keyword[is] keyword[not] keyword[None] :
identifier[warn_deprecate] ( literal[string] , literal[string] )
identifier[password] = identifier[ldap_password]
keyword[if] identifier[protocol] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[protocol] . identifier[lower] ()== literal[string] :
identifier[warn_protocol_param] ()
keyword[else] :
keyword[raise] identifier[NotSupportedError] (
literal[string]
literal[string] . identifier[format] ( identifier[protocol] ))
identifier[service] = identifier[hs2] . identifier[connect] ( identifier[host] = identifier[host] , identifier[port] = identifier[port] ,
identifier[timeout] = identifier[timeout] , identifier[use_ssl] = identifier[use_ssl] ,
identifier[ca_cert] = identifier[ca_cert] , identifier[user] = identifier[user] , identifier[password] = identifier[password] ,
identifier[kerberos_service_name] = identifier[kerberos_service_name] ,
identifier[auth_mechanism] = identifier[auth_mechanism] , identifier[krb_host] = identifier[krb_host] )
keyword[return] identifier[hs2] . identifier[HiveServer2Connection] ( identifier[service] , identifier[default_db] = identifier[database] ) | def connect(host='localhost', port=21050, database=None, timeout=None, use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None, password=None, kerberos_service_name='impala', use_ldap=None, ldap_user=None, ldap_password=None, use_kerberos=None, protocol=None, krb_host=None):
"""Get a connection to HiveServer2 (HS2).
These options are largely compatible with the impala-shell command line
arguments. See those docs for more information.
Parameters
----------
host : str
The hostname for HS2. For Impala, this can be any of the `impalad`s.
port : int, optional
The port number for HS2. The Impala default is 21050. The Hive port is
likely different.
database : str, optional
The default database. If `None`, the result is
implementation-dependent.
timeout : int, optional
Connection timeout in seconds. Default is no timeout.
use_ssl : bool, optional
Enable SSL.
ca_cert : str, optional
Local path to the the third-party CA certificate. If SSL is enabled but
the certificate is not specified, the server certificate will not be
validated.
auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'}
Specify the authentication mechanism. `'NOSASL'` for unsecured Impala.
`'PLAIN'` for unsecured Hive (because Hive requires the SASL
transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with
LDAP.
user : str, optional
LDAP user, if applicable.
password : str, optional
LDAP password, if applicable.
kerberos_service_name : str, optional
Authenticate to a particular `impalad` service principal. Uses
`'impala'` by default.
use_ldap : bool, optional
Specify `auth_mechanism='LDAP'` instead.
.. deprecated:: 0.11.0
ldap_user : str, optional
Use `user` parameter instead.
.. deprecated:: 0.11.0
ldap_password : str, optional
Use `password` parameter instead.
.. deprecated:: 0.11.0
use_kerberos : bool, optional
Specify `auth_mechanism='GSSAPI'` instead.
.. deprecated:: 0.11.0
protocol : str, optional
Do not use. HiveServer2 is the only protocol currently supported.
.. deprecated:: 0.11.0
Returns
-------
HiveServer2Connection
A `Connection` object (DB API 2.0-compliant).
"""
# pylint: disable=too-many-locals
if use_kerberos is not None:
warn_deprecate('use_kerberos', 'auth_mechanism="GSSAPI"')
if use_kerberos:
auth_mechanism = 'GSSAPI' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['use_kerberos']]
if use_ldap is not None:
warn_deprecate('use_ldap', 'auth_mechanism="LDAP"')
if use_ldap:
auth_mechanism = 'LDAP' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['use_ldap']]
if auth_mechanism:
auth_mechanism = auth_mechanism.upper() # depends on [control=['if'], data=[]]
else:
auth_mechanism = 'NOSASL'
if auth_mechanism not in AUTH_MECHANISMS:
raise NotSupportedError('Unsupported authentication mechanism: {0}'.format(auth_mechanism)) # depends on [control=['if'], data=['auth_mechanism']]
if ldap_user is not None:
warn_deprecate('ldap_user', 'user')
user = ldap_user # depends on [control=['if'], data=['ldap_user']]
if ldap_password is not None:
warn_deprecate('ldap_password', 'password')
password = ldap_password # depends on [control=['if'], data=['ldap_password']]
if protocol is not None:
if protocol.lower() == 'hiveserver2':
warn_protocol_param() # depends on [control=['if'], data=[]]
else:
raise NotSupportedError("'{0}' is not a supported protocol; only HiveServer2 is supported".format(protocol)) # depends on [control=['if'], data=['protocol']]
service = hs2.connect(host=host, port=port, timeout=timeout, use_ssl=use_ssl, ca_cert=ca_cert, user=user, password=password, kerberos_service_name=kerberos_service_name, auth_mechanism=auth_mechanism, krb_host=krb_host)
return hs2.HiveServer2Connection(service, default_db=database) |
def get_notebook_rel_path(pkg=None):
"""Get the path of a notebook, relative to the current soruce package"""
pkg = pkg or open_source_package()
pkg_path = str(pkg.package_url.fspath)
nb_path = get_notebook_path()
return nb_path.replace(pkg_path, '').strip('/') | def function[get_notebook_rel_path, parameter[pkg]]:
constant[Get the path of a notebook, relative to the current soruce package]
variable[pkg] assign[=] <ast.BoolOp object at 0x7da1b19cd750>
variable[pkg_path] assign[=] call[name[str], parameter[name[pkg].package_url.fspath]]
variable[nb_path] assign[=] call[name[get_notebook_path], parameter[]]
return[call[call[name[nb_path].replace, parameter[name[pkg_path], constant[]]].strip, parameter[constant[/]]]] | keyword[def] identifier[get_notebook_rel_path] ( identifier[pkg] = keyword[None] ):
literal[string]
identifier[pkg] = identifier[pkg] keyword[or] identifier[open_source_package] ()
identifier[pkg_path] = identifier[str] ( identifier[pkg] . identifier[package_url] . identifier[fspath] )
identifier[nb_path] = identifier[get_notebook_path] ()
keyword[return] identifier[nb_path] . identifier[replace] ( identifier[pkg_path] , literal[string] ). identifier[strip] ( literal[string] ) | def get_notebook_rel_path(pkg=None):
"""Get the path of a notebook, relative to the current soruce package"""
pkg = pkg or open_source_package()
pkg_path = str(pkg.package_url.fspath)
nb_path = get_notebook_path()
return nb_path.replace(pkg_path, '').strip('/') |
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
syslog_msg = self.build_msg(record)
self.transport.transmit(syslog_msg)
except Exception:
self.handleError(record) | def function[emit, parameter[self, record]]:
constant[
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
]
<ast.Try object at 0x7da20e957100> | keyword[def] identifier[emit] ( identifier[self] , identifier[record] ):
literal[string]
keyword[try] :
identifier[syslog_msg] = identifier[self] . identifier[build_msg] ( identifier[record] )
identifier[self] . identifier[transport] . identifier[transmit] ( identifier[syslog_msg] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[handleError] ( identifier[record] ) | def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
syslog_msg = self.build_msg(record)
self.transport.transmit(syslog_msg) # depends on [control=['try'], data=[]]
except Exception:
self.handleError(record) # depends on [control=['except'], data=[]] |
def _populate_relationships(self, rec_curr):
"""Convert GO IDs in relationships to GO Term record objects. Populate children."""
for relationship_type, goids in rec_curr.relationship.items():
parent_recs = set([self[goid] for goid in goids])
rec_curr.relationship[relationship_type] = parent_recs
for parent_rec in parent_recs:
if relationship_type not in parent_rec.relationship_rev:
parent_rec.relationship_rev[relationship_type] = set([rec_curr])
else:
parent_rec.relationship_rev[relationship_type].add(rec_curr) | def function[_populate_relationships, parameter[self, rec_curr]]:
constant[Convert GO IDs in relationships to GO Term record objects. Populate children.]
for taget[tuple[[<ast.Name object at 0x7da18bcc99f0>, <ast.Name object at 0x7da18bccbb20>]]] in starred[call[name[rec_curr].relationship.items, parameter[]]] begin[:]
variable[parent_recs] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da18bccbf70>]]
call[name[rec_curr].relationship][name[relationship_type]] assign[=] name[parent_recs]
for taget[name[parent_rec]] in starred[name[parent_recs]] begin[:]
if compare[name[relationship_type] <ast.NotIn object at 0x7da2590d7190> name[parent_rec].relationship_rev] begin[:]
call[name[parent_rec].relationship_rev][name[relationship_type]] assign[=] call[name[set], parameter[list[[<ast.Name object at 0x7da18bcc9720>]]]] | keyword[def] identifier[_populate_relationships] ( identifier[self] , identifier[rec_curr] ):
literal[string]
keyword[for] identifier[relationship_type] , identifier[goids] keyword[in] identifier[rec_curr] . identifier[relationship] . identifier[items] ():
identifier[parent_recs] = identifier[set] ([ identifier[self] [ identifier[goid] ] keyword[for] identifier[goid] keyword[in] identifier[goids] ])
identifier[rec_curr] . identifier[relationship] [ identifier[relationship_type] ]= identifier[parent_recs]
keyword[for] identifier[parent_rec] keyword[in] identifier[parent_recs] :
keyword[if] identifier[relationship_type] keyword[not] keyword[in] identifier[parent_rec] . identifier[relationship_rev] :
identifier[parent_rec] . identifier[relationship_rev] [ identifier[relationship_type] ]= identifier[set] ([ identifier[rec_curr] ])
keyword[else] :
identifier[parent_rec] . identifier[relationship_rev] [ identifier[relationship_type] ]. identifier[add] ( identifier[rec_curr] ) | def _populate_relationships(self, rec_curr):
"""Convert GO IDs in relationships to GO Term record objects. Populate children."""
for (relationship_type, goids) in rec_curr.relationship.items():
parent_recs = set([self[goid] for goid in goids])
rec_curr.relationship[relationship_type] = parent_recs
for parent_rec in parent_recs:
if relationship_type not in parent_rec.relationship_rev:
parent_rec.relationship_rev[relationship_type] = set([rec_curr]) # depends on [control=['if'], data=['relationship_type']]
else:
parent_rec.relationship_rev[relationship_type].add(rec_curr) # depends on [control=['for'], data=['parent_rec']] # depends on [control=['for'], data=[]] |
def mediatype_create(name, mediatype, **kwargs):
'''
Create new mediatype
.. note::
This function accepts all standard mediatype properties: keyword
argument names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object
:param mediatype: media type - 0: email, 1: script, 2: sms, 3: Jabber, 100: Ez Texting
:param exec_path: exec path - Required for script and Ez Texting types, see Zabbix API docs
:param gsm_modem: exec path - Required for sms type, see Zabbix API docs
:param smtp_email: email address from which notifications will be sent, required for email type
:param smtp_helo: SMTP HELO, required for email type
:param smtp_server: SMTP server, required for email type
:param status: whether the media type is enabled - 0: enabled, 1: disabled
:param username: authentication user, required for Jabber and Ez Texting types
:param passwd: authentication password, required for Jabber and Ez Texting types
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: ID of the created mediatype.
CLI Example:
.. code-block:: bash
salt '*' zabbix.mediatype_create 'Email' 0 smtp_email='noreply@example.com'
smtp_server='mailserver.example.com' smtp_helo='zabbix.example.com'
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'mediatype.create'
params = {"description": name}
params['type'] = mediatype
params = _params_extend(params, _ignore_name=True, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['mediatypeid']
else:
raise KeyError
except KeyError:
return ret | def function[mediatype_create, parameter[name, mediatype]]:
constant[
Create new mediatype
.. note::
This function accepts all standard mediatype properties: keyword
argument names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object
:param mediatype: media type - 0: email, 1: script, 2: sms, 3: Jabber, 100: Ez Texting
:param exec_path: exec path - Required for script and Ez Texting types, see Zabbix API docs
:param gsm_modem: exec path - Required for sms type, see Zabbix API docs
:param smtp_email: email address from which notifications will be sent, required for email type
:param smtp_helo: SMTP HELO, required for email type
:param smtp_server: SMTP server, required for email type
:param status: whether the media type is enabled - 0: enabled, 1: disabled
:param username: authentication user, required for Jabber and Ez Texting types
:param passwd: authentication password, required for Jabber and Ez Texting types
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: ID of the created mediatype.
CLI Example:
.. code-block:: bash
salt '*' zabbix.mediatype_create 'Email' 0 smtp_email='noreply@example.com'
smtp_server='mailserver.example.com' smtp_helo='zabbix.example.com'
]
variable[conn_args] assign[=] call[name[_login], parameter[]]
variable[ret] assign[=] dictionary[[], []]
<ast.Try object at 0x7da204621630> | keyword[def] identifier[mediatype_create] ( identifier[name] , identifier[mediatype] ,** identifier[kwargs] ):
literal[string]
identifier[conn_args] = identifier[_login] (** identifier[kwargs] )
identifier[ret] ={}
keyword[try] :
keyword[if] identifier[conn_args] :
identifier[method] = literal[string]
identifier[params] ={ literal[string] : identifier[name] }
identifier[params] [ literal[string] ]= identifier[mediatype]
identifier[params] = identifier[_params_extend] ( identifier[params] , identifier[_ignore_name] = keyword[True] ,** identifier[kwargs] )
identifier[ret] = identifier[_query] ( identifier[method] , identifier[params] , identifier[conn_args] [ literal[string] ], identifier[conn_args] [ literal[string] ])
keyword[return] identifier[ret] [ literal[string] ][ literal[string] ]
keyword[else] :
keyword[raise] identifier[KeyError]
keyword[except] identifier[KeyError] :
keyword[return] identifier[ret] | def mediatype_create(name, mediatype, **kwargs):
"""
Create new mediatype
.. note::
This function accepts all standard mediatype properties: keyword
argument names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object
:param mediatype: media type - 0: email, 1: script, 2: sms, 3: Jabber, 100: Ez Texting
:param exec_path: exec path - Required for script and Ez Texting types, see Zabbix API docs
:param gsm_modem: exec path - Required for sms type, see Zabbix API docs
:param smtp_email: email address from which notifications will be sent, required for email type
:param smtp_helo: SMTP HELO, required for email type
:param smtp_server: SMTP server, required for email type
:param status: whether the media type is enabled - 0: enabled, 1: disabled
:param username: authentication user, required for Jabber and Ez Texting types
:param passwd: authentication password, required for Jabber and Ez Texting types
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: ID of the created mediatype.
CLI Example:
.. code-block:: bash
salt '*' zabbix.mediatype_create 'Email' 0 smtp_email='noreply@example.com'
smtp_server='mailserver.example.com' smtp_helo='zabbix.example.com'
"""
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'mediatype.create'
params = {'description': name}
params['type'] = mediatype
params = _params_extend(params, _ignore_name=True, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['mediatypeid'] # depends on [control=['if'], data=[]]
else:
raise KeyError # depends on [control=['try'], data=[]]
except KeyError:
return ret # depends on [control=['except'], data=[]] |
def received(self, src, body):
""" Simulate an incoming message
:type src: str
:param src: Message source
:type boby: str | unicode
:param body: Message body
:rtype: IncomingMessage
"""
# Create the message
self._msgid += 1
message = IncomingMessage(src, body, self._msgid)
# Log traffic
self._traffic.append(message)
# Handle it
self._receive_message(message)
# Finish
return message | def function[received, parameter[self, src, body]]:
constant[ Simulate an incoming message
:type src: str
:param src: Message source
:type boby: str | unicode
:param body: Message body
:rtype: IncomingMessage
]
<ast.AugAssign object at 0x7da204567490>
variable[message] assign[=] call[name[IncomingMessage], parameter[name[src], name[body], name[self]._msgid]]
call[name[self]._traffic.append, parameter[name[message]]]
call[name[self]._receive_message, parameter[name[message]]]
return[name[message]] | keyword[def] identifier[received] ( identifier[self] , identifier[src] , identifier[body] ):
literal[string]
identifier[self] . identifier[_msgid] += literal[int]
identifier[message] = identifier[IncomingMessage] ( identifier[src] , identifier[body] , identifier[self] . identifier[_msgid] )
identifier[self] . identifier[_traffic] . identifier[append] ( identifier[message] )
identifier[self] . identifier[_receive_message] ( identifier[message] )
keyword[return] identifier[message] | def received(self, src, body):
""" Simulate an incoming message
:type src: str
:param src: Message source
:type boby: str | unicode
:param body: Message body
:rtype: IncomingMessage
"""
# Create the message
self._msgid += 1
message = IncomingMessage(src, body, self._msgid)
# Log traffic
self._traffic.append(message)
# Handle it
self._receive_message(message)
# Finish
return message |
def remove_stale_sockets(self):
"""Removes stale sockets then adds new ones if pool is too small."""
if self.opts.max_idle_time_seconds is not None:
with self.lock:
while (self.sockets and
self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds):
sock_info = self.sockets.pop()
sock_info.close()
while True:
with self.lock:
if (len(self.sockets) + self.active_sockets >=
self.opts.min_pool_size):
# There are enough sockets in the pool.
break
# We must acquire the semaphore to respect max_pool_size.
if not self._socket_semaphore.acquire(False):
break
try:
sock_info = self.connect()
with self.lock:
self.sockets.appendleft(sock_info)
finally:
self._socket_semaphore.release() | def function[remove_stale_sockets, parameter[self]]:
constant[Removes stale sockets then adds new ones if pool is too small.]
if compare[name[self].opts.max_idle_time_seconds is_not constant[None]] begin[:]
with name[self].lock begin[:]
while <ast.BoolOp object at 0x7da2054a6440> begin[:]
variable[sock_info] assign[=] call[name[self].sockets.pop, parameter[]]
call[name[sock_info].close, parameter[]]
while constant[True] begin[:]
with name[self].lock begin[:]
if compare[binary_operation[call[name[len], parameter[name[self].sockets]] + name[self].active_sockets] greater_or_equal[>=] name[self].opts.min_pool_size] begin[:]
break
if <ast.UnaryOp object at 0x7da2054a4d30> begin[:]
break
<ast.Try object at 0x7da2054a79d0> | keyword[def] identifier[remove_stale_sockets] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[opts] . identifier[max_idle_time_seconds] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[self] . identifier[lock] :
keyword[while] ( identifier[self] . identifier[sockets] keyword[and]
identifier[self] . identifier[sockets] [- literal[int] ]. identifier[idle_time_seconds] ()> identifier[self] . identifier[opts] . identifier[max_idle_time_seconds] ):
identifier[sock_info] = identifier[self] . identifier[sockets] . identifier[pop] ()
identifier[sock_info] . identifier[close] ()
keyword[while] keyword[True] :
keyword[with] identifier[self] . identifier[lock] :
keyword[if] ( identifier[len] ( identifier[self] . identifier[sockets] )+ identifier[self] . identifier[active_sockets] >=
identifier[self] . identifier[opts] . identifier[min_pool_size] ):
keyword[break]
keyword[if] keyword[not] identifier[self] . identifier[_socket_semaphore] . identifier[acquire] ( keyword[False] ):
keyword[break]
keyword[try] :
identifier[sock_info] = identifier[self] . identifier[connect] ()
keyword[with] identifier[self] . identifier[lock] :
identifier[self] . identifier[sockets] . identifier[appendleft] ( identifier[sock_info] )
keyword[finally] :
identifier[self] . identifier[_socket_semaphore] . identifier[release] () | def remove_stale_sockets(self):
"""Removes stale sockets then adds new ones if pool is too small."""
if self.opts.max_idle_time_seconds is not None:
with self.lock:
while self.sockets and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds:
sock_info = self.sockets.pop()
sock_info.close() # depends on [control=['while'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
while True:
with self.lock:
if len(self.sockets) + self.active_sockets >= self.opts.min_pool_size:
# There are enough sockets in the pool.
break # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
# We must acquire the semaphore to respect max_pool_size.
if not self._socket_semaphore.acquire(False):
break # depends on [control=['if'], data=[]]
try:
sock_info = self.connect()
with self.lock:
self.sockets.appendleft(sock_info) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
finally:
self._socket_semaphore.release() # depends on [control=['while'], data=[]] |
def handle_args_and_set_context(args):
"""
Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFVersionContext object
"""
parser = argparse.ArgumentParser()
parser.add_argument("service_name", help="name of the service")
parser.add_argument("key", help="version key to look up for <service_name> such as 'ami-id' (list in EF_Config)")
parser.add_argument("env", help=", ".join(EFConfig.ENV_LIST))
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--get", help="get current version", action="store_true")
group.add_argument("--set", help="set current version of <key> to <value> for <service_name>")
group.add_argument("--rollback", help="set current version to most recent 'stable' version in history",
action="store_true")
group.add_argument("--rollback-to", help="rollback current version to <ami-id> in history",
action="store", metavar='<ami-id>')
group.add_argument("--history", help="Show version history for env/service/key", choices=['json', 'text'])
group.add_argument("--show", help="Show keys and values. '*' allowed for <key> and <env>",
action="store_true", default=False)
parser.add_argument("--build",
help="On --set, also set the externally defined build number associated with the version entity",
default="")
parser.add_argument("--commit_hash", help="On --set, also set the commit hash associated with the version entity",
default="")
parser.add_argument("--commit", help="Actually --set or --rollback (dry run if omitted)",
action="store_true", default=False)
parser.add_argument("--devel", help="Allow running from branch; don't refresh from origin", action="store_true",
default=False)
parser.add_argument("--force_env_full", help="Override env with env_full for account-scoped environments",
action="store_true", default=False)
parser.add_argument("--limit", help="Limit 'history', 'rollback', 'show' to first N records (default 100, max 1000)",
type=int, default=100)
parser.add_argument("--location", help="On --set, also mark the url location of the static build's version file to"
"support dist-hash precheck", default="")
if EFConfig.ALLOW_EF_VERSION_SKIP_PRECHECK:
parser.add_argument("--noprecheck", help="--set or --rollback without precheck", action="store_true", default=False)
parser.add_argument("--sr", help="optional /path/to/service_registry_file.json", default=None)
parser.add_argument("--stable", help="On --set, also mark the version 'stable'", action="store_true")
parser.add_argument("--verbose", help="Print additional info", action="store_true", default=False)
# parse
parsed_args = vars(parser.parse_args(args))
context = EFVersionContext()
# marshall the inherited context values
context._build_number = parsed_args["build"]
context._commit_hash = parsed_args["commit_hash"]
context.commit = parsed_args["commit"]
context.devel = parsed_args["devel"]
context._force_env_full = parsed_args["force_env_full"]
try:
context.env = parsed_args["env"]
except ValueError as e:
fail("Error in env: {}".format(e.message))
# marshall this module's additional context values
context._get = parsed_args["get"]
context._history = parsed_args["history"]
context._key = parsed_args["key"]
if EFConfig.ALLOW_EF_VERSION_SKIP_PRECHECK:
context._noprecheck = parsed_args["noprecheck"]
if not 1 <= parsed_args["limit"] <= 1000:
fail("Error in --limit. Valid range: 1..1000")
context._limit = parsed_args["limit"]
context._location = parsed_args["location"]
context._rollback = parsed_args["rollback"]
context._rollback_to = parsed_args["rollback_to"]
context._service_name = parsed_args["service_name"]
context._show = parsed_args["show"]
context._stable = parsed_args["stable"]
context._value = parsed_args["set"]
# Set up service registry and policy template path which depends on it
context.service_registry = EFServiceRegistry(parsed_args["sr"])
# VERBOSE is global
global VERBOSE
VERBOSE = parsed_args["verbose"]
validate_context(context)
return context | def function[handle_args_and_set_context, parameter[args]]:
constant[
Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFVersionContext object
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[service_name]]]
call[name[parser].add_argument, parameter[constant[key]]]
call[name[parser].add_argument, parameter[constant[env]]]
variable[group] assign[=] call[name[parser].add_mutually_exclusive_group, parameter[]]
call[name[group].add_argument, parameter[constant[--get]]]
call[name[group].add_argument, parameter[constant[--set]]]
call[name[group].add_argument, parameter[constant[--rollback]]]
call[name[group].add_argument, parameter[constant[--rollback-to]]]
call[name[group].add_argument, parameter[constant[--history]]]
call[name[group].add_argument, parameter[constant[--show]]]
call[name[parser].add_argument, parameter[constant[--build]]]
call[name[parser].add_argument, parameter[constant[--commit_hash]]]
call[name[parser].add_argument, parameter[constant[--commit]]]
call[name[parser].add_argument, parameter[constant[--devel]]]
call[name[parser].add_argument, parameter[constant[--force_env_full]]]
call[name[parser].add_argument, parameter[constant[--limit]]]
call[name[parser].add_argument, parameter[constant[--location]]]
if name[EFConfig].ALLOW_EF_VERSION_SKIP_PRECHECK begin[:]
call[name[parser].add_argument, parameter[constant[--noprecheck]]]
call[name[parser].add_argument, parameter[constant[--sr]]]
call[name[parser].add_argument, parameter[constant[--stable]]]
call[name[parser].add_argument, parameter[constant[--verbose]]]
variable[parsed_args] assign[=] call[name[vars], parameter[call[name[parser].parse_args, parameter[name[args]]]]]
variable[context] assign[=] call[name[EFVersionContext], parameter[]]
name[context]._build_number assign[=] call[name[parsed_args]][constant[build]]
name[context]._commit_hash assign[=] call[name[parsed_args]][constant[commit_hash]]
name[context].commit assign[=] call[name[parsed_args]][constant[commit]]
name[context].devel assign[=] call[name[parsed_args]][constant[devel]]
name[context]._force_env_full assign[=] call[name[parsed_args]][constant[force_env_full]]
<ast.Try object at 0x7da1b1a28730>
name[context]._get assign[=] call[name[parsed_args]][constant[get]]
name[context]._history assign[=] call[name[parsed_args]][constant[history]]
name[context]._key assign[=] call[name[parsed_args]][constant[key]]
if name[EFConfig].ALLOW_EF_VERSION_SKIP_PRECHECK begin[:]
name[context]._noprecheck assign[=] call[name[parsed_args]][constant[noprecheck]]
if <ast.UnaryOp object at 0x7da1b1b04250> begin[:]
call[name[fail], parameter[constant[Error in --limit. Valid range: 1..1000]]]
name[context]._limit assign[=] call[name[parsed_args]][constant[limit]]
name[context]._location assign[=] call[name[parsed_args]][constant[location]]
name[context]._rollback assign[=] call[name[parsed_args]][constant[rollback]]
name[context]._rollback_to assign[=] call[name[parsed_args]][constant[rollback_to]]
name[context]._service_name assign[=] call[name[parsed_args]][constant[service_name]]
name[context]._show assign[=] call[name[parsed_args]][constant[show]]
name[context]._stable assign[=] call[name[parsed_args]][constant[stable]]
name[context]._value assign[=] call[name[parsed_args]][constant[set]]
name[context].service_registry assign[=] call[name[EFServiceRegistry], parameter[call[name[parsed_args]][constant[sr]]]]
<ast.Global object at 0x7da1b1b04d30>
variable[VERBOSE] assign[=] call[name[parsed_args]][constant[verbose]]
call[name[validate_context], parameter[name[context]]]
return[name[context]] | keyword[def] identifier[handle_args_and_set_context] ( identifier[args] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] . identifier[join] ( identifier[EFConfig] . identifier[ENV_LIST] ))
identifier[group] = identifier[parser] . identifier[add_mutually_exclusive_group] ( identifier[required] = keyword[True] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[metavar] = literal[string] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[choices] =[ literal[string] , literal[string] ])
identifier[group] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[default] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] ,
identifier[default] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[type] = identifier[int] , identifier[default] = literal[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string]
literal[string] , identifier[default] = literal[string] )
keyword[if] identifier[EFConfig] . identifier[ALLOW_EF_VERSION_SKIP_PRECHECK] :
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] , identifier[default] = keyword[False] )
identifier[parsed_args] = identifier[vars] ( identifier[parser] . identifier[parse_args] ( identifier[args] ))
identifier[context] = identifier[EFVersionContext] ()
identifier[context] . identifier[_build_number] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_commit_hash] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[commit] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[devel] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_force_env_full] = identifier[parsed_args] [ literal[string] ]
keyword[try] :
identifier[context] . identifier[env] = identifier[parsed_args] [ literal[string] ]
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[fail] ( literal[string] . identifier[format] ( identifier[e] . identifier[message] ))
identifier[context] . identifier[_get] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_history] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_key] = identifier[parsed_args] [ literal[string] ]
keyword[if] identifier[EFConfig] . identifier[ALLOW_EF_VERSION_SKIP_PRECHECK] :
identifier[context] . identifier[_noprecheck] = identifier[parsed_args] [ literal[string] ]
keyword[if] keyword[not] literal[int] <= identifier[parsed_args] [ literal[string] ]<= literal[int] :
identifier[fail] ( literal[string] )
identifier[context] . identifier[_limit] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_location] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_rollback] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_rollback_to] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_service_name] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_show] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_stable] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[_value] = identifier[parsed_args] [ literal[string] ]
identifier[context] . identifier[service_registry] = identifier[EFServiceRegistry] ( identifier[parsed_args] [ literal[string] ])
keyword[global] identifier[VERBOSE]
identifier[VERBOSE] = identifier[parsed_args] [ literal[string] ]
identifier[validate_context] ( identifier[context] )
keyword[return] identifier[context] | def handle_args_and_set_context(args):
"""
Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFVersionContext object
"""
parser = argparse.ArgumentParser()
parser.add_argument('service_name', help='name of the service')
parser.add_argument('key', help="version key to look up for <service_name> such as 'ami-id' (list in EF_Config)")
parser.add_argument('env', help=', '.join(EFConfig.ENV_LIST))
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--get', help='get current version', action='store_true')
group.add_argument('--set', help='set current version of <key> to <value> for <service_name>')
group.add_argument('--rollback', help="set current version to most recent 'stable' version in history", action='store_true')
group.add_argument('--rollback-to', help='rollback current version to <ami-id> in history', action='store', metavar='<ami-id>')
group.add_argument('--history', help='Show version history for env/service/key', choices=['json', 'text'])
group.add_argument('--show', help="Show keys and values. '*' allowed for <key> and <env>", action='store_true', default=False)
parser.add_argument('--build', help='On --set, also set the externally defined build number associated with the version entity', default='')
parser.add_argument('--commit_hash', help='On --set, also set the commit hash associated with the version entity', default='')
parser.add_argument('--commit', help='Actually --set or --rollback (dry run if omitted)', action='store_true', default=False)
parser.add_argument('--devel', help="Allow running from branch; don't refresh from origin", action='store_true', default=False)
parser.add_argument('--force_env_full', help='Override env with env_full for account-scoped environments', action='store_true', default=False)
parser.add_argument('--limit', help="Limit 'history', 'rollback', 'show' to first N records (default 100, max 1000)", type=int, default=100)
parser.add_argument('--location', help="On --set, also mark the url location of the static build's version file tosupport dist-hash precheck", default='')
if EFConfig.ALLOW_EF_VERSION_SKIP_PRECHECK:
parser.add_argument('--noprecheck', help='--set or --rollback without precheck', action='store_true', default=False) # depends on [control=['if'], data=[]]
parser.add_argument('--sr', help='optional /path/to/service_registry_file.json', default=None)
parser.add_argument('--stable', help="On --set, also mark the version 'stable'", action='store_true')
parser.add_argument('--verbose', help='Print additional info', action='store_true', default=False)
# parse
parsed_args = vars(parser.parse_args(args))
context = EFVersionContext()
# marshall the inherited context values
context._build_number = parsed_args['build']
context._commit_hash = parsed_args['commit_hash']
context.commit = parsed_args['commit']
context.devel = parsed_args['devel']
context._force_env_full = parsed_args['force_env_full']
try:
context.env = parsed_args['env'] # depends on [control=['try'], data=[]]
except ValueError as e:
fail('Error in env: {}'.format(e.message)) # depends on [control=['except'], data=['e']]
# marshall this module's additional context values
context._get = parsed_args['get']
context._history = parsed_args['history']
context._key = parsed_args['key']
if EFConfig.ALLOW_EF_VERSION_SKIP_PRECHECK:
context._noprecheck = parsed_args['noprecheck'] # depends on [control=['if'], data=[]]
if not 1 <= parsed_args['limit'] <= 1000:
fail('Error in --limit. Valid range: 1..1000') # depends on [control=['if'], data=[]]
context._limit = parsed_args['limit']
context._location = parsed_args['location']
context._rollback = parsed_args['rollback']
context._rollback_to = parsed_args['rollback_to']
context._service_name = parsed_args['service_name']
context._show = parsed_args['show']
context._stable = parsed_args['stable']
context._value = parsed_args['set']
# Set up service registry and policy template path which depends on it
context.service_registry = EFServiceRegistry(parsed_args['sr'])
# VERBOSE is global
global VERBOSE
VERBOSE = parsed_args['verbose']
validate_context(context)
return context |
def group(self, indent=0, open='', close=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, open)
try:
yield
finally:
self.end_group(indent, close) | def function[group, parameter[self, indent, open, close]]:
constant[like begin_group / end_group but for the with statement.]
call[name[self].begin_group, parameter[name[indent], name[open]]]
<ast.Try object at 0x7da2054a47f0> | keyword[def] identifier[group] ( identifier[self] , identifier[indent] = literal[int] , identifier[open] = literal[string] , identifier[close] = literal[string] ):
literal[string]
identifier[self] . identifier[begin_group] ( identifier[indent] , identifier[open] )
keyword[try] :
keyword[yield]
keyword[finally] :
identifier[self] . identifier[end_group] ( identifier[indent] , identifier[close] ) | def group(self, indent=0, open='', close=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, open)
try:
yield # depends on [control=['try'], data=[]]
finally:
self.end_group(indent, close) |
def get_task_cls(cls, name):
"""
Returns an unambiguous class or raises an exception.
"""
task_cls = cls._get_reg().get(name)
if not task_cls:
raise TaskClassNotFoundException(cls._missing_task_msg(name))
if task_cls == cls.AMBIGUOUS_CLASS:
raise TaskClassAmbigiousException('Task %r is ambiguous' % name)
return task_cls | def function[get_task_cls, parameter[cls, name]]:
constant[
Returns an unambiguous class or raises an exception.
]
variable[task_cls] assign[=] call[call[name[cls]._get_reg, parameter[]].get, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b1f475b0> begin[:]
<ast.Raise object at 0x7da1b1f477c0>
if compare[name[task_cls] equal[==] name[cls].AMBIGUOUS_CLASS] begin[:]
<ast.Raise object at 0x7da1b1f46950>
return[name[task_cls]] | keyword[def] identifier[get_task_cls] ( identifier[cls] , identifier[name] ):
literal[string]
identifier[task_cls] = identifier[cls] . identifier[_get_reg] (). identifier[get] ( identifier[name] )
keyword[if] keyword[not] identifier[task_cls] :
keyword[raise] identifier[TaskClassNotFoundException] ( identifier[cls] . identifier[_missing_task_msg] ( identifier[name] ))
keyword[if] identifier[task_cls] == identifier[cls] . identifier[AMBIGUOUS_CLASS] :
keyword[raise] identifier[TaskClassAmbigiousException] ( literal[string] % identifier[name] )
keyword[return] identifier[task_cls] | def get_task_cls(cls, name):
"""
Returns an unambiguous class or raises an exception.
"""
task_cls = cls._get_reg().get(name)
if not task_cls:
raise TaskClassNotFoundException(cls._missing_task_msg(name)) # depends on [control=['if'], data=[]]
if task_cls == cls.AMBIGUOUS_CLASS:
raise TaskClassAmbigiousException('Task %r is ambiguous' % name) # depends on [control=['if'], data=[]]
return task_cls |
def _handle(self, request: Request, response: Response) -> TypeGenerator[Any, None, None]:
"""
request 解析后的回调,调用中间件,并处理 headers, body 发送。
"""
# request.start_time = datetime.now().timestamp()
# 创建一个新的会话上下文
ctx = self._context(
cast(asyncio.AbstractEventLoop, self._loop),
request,
response,
self,
)
request.app = self
response.app = self
request.ctx = ctx
response.ctx = ctx
request.response = response
response.request = request
# 把当前注册的中间件转为迭代器
middleware_iter = iter(self._middleware)
# 通过迭代器的模式生成一个执行下一个中间的调用方法
next_call = self._next_middleware(middleware_iter, ctx)
# 顺序执行中间件
yield from self._middleware_call(middleware_iter, ctx, next_call)
# 设置 cookies
cookies_headers = ctx.cookies.headers()
if cookies_headers is not None:
ctx.response.set("Set-Cookie", cookies_headers)
# 写出 headers
ctx.response.flush_headers()
# 写出 body
ctx.response.flush_body() | def function[_handle, parameter[self, request, response]]:
constant[
request 解析后的回调,调用中间件,并处理 headers, body 发送。
]
variable[ctx] assign[=] call[name[self]._context, parameter[call[name[cast], parameter[name[asyncio].AbstractEventLoop, name[self]._loop]], name[request], name[response], name[self]]]
name[request].app assign[=] name[self]
name[response].app assign[=] name[self]
name[request].ctx assign[=] name[ctx]
name[response].ctx assign[=] name[ctx]
name[request].response assign[=] name[response]
name[response].request assign[=] name[request]
variable[middleware_iter] assign[=] call[name[iter], parameter[name[self]._middleware]]
variable[next_call] assign[=] call[name[self]._next_middleware, parameter[name[middleware_iter], name[ctx]]]
<ast.YieldFrom object at 0x7da20c6abd30>
variable[cookies_headers] assign[=] call[name[ctx].cookies.headers, parameter[]]
if compare[name[cookies_headers] is_not constant[None]] begin[:]
call[name[ctx].response.set, parameter[constant[Set-Cookie], name[cookies_headers]]]
call[name[ctx].response.flush_headers, parameter[]]
call[name[ctx].response.flush_body, parameter[]] | keyword[def] identifier[_handle] ( identifier[self] , identifier[request] : identifier[Request] , identifier[response] : identifier[Response] )-> identifier[TypeGenerator] [ identifier[Any] , keyword[None] , keyword[None] ]:
literal[string]
identifier[ctx] = identifier[self] . identifier[_context] (
identifier[cast] ( identifier[asyncio] . identifier[AbstractEventLoop] , identifier[self] . identifier[_loop] ),
identifier[request] ,
identifier[response] ,
identifier[self] ,
)
identifier[request] . identifier[app] = identifier[self]
identifier[response] . identifier[app] = identifier[self]
identifier[request] . identifier[ctx] = identifier[ctx]
identifier[response] . identifier[ctx] = identifier[ctx]
identifier[request] . identifier[response] = identifier[response]
identifier[response] . identifier[request] = identifier[request]
identifier[middleware_iter] = identifier[iter] ( identifier[self] . identifier[_middleware] )
identifier[next_call] = identifier[self] . identifier[_next_middleware] ( identifier[middleware_iter] , identifier[ctx] )
keyword[yield] keyword[from] identifier[self] . identifier[_middleware_call] ( identifier[middleware_iter] , identifier[ctx] , identifier[next_call] )
identifier[cookies_headers] = identifier[ctx] . identifier[cookies] . identifier[headers] ()
keyword[if] identifier[cookies_headers] keyword[is] keyword[not] keyword[None] :
identifier[ctx] . identifier[response] . identifier[set] ( literal[string] , identifier[cookies_headers] )
identifier[ctx] . identifier[response] . identifier[flush_headers] ()
identifier[ctx] . identifier[response] . identifier[flush_body] () | def _handle(self, request: Request, response: Response) -> TypeGenerator[Any, None, None]:
"""
request 解析后的回调,调用中间件,并处理 headers, body 发送。
"""
# request.start_time = datetime.now().timestamp()
# 创建一个新的会话上下文
ctx = self._context(cast(asyncio.AbstractEventLoop, self._loop), request, response, self)
request.app = self
response.app = self
request.ctx = ctx
response.ctx = ctx
request.response = response
response.request = request
# 把当前注册的中间件转为迭代器
middleware_iter = iter(self._middleware)
# 通过迭代器的模式生成一个执行下一个中间的调用方法
next_call = self._next_middleware(middleware_iter, ctx)
# 顺序执行中间件
yield from self._middleware_call(middleware_iter, ctx, next_call)
# 设置 cookies
cookies_headers = ctx.cookies.headers()
if cookies_headers is not None:
ctx.response.set('Set-Cookie', cookies_headers) # depends on [control=['if'], data=['cookies_headers']]
# 写出 headers
ctx.response.flush_headers()
# 写出 body
ctx.response.flush_body() |
def list_requests(self, status=None, assignee=None, author=None):
"""
Get all pull requests of a project.
:param status: filters the status of the requests
:param assignee: filters the assignee of the requests
:param author: filters the author of the requests
:return:
"""
request_url = "{}pull-requests".format(self.create_basic_url())
payload = {}
if status is not None:
payload['status'] = status
if assignee is not None:
payload['assignee'] = assignee
if author is not None:
payload['author'] = author
return_value = self._call_api(request_url, params=payload)
return return_value['requests'] | def function[list_requests, parameter[self, status, assignee, author]]:
constant[
Get all pull requests of a project.
:param status: filters the status of the requests
:param assignee: filters the assignee of the requests
:param author: filters the author of the requests
:return:
]
variable[request_url] assign[=] call[constant[{}pull-requests].format, parameter[call[name[self].create_basic_url, parameter[]]]]
variable[payload] assign[=] dictionary[[], []]
if compare[name[status] is_not constant[None]] begin[:]
call[name[payload]][constant[status]] assign[=] name[status]
if compare[name[assignee] is_not constant[None]] begin[:]
call[name[payload]][constant[assignee]] assign[=] name[assignee]
if compare[name[author] is_not constant[None]] begin[:]
call[name[payload]][constant[author]] assign[=] name[author]
variable[return_value] assign[=] call[name[self]._call_api, parameter[name[request_url]]]
return[call[name[return_value]][constant[requests]]] | keyword[def] identifier[list_requests] ( identifier[self] , identifier[status] = keyword[None] , identifier[assignee] = keyword[None] , identifier[author] = keyword[None] ):
literal[string]
identifier[request_url] = literal[string] . identifier[format] ( identifier[self] . identifier[create_basic_url] ())
identifier[payload] ={}
keyword[if] identifier[status] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[status]
keyword[if] identifier[assignee] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[assignee]
keyword[if] identifier[author] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[author]
identifier[return_value] = identifier[self] . identifier[_call_api] ( identifier[request_url] , identifier[params] = identifier[payload] )
keyword[return] identifier[return_value] [ literal[string] ] | def list_requests(self, status=None, assignee=None, author=None):
"""
Get all pull requests of a project.
:param status: filters the status of the requests
:param assignee: filters the assignee of the requests
:param author: filters the author of the requests
:return:
"""
request_url = '{}pull-requests'.format(self.create_basic_url())
payload = {}
if status is not None:
payload['status'] = status # depends on [control=['if'], data=['status']]
if assignee is not None:
payload['assignee'] = assignee # depends on [control=['if'], data=['assignee']]
if author is not None:
payload['author'] = author # depends on [control=['if'], data=['author']]
return_value = self._call_api(request_url, params=payload)
return return_value['requests'] |
def locate(self, minimum_version=None, maximum_version=None, jdk=False):
"""Finds a java distribution that meets the given constraints and returns it.
First looks for a cached version that was previously located, otherwise calls locate().
:param minimum_version: minimum jvm version to look for (eg, 1.7).
The stricter of this and `--jvm-distributions-minimum-version` is used.
:param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
The stricter of this and `--jvm-distributions-maximum-version` is used.
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution.
:rtype: :class:`Distribution`
:raises: :class:`Distribution.Error` if no suitable java distribution could be found.
"""
def _get_stricter_version(a, b, name, stricter):
version_a = _parse_java_version(name, a)
version_b = _parse_java_version(name, b)
if version_a is None:
return version_b
if version_b is None:
return version_a
return stricter(version_a, version_b)
# Take the tighter constraint of method args and subsystem options.
minimum_version = _get_stricter_version(minimum_version,
self._minimum_version,
"minimum_version",
max)
maximum_version = _get_stricter_version(maximum_version,
self._maximum_version,
"maximum_version",
min)
key = (minimum_version, maximum_version, jdk)
dist = self._cache.get(key)
if not dist:
dist = self._scan_constraint_match(minimum_version, maximum_version, jdk)
if not dist:
dist = self._locate(minimum_version=minimum_version,
maximum_version=maximum_version,
jdk=jdk)
self._cache[key] = dist
return dist | def function[locate, parameter[self, minimum_version, maximum_version, jdk]]:
constant[Finds a java distribution that meets the given constraints and returns it.
First looks for a cached version that was previously located, otherwise calls locate().
:param minimum_version: minimum jvm version to look for (eg, 1.7).
The stricter of this and `--jvm-distributions-minimum-version` is used.
:param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
The stricter of this and `--jvm-distributions-maximum-version` is used.
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution.
:rtype: :class:`Distribution`
:raises: :class:`Distribution.Error` if no suitable java distribution could be found.
]
def function[_get_stricter_version, parameter[a, b, name, stricter]]:
variable[version_a] assign[=] call[name[_parse_java_version], parameter[name[name], name[a]]]
variable[version_b] assign[=] call[name[_parse_java_version], parameter[name[name], name[b]]]
if compare[name[version_a] is constant[None]] begin[:]
return[name[version_b]]
if compare[name[version_b] is constant[None]] begin[:]
return[name[version_a]]
return[call[name[stricter], parameter[name[version_a], name[version_b]]]]
variable[minimum_version] assign[=] call[name[_get_stricter_version], parameter[name[minimum_version], name[self]._minimum_version, constant[minimum_version], name[max]]]
variable[maximum_version] assign[=] call[name[_get_stricter_version], parameter[name[maximum_version], name[self]._maximum_version, constant[maximum_version], name[min]]]
variable[key] assign[=] tuple[[<ast.Name object at 0x7da1b1d6d990>, <ast.Name object at 0x7da1b1d6e7a0>, <ast.Name object at 0x7da1b1d6e5f0>]]
variable[dist] assign[=] call[name[self]._cache.get, parameter[name[key]]]
if <ast.UnaryOp object at 0x7da1b1d6ef50> begin[:]
variable[dist] assign[=] call[name[self]._scan_constraint_match, parameter[name[minimum_version], name[maximum_version], name[jdk]]]
if <ast.UnaryOp object at 0x7da1b1d6d6c0> begin[:]
variable[dist] assign[=] call[name[self]._locate, parameter[]]
call[name[self]._cache][name[key]] assign[=] name[dist]
return[name[dist]] | keyword[def] identifier[locate] ( identifier[self] , identifier[minimum_version] = keyword[None] , identifier[maximum_version] = keyword[None] , identifier[jdk] = keyword[False] ):
literal[string]
keyword[def] identifier[_get_stricter_version] ( identifier[a] , identifier[b] , identifier[name] , identifier[stricter] ):
identifier[version_a] = identifier[_parse_java_version] ( identifier[name] , identifier[a] )
identifier[version_b] = identifier[_parse_java_version] ( identifier[name] , identifier[b] )
keyword[if] identifier[version_a] keyword[is] keyword[None] :
keyword[return] identifier[version_b]
keyword[if] identifier[version_b] keyword[is] keyword[None] :
keyword[return] identifier[version_a]
keyword[return] identifier[stricter] ( identifier[version_a] , identifier[version_b] )
identifier[minimum_version] = identifier[_get_stricter_version] ( identifier[minimum_version] ,
identifier[self] . identifier[_minimum_version] ,
literal[string] ,
identifier[max] )
identifier[maximum_version] = identifier[_get_stricter_version] ( identifier[maximum_version] ,
identifier[self] . identifier[_maximum_version] ,
literal[string] ,
identifier[min] )
identifier[key] =( identifier[minimum_version] , identifier[maximum_version] , identifier[jdk] )
identifier[dist] = identifier[self] . identifier[_cache] . identifier[get] ( identifier[key] )
keyword[if] keyword[not] identifier[dist] :
identifier[dist] = identifier[self] . identifier[_scan_constraint_match] ( identifier[minimum_version] , identifier[maximum_version] , identifier[jdk] )
keyword[if] keyword[not] identifier[dist] :
identifier[dist] = identifier[self] . identifier[_locate] ( identifier[minimum_version] = identifier[minimum_version] ,
identifier[maximum_version] = identifier[maximum_version] ,
identifier[jdk] = identifier[jdk] )
identifier[self] . identifier[_cache] [ identifier[key] ]= identifier[dist]
keyword[return] identifier[dist] | def locate(self, minimum_version=None, maximum_version=None, jdk=False):
"""Finds a java distribution that meets the given constraints and returns it.
First looks for a cached version that was previously located, otherwise calls locate().
:param minimum_version: minimum jvm version to look for (eg, 1.7).
The stricter of this and `--jvm-distributions-minimum-version` is used.
:param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
The stricter of this and `--jvm-distributions-maximum-version` is used.
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution.
:rtype: :class:`Distribution`
:raises: :class:`Distribution.Error` if no suitable java distribution could be found.
"""
def _get_stricter_version(a, b, name, stricter):
version_a = _parse_java_version(name, a)
version_b = _parse_java_version(name, b)
if version_a is None:
return version_b # depends on [control=['if'], data=[]]
if version_b is None:
return version_a # depends on [control=['if'], data=[]]
return stricter(version_a, version_b)
# Take the tighter constraint of method args and subsystem options.
minimum_version = _get_stricter_version(minimum_version, self._minimum_version, 'minimum_version', max)
maximum_version = _get_stricter_version(maximum_version, self._maximum_version, 'maximum_version', min)
key = (minimum_version, maximum_version, jdk)
dist = self._cache.get(key)
if not dist:
dist = self._scan_constraint_match(minimum_version, maximum_version, jdk)
if not dist:
dist = self._locate(minimum_version=minimum_version, maximum_version=maximum_version, jdk=jdk) # depends on [control=['if'], data=[]]
self._cache[key] = dist # depends on [control=['if'], data=[]]
return dist |
def add(self, *args, **kwargs):
"""Add the instance tied to the field for the given "value" (via `args`) to the index
For the parameters, see ``BaseIndex.add``
Notes
-----
This method calls the ``store`` method that should be overridden in subclasses
to store in the index sorted-set key
"""
check_uniqueness = kwargs.get('check_uniqueness', True)
if self.field.unique and check_uniqueness:
self.check_uniqueness(*args)
key = self.get_storage_key(*args)
args = list(args)
value = args[-1]
pk = self.instance.pk.get()
logger.debug("adding %s to index %s" % (pk, key))
self.store(key, pk, self.prepare_value_for_storage(value, pk))
self._indexed_values.add(tuple(args)) | def function[add, parameter[self]]:
constant[Add the instance tied to the field for the given "value" (via `args`) to the index
For the parameters, see ``BaseIndex.add``
Notes
-----
This method calls the ``store`` method that should be overridden in subclasses
to store in the index sorted-set key
]
variable[check_uniqueness] assign[=] call[name[kwargs].get, parameter[constant[check_uniqueness], constant[True]]]
if <ast.BoolOp object at 0x7da1b2525630> begin[:]
call[name[self].check_uniqueness, parameter[<ast.Starred object at 0x7da1b2524790>]]
variable[key] assign[=] call[name[self].get_storage_key, parameter[<ast.Starred object at 0x7da1b2524250>]]
variable[args] assign[=] call[name[list], parameter[name[args]]]
variable[value] assign[=] call[name[args]][<ast.UnaryOp object at 0x7da1b261ba00>]
variable[pk] assign[=] call[name[self].instance.pk.get, parameter[]]
call[name[logger].debug, parameter[binary_operation[constant[adding %s to index %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b261a200>, <ast.Name object at 0x7da1b26180a0>]]]]]
call[name[self].store, parameter[name[key], name[pk], call[name[self].prepare_value_for_storage, parameter[name[value], name[pk]]]]]
call[name[self]._indexed_values.add, parameter[call[name[tuple], parameter[name[args]]]]] | keyword[def] identifier[add] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[check_uniqueness] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] )
keyword[if] identifier[self] . identifier[field] . identifier[unique] keyword[and] identifier[check_uniqueness] :
identifier[self] . identifier[check_uniqueness] (* identifier[args] )
identifier[key] = identifier[self] . identifier[get_storage_key] (* identifier[args] )
identifier[args] = identifier[list] ( identifier[args] )
identifier[value] = identifier[args] [- literal[int] ]
identifier[pk] = identifier[self] . identifier[instance] . identifier[pk] . identifier[get] ()
identifier[logger] . identifier[debug] ( literal[string] %( identifier[pk] , identifier[key] ))
identifier[self] . identifier[store] ( identifier[key] , identifier[pk] , identifier[self] . identifier[prepare_value_for_storage] ( identifier[value] , identifier[pk] ))
identifier[self] . identifier[_indexed_values] . identifier[add] ( identifier[tuple] ( identifier[args] )) | def add(self, *args, **kwargs):
"""Add the instance tied to the field for the given "value" (via `args`) to the index
For the parameters, see ``BaseIndex.add``
Notes
-----
This method calls the ``store`` method that should be overridden in subclasses
to store in the index sorted-set key
"""
check_uniqueness = kwargs.get('check_uniqueness', True)
if self.field.unique and check_uniqueness:
self.check_uniqueness(*args) # depends on [control=['if'], data=[]]
key = self.get_storage_key(*args)
args = list(args)
value = args[-1]
pk = self.instance.pk.get()
logger.debug('adding %s to index %s' % (pk, key))
self.store(key, pk, self.prepare_value_for_storage(value, pk))
self._indexed_values.add(tuple(args)) |
def _maybe_call_volatility_fn_and_grads(volatility_fn,
state,
volatility_fn_results=None,
grads_volatility_fn=None,
sample_shape=None,
parallel_iterations=10):
"""Helper which computes `volatility_fn` results and grads, if needed."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
needs_volatility_fn_gradients = grads_volatility_fn is None
# Convert `volatility_fn_results` to a list
if volatility_fn_results is None:
volatility_fn_results = volatility_fn(*state_parts)
volatility_fn_results = (list(volatility_fn_results)
if mcmc_util.is_list_like(volatility_fn_results)
else [volatility_fn_results])
if len(volatility_fn_results) == 1:
volatility_fn_results *= len(state_parts)
if len(state_parts) != len(volatility_fn_results):
raise ValueError('`volatility_fn` should return a tensor or a list '
'of the same length as `current_state`.')
# The shape of 'volatility_parts' needs to have the number of chains as a
# leading dimension. For determinism we broadcast 'volatility_parts' to the
# shape of `state_parts` since each dimension of `state_parts` could have a
# different volatility value.
volatility_fn_results = _maybe_broadcast_volatility(volatility_fn_results,
state_parts)
if grads_volatility_fn is None:
[
_,
grads_volatility_fn,
] = diag_jacobian(
xs=state_parts,
ys=volatility_fn_results,
sample_shape=sample_shape,
parallel_iterations=parallel_iterations,
fn=volatility_fn)
# Compute gradient of `volatility_parts**2`
if needs_volatility_fn_gradients:
grads_volatility_fn = [
2. * g * volatility if g is not None else tf.zeros_like(
fn_arg, dtype=fn_arg.dtype.base_dtype)
for g, volatility, fn_arg in zip(
grads_volatility_fn, volatility_fn_results, state_parts)
]
return volatility_fn_results, grads_volatility_fn | def function[_maybe_call_volatility_fn_and_grads, parameter[volatility_fn, state, volatility_fn_results, grads_volatility_fn, sample_shape, parallel_iterations]]:
constant[Helper which computes `volatility_fn` results and grads, if needed.]
variable[state_parts] assign[=] <ast.IfExp object at 0x7da1b033f850>
variable[needs_volatility_fn_gradients] assign[=] compare[name[grads_volatility_fn] is constant[None]]
if compare[name[volatility_fn_results] is constant[None]] begin[:]
variable[volatility_fn_results] assign[=] call[name[volatility_fn], parameter[<ast.Starred object at 0x7da1b0357190>]]
variable[volatility_fn_results] assign[=] <ast.IfExp object at 0x7da1b0357280>
if compare[call[name[len], parameter[name[volatility_fn_results]]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da1b0357370>
if compare[call[name[len], parameter[name[state_parts]]] not_equal[!=] call[name[len], parameter[name[volatility_fn_results]]]] begin[:]
<ast.Raise object at 0x7da1b03543a0>
variable[volatility_fn_results] assign[=] call[name[_maybe_broadcast_volatility], parameter[name[volatility_fn_results], name[state_parts]]]
if compare[name[grads_volatility_fn] is constant[None]] begin[:]
<ast.List object at 0x7da1b0354430> assign[=] call[name[diag_jacobian], parameter[]]
if name[needs_volatility_fn_gradients] begin[:]
variable[grads_volatility_fn] assign[=] <ast.ListComp object at 0x7da1b05be170>
return[tuple[[<ast.Name object at 0x7da1b05bdc30>, <ast.Name object at 0x7da1b05bc8b0>]]] | keyword[def] identifier[_maybe_call_volatility_fn_and_grads] ( identifier[volatility_fn] ,
identifier[state] ,
identifier[volatility_fn_results] = keyword[None] ,
identifier[grads_volatility_fn] = keyword[None] ,
identifier[sample_shape] = keyword[None] ,
identifier[parallel_iterations] = literal[int] ):
literal[string]
identifier[state_parts] = identifier[list] ( identifier[state] ) keyword[if] identifier[mcmc_util] . identifier[is_list_like] ( identifier[state] ) keyword[else] [ identifier[state] ]
identifier[needs_volatility_fn_gradients] = identifier[grads_volatility_fn] keyword[is] keyword[None]
keyword[if] identifier[volatility_fn_results] keyword[is] keyword[None] :
identifier[volatility_fn_results] = identifier[volatility_fn] (* identifier[state_parts] )
identifier[volatility_fn_results] =( identifier[list] ( identifier[volatility_fn_results] )
keyword[if] identifier[mcmc_util] . identifier[is_list_like] ( identifier[volatility_fn_results] )
keyword[else] [ identifier[volatility_fn_results] ])
keyword[if] identifier[len] ( identifier[volatility_fn_results] )== literal[int] :
identifier[volatility_fn_results] *= identifier[len] ( identifier[state_parts] )
keyword[if] identifier[len] ( identifier[state_parts] )!= identifier[len] ( identifier[volatility_fn_results] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[volatility_fn_results] = identifier[_maybe_broadcast_volatility] ( identifier[volatility_fn_results] ,
identifier[state_parts] )
keyword[if] identifier[grads_volatility_fn] keyword[is] keyword[None] :
[
identifier[_] ,
identifier[grads_volatility_fn] ,
]= identifier[diag_jacobian] (
identifier[xs] = identifier[state_parts] ,
identifier[ys] = identifier[volatility_fn_results] ,
identifier[sample_shape] = identifier[sample_shape] ,
identifier[parallel_iterations] = identifier[parallel_iterations] ,
identifier[fn] = identifier[volatility_fn] )
keyword[if] identifier[needs_volatility_fn_gradients] :
identifier[grads_volatility_fn] =[
literal[int] * identifier[g] * identifier[volatility] keyword[if] identifier[g] keyword[is] keyword[not] keyword[None] keyword[else] identifier[tf] . identifier[zeros_like] (
identifier[fn_arg] , identifier[dtype] = identifier[fn_arg] . identifier[dtype] . identifier[base_dtype] )
keyword[for] identifier[g] , identifier[volatility] , identifier[fn_arg] keyword[in] identifier[zip] (
identifier[grads_volatility_fn] , identifier[volatility_fn_results] , identifier[state_parts] )
]
keyword[return] identifier[volatility_fn_results] , identifier[grads_volatility_fn] | def _maybe_call_volatility_fn_and_grads(volatility_fn, state, volatility_fn_results=None, grads_volatility_fn=None, sample_shape=None, parallel_iterations=10):
"""Helper which computes `volatility_fn` results and grads, if needed."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
needs_volatility_fn_gradients = grads_volatility_fn is None
# Convert `volatility_fn_results` to a list
if volatility_fn_results is None:
volatility_fn_results = volatility_fn(*state_parts) # depends on [control=['if'], data=['volatility_fn_results']]
volatility_fn_results = list(volatility_fn_results) if mcmc_util.is_list_like(volatility_fn_results) else [volatility_fn_results]
if len(volatility_fn_results) == 1:
volatility_fn_results *= len(state_parts) # depends on [control=['if'], data=[]]
if len(state_parts) != len(volatility_fn_results):
raise ValueError('`volatility_fn` should return a tensor or a list of the same length as `current_state`.') # depends on [control=['if'], data=[]]
# The shape of 'volatility_parts' needs to have the number of chains as a
# leading dimension. For determinism we broadcast 'volatility_parts' to the
# shape of `state_parts` since each dimension of `state_parts` could have a
# different volatility value.
volatility_fn_results = _maybe_broadcast_volatility(volatility_fn_results, state_parts)
if grads_volatility_fn is None:
[_, grads_volatility_fn] = diag_jacobian(xs=state_parts, ys=volatility_fn_results, sample_shape=sample_shape, parallel_iterations=parallel_iterations, fn=volatility_fn) # depends on [control=['if'], data=['grads_volatility_fn']]
# Compute gradient of `volatility_parts**2`
if needs_volatility_fn_gradients:
grads_volatility_fn = [2.0 * g * volatility if g is not None else tf.zeros_like(fn_arg, dtype=fn_arg.dtype.base_dtype) for (g, volatility, fn_arg) in zip(grads_volatility_fn, volatility_fn_results, state_parts)] # depends on [control=['if'], data=[]]
return (volatility_fn_results, grads_volatility_fn) |
def _on_work_finished(self, results):
"""
Display results.
:param status: Response status
:param results: Response data, messages.
"""
messages = []
for msg in results:
msg = CheckerMessage(*msg)
if msg.line >= self.editor.blockCount():
msg.line = self.editor.blockCount() - 1
block = self.editor.document().findBlockByNumber(msg.line)
msg.block = block
messages.append(msg)
self.add_messages(messages) | def function[_on_work_finished, parameter[self, results]]:
constant[
Display results.
:param status: Response status
:param results: Response data, messages.
]
variable[messages] assign[=] list[[]]
for taget[name[msg]] in starred[name[results]] begin[:]
variable[msg] assign[=] call[name[CheckerMessage], parameter[<ast.Starred object at 0x7da20c6c4850>]]
if compare[name[msg].line greater_or_equal[>=] call[name[self].editor.blockCount, parameter[]]] begin[:]
name[msg].line assign[=] binary_operation[call[name[self].editor.blockCount, parameter[]] - constant[1]]
variable[block] assign[=] call[call[name[self].editor.document, parameter[]].findBlockByNumber, parameter[name[msg].line]]
name[msg].block assign[=] name[block]
call[name[messages].append, parameter[name[msg]]]
call[name[self].add_messages, parameter[name[messages]]] | keyword[def] identifier[_on_work_finished] ( identifier[self] , identifier[results] ):
literal[string]
identifier[messages] =[]
keyword[for] identifier[msg] keyword[in] identifier[results] :
identifier[msg] = identifier[CheckerMessage] (* identifier[msg] )
keyword[if] identifier[msg] . identifier[line] >= identifier[self] . identifier[editor] . identifier[blockCount] ():
identifier[msg] . identifier[line] = identifier[self] . identifier[editor] . identifier[blockCount] ()- literal[int]
identifier[block] = identifier[self] . identifier[editor] . identifier[document] (). identifier[findBlockByNumber] ( identifier[msg] . identifier[line] )
identifier[msg] . identifier[block] = identifier[block]
identifier[messages] . identifier[append] ( identifier[msg] )
identifier[self] . identifier[add_messages] ( identifier[messages] ) | def _on_work_finished(self, results):
"""
Display results.
:param status: Response status
:param results: Response data, messages.
"""
messages = []
for msg in results:
msg = CheckerMessage(*msg)
if msg.line >= self.editor.blockCount():
msg.line = self.editor.blockCount() - 1 # depends on [control=['if'], data=[]]
block = self.editor.document().findBlockByNumber(msg.line)
msg.block = block
messages.append(msg) # depends on [control=['for'], data=['msg']]
self.add_messages(messages) |
def parameter_value(self, parameter, read_cached=True):
"""Return a value of one of the monitored paramaters.
This method will try to retrieve the data from cache and only
request it by bluetooth if no cached value is stored or the cache is
expired.
This behaviour can be overwritten by the "read_cached" parameter.
"""
# Special handling for battery attribute
if parameter == MI_BATTERY:
return self.battery_level()
# Use the lock to make sure the cache isn't updated multiple times
with self.lock:
if (read_cached is False) or \
(self._last_read is None) or \
(datetime.now() - self._cache_timeout > self._last_read):
self.fill_cache()
else:
_LOGGER.debug("Using cache (%s < %s)",
datetime.now() - self._last_read,
self._cache_timeout)
if self.cache_available() and (len(self._cache) == 16):
return self._parse_data()[parameter]
else:
raise BluetoothBackendException("Could not read data from Mi Flora sensor %s" % self._mac) | def function[parameter_value, parameter[self, parameter, read_cached]]:
constant[Return a value of one of the monitored paramaters.
This method will try to retrieve the data from cache and only
request it by bluetooth if no cached value is stored or the cache is
expired.
This behaviour can be overwritten by the "read_cached" parameter.
]
if compare[name[parameter] equal[==] name[MI_BATTERY]] begin[:]
return[call[name[self].battery_level, parameter[]]]
with name[self].lock begin[:]
if <ast.BoolOp object at 0x7da18eb579d0> begin[:]
call[name[self].fill_cache, parameter[]]
if <ast.BoolOp object at 0x7da18eb549d0> begin[:]
return[call[call[name[self]._parse_data, parameter[]]][name[parameter]]] | keyword[def] identifier[parameter_value] ( identifier[self] , identifier[parameter] , identifier[read_cached] = keyword[True] ):
literal[string]
keyword[if] identifier[parameter] == identifier[MI_BATTERY] :
keyword[return] identifier[self] . identifier[battery_level] ()
keyword[with] identifier[self] . identifier[lock] :
keyword[if] ( identifier[read_cached] keyword[is] keyword[False] ) keyword[or] ( identifier[self] . identifier[_last_read] keyword[is] keyword[None] ) keyword[or] ( identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_cache_timeout] > identifier[self] . identifier[_last_read] ):
identifier[self] . identifier[fill_cache] ()
keyword[else] :
identifier[_LOGGER] . identifier[debug] ( literal[string] ,
identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_last_read] ,
identifier[self] . identifier[_cache_timeout] )
keyword[if] identifier[self] . identifier[cache_available] () keyword[and] ( identifier[len] ( identifier[self] . identifier[_cache] )== literal[int] ):
keyword[return] identifier[self] . identifier[_parse_data] ()[ identifier[parameter] ]
keyword[else] :
keyword[raise] identifier[BluetoothBackendException] ( literal[string] % identifier[self] . identifier[_mac] ) | def parameter_value(self, parameter, read_cached=True):
"""Return a value of one of the monitored paramaters.
This method will try to retrieve the data from cache and only
request it by bluetooth if no cached value is stored or the cache is
expired.
This behaviour can be overwritten by the "read_cached" parameter.
"""
# Special handling for battery attribute
if parameter == MI_BATTERY:
return self.battery_level() # depends on [control=['if'], data=[]]
# Use the lock to make sure the cache isn't updated multiple times
with self.lock:
if read_cached is False or self._last_read is None or datetime.now() - self._cache_timeout > self._last_read:
self.fill_cache() # depends on [control=['if'], data=[]]
else:
_LOGGER.debug('Using cache (%s < %s)', datetime.now() - self._last_read, self._cache_timeout) # depends on [control=['with'], data=[]]
if self.cache_available() and len(self._cache) == 16:
return self._parse_data()[parameter] # depends on [control=['if'], data=[]]
else:
raise BluetoothBackendException('Could not read data from Mi Flora sensor %s' % self._mac) |
def regex(expression, flags=re.IGNORECASE):
"""
Convenient shortcut to ``re.compile()`` for fast, easy to use
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
expression (str): string based regular expression.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}')))
"""
return re.compile(expression, flags=flags) | def function[regex, parameter[expression, flags]]:
constant[
Convenient shortcut to ``re.compile()`` for fast, easy to use
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
expression (str): string based regular expression.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}')))
]
return[call[name[re].compile, parameter[name[expression]]]] | keyword[def] identifier[regex] ( identifier[expression] , identifier[flags] = identifier[re] . identifier[IGNORECASE] ):
literal[string]
keyword[return] identifier[re] . identifier[compile] ( identifier[expression] , identifier[flags] = identifier[flags] ) | def regex(expression, flags=re.IGNORECASE):
"""
Convenient shortcut to ``re.compile()`` for fast, easy to use
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
expression (str): string based regular expression.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}')))
"""
return re.compile(expression, flags=flags) |
def subdir_path(directory, relative):
"""Returns a file path relative to another path."""
item_bits = directory.split(os.sep)
relative_bits = relative.split(os.sep)
for i, _item in enumerate(item_bits):
if i == len(relative_bits) - 1:
return os.sep.join(item_bits[i:])
else:
if item_bits[i] != relative_bits[i]:
return None
return None | def function[subdir_path, parameter[directory, relative]]:
constant[Returns a file path relative to another path.]
variable[item_bits] assign[=] call[name[directory].split, parameter[name[os].sep]]
variable[relative_bits] assign[=] call[name[relative].split, parameter[name[os].sep]]
for taget[tuple[[<ast.Name object at 0x7da1b1b6b3d0>, <ast.Name object at 0x7da1b1b6a080>]]] in starred[call[name[enumerate], parameter[name[item_bits]]]] begin[:]
if compare[name[i] equal[==] binary_operation[call[name[len], parameter[name[relative_bits]]] - constant[1]]] begin[:]
return[call[name[os].sep.join, parameter[call[name[item_bits]][<ast.Slice object at 0x7da1b1b69cc0>]]]]
return[constant[None]] | keyword[def] identifier[subdir_path] ( identifier[directory] , identifier[relative] ):
literal[string]
identifier[item_bits] = identifier[directory] . identifier[split] ( identifier[os] . identifier[sep] )
identifier[relative_bits] = identifier[relative] . identifier[split] ( identifier[os] . identifier[sep] )
keyword[for] identifier[i] , identifier[_item] keyword[in] identifier[enumerate] ( identifier[item_bits] ):
keyword[if] identifier[i] == identifier[len] ( identifier[relative_bits] )- literal[int] :
keyword[return] identifier[os] . identifier[sep] . identifier[join] ( identifier[item_bits] [ identifier[i] :])
keyword[else] :
keyword[if] identifier[item_bits] [ identifier[i] ]!= identifier[relative_bits] [ identifier[i] ]:
keyword[return] keyword[None]
keyword[return] keyword[None] | def subdir_path(directory, relative):
"""Returns a file path relative to another path."""
item_bits = directory.split(os.sep)
relative_bits = relative.split(os.sep)
for (i, _item) in enumerate(item_bits):
if i == len(relative_bits) - 1:
return os.sep.join(item_bits[i:]) # depends on [control=['if'], data=['i']]
elif item_bits[i] != relative_bits[i]:
return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return None |
def _make_requires(flag, error):
"""
Builds a decorator that ensures that functions that rely on OpenSSL
functions that are not present in this build raise NotImplementedError,
rather than AttributeError coming out of cryptography.
:param flag: A cryptography flag that guards the functions, e.g.
``Cryptography_HAS_NEXTPROTONEG``.
:param error: The string to be used in the exception if the flag is false.
"""
def _requires_decorator(func):
if not flag:
@wraps(func)
def explode(*args, **kwargs):
raise NotImplementedError(error)
return explode
else:
return func
return _requires_decorator | def function[_make_requires, parameter[flag, error]]:
constant[
Builds a decorator that ensures that functions that rely on OpenSSL
functions that are not present in this build raise NotImplementedError,
rather than AttributeError coming out of cryptography.
:param flag: A cryptography flag that guards the functions, e.g.
``Cryptography_HAS_NEXTPROTONEG``.
:param error: The string to be used in the exception if the flag is false.
]
def function[_requires_decorator, parameter[func]]:
if <ast.UnaryOp object at 0x7da1b025af20> begin[:]
def function[explode, parameter[]]:
<ast.Raise object at 0x7da1b025b1c0>
return[name[explode]]
return[name[_requires_decorator]] | keyword[def] identifier[_make_requires] ( identifier[flag] , identifier[error] ):
literal[string]
keyword[def] identifier[_requires_decorator] ( identifier[func] ):
keyword[if] keyword[not] identifier[flag] :
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[explode] (* identifier[args] ,** identifier[kwargs] ):
keyword[raise] identifier[NotImplementedError] ( identifier[error] )
keyword[return] identifier[explode]
keyword[else] :
keyword[return] identifier[func]
keyword[return] identifier[_requires_decorator] | def _make_requires(flag, error):
"""
Builds a decorator that ensures that functions that rely on OpenSSL
functions that are not present in this build raise NotImplementedError,
rather than AttributeError coming out of cryptography.
:param flag: A cryptography flag that guards the functions, e.g.
``Cryptography_HAS_NEXTPROTONEG``.
:param error: The string to be used in the exception if the flag is false.
"""
def _requires_decorator(func):
if not flag:
@wraps(func)
def explode(*args, **kwargs):
raise NotImplementedError(error)
return explode # depends on [control=['if'], data=[]]
else:
return func
return _requires_decorator |
def __write_noaas(dat, path):
"""
Use the filename - text data pairs to write the data as NOAA text files
:param dict dat: NOAA data to be written
:return none:
"""
for filename, text in dat.items():
try:
with open(os.path.join(path, filename), "w+") as f:
f.write(text)
except Exception as e:
print("write_noaas: There was a problem writing the NOAA text file: {}: {}".format(filename, e))
return | def function[__write_noaas, parameter[dat, path]]:
constant[
Use the filename - text data pairs to write the data as NOAA text files
:param dict dat: NOAA data to be written
:return none:
]
for taget[tuple[[<ast.Name object at 0x7da18f09e230>, <ast.Name object at 0x7da18f09e5f0>]]] in starred[call[name[dat].items, parameter[]]] begin[:]
<ast.Try object at 0x7da18f09c550>
return[None] | keyword[def] identifier[__write_noaas] ( identifier[dat] , identifier[path] ):
literal[string]
keyword[for] identifier[filename] , identifier[text] keyword[in] identifier[dat] . identifier[items] ():
keyword[try] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[filename] ), literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[text] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[filename] , identifier[e] ))
keyword[return] | def __write_noaas(dat, path):
"""
Use the filename - text data pairs to write the data as NOAA text files
:param dict dat: NOAA data to be written
:return none:
"""
for (filename, text) in dat.items():
try:
with open(os.path.join(path, filename), 'w+') as f:
f.write(text) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except Exception as e:
print('write_noaas: There was a problem writing the NOAA text file: {}: {}'.format(filename, e)) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]]
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.