code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def set_proxy(proxy_url, transport_proxy=None):
"""Create the proxy to PyPI XML-RPC Server"""
global proxy, PYPI_URL
PYPI_URL = proxy_url
proxy = xmlrpc.ServerProxy(
proxy_url,
transport=RequestsTransport(proxy_url.startswith('https://')),
allow_none=True) | def function[set_proxy, parameter[proxy_url, transport_proxy]]:
constant[Create the proxy to PyPI XML-RPC Server]
<ast.Global object at 0x7da20c6e4d00>
variable[PYPI_URL] assign[=] name[proxy_url]
variable[proxy] assign[=] call[name[xmlrpc].ServerProxy, parameter[name[proxy_url]]] | keyword[def] identifier[set_proxy] ( identifier[proxy_url] , identifier[transport_proxy] = keyword[None] ):
literal[string]
keyword[global] identifier[proxy] , identifier[PYPI_URL]
identifier[PYPI_URL] = identifier[proxy_url]
identifier[proxy] = identifier[xmlrpc] . identifier[ServerProxy] (
identifier[proxy_url] ,
identifier[transport] = identifier[RequestsTransport] ( identifier[proxy_url] . identifier[startswith] ( literal[string] )),
identifier[allow_none] = keyword[True] ) | def set_proxy(proxy_url, transport_proxy=None):
"""Create the proxy to PyPI XML-RPC Server"""
global proxy, PYPI_URL
PYPI_URL = proxy_url
proxy = xmlrpc.ServerProxy(proxy_url, transport=RequestsTransport(proxy_url.startswith('https://')), allow_none=True) |
def _set_value(value):
'''
A function to detect if user is trying to pass a dictionary or list. parse it and return a
dictionary list or a string
'''
#don't continue if already an acceptable data-type
if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list):
return value
#check if json
if value.startswith('j{') and value.endswith('}j'):
value = value.replace('j{', '{')
value = value.replace('}j', '}')
try:
return salt.utils.json.loads(value)
except Exception:
raise salt.exceptions.CommandExecutionError
#detect list of dictionaries
if '|' in value and r'\|' not in value:
values = value.split('|')
items = []
for value in values:
items.append(_set_value(value))
return items
#parse out dictionary if detected
if ':' in value and r'\:' not in value:
options = {}
#split out pairs
key_pairs = value.split(',')
for key_pair in key_pairs:
k = key_pair.split(':')[0]
v = key_pair.split(':')[1]
options[k] = v
return options
#try making a list
elif ',' in value and r'\,' not in value:
value_items = value.split(',')
return value_items
#just return a string
else:
#remove escape chars if added
if r'\|' in value:
value = value.replace(r'\|', '|')
if r'\:' in value:
value = value.replace(r'\:', ':')
if r'\,' in value:
value = value.replace(r'\,', ',')
return value | def function[_set_value, parameter[value]]:
constant[
A function to detect if user is trying to pass a dictionary or list. parse it and return a
dictionary list or a string
]
if <ast.BoolOp object at 0x7da1b2046b00> begin[:]
return[name[value]]
if <ast.BoolOp object at 0x7da1b2046260> begin[:]
variable[value] assign[=] call[name[value].replace, parameter[constant[j{], constant[{]]]
variable[value] assign[=] call[name[value].replace, parameter[constant[}j], constant[}]]]
<ast.Try object at 0x7da1b2045b10>
if <ast.BoolOp object at 0x7da1b2045f90> begin[:]
variable[values] assign[=] call[name[value].split, parameter[constant[|]]]
variable[items] assign[=] list[[]]
for taget[name[value]] in starred[name[values]] begin[:]
call[name[items].append, parameter[call[name[_set_value], parameter[name[value]]]]]
return[name[items]]
if <ast.BoolOp object at 0x7da1b2044280> begin[:]
variable[options] assign[=] dictionary[[], []]
variable[key_pairs] assign[=] call[name[value].split, parameter[constant[,]]]
for taget[name[key_pair]] in starred[name[key_pairs]] begin[:]
variable[k] assign[=] call[call[name[key_pair].split, parameter[constant[:]]]][constant[0]]
variable[v] assign[=] call[call[name[key_pair].split, parameter[constant[:]]]][constant[1]]
call[name[options]][name[k]] assign[=] name[v]
return[name[options]] | keyword[def] identifier[_set_value] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[bool] ) keyword[or] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[or] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[return] identifier[value]
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ) keyword[and] identifier[value] . identifier[endswith] ( literal[string] ):
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
keyword[return] identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[value] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[CommandExecutionError]
keyword[if] literal[string] keyword[in] identifier[value] keyword[and] literal[string] keyword[not] keyword[in] identifier[value] :
identifier[values] = identifier[value] . identifier[split] ( literal[string] )
identifier[items] =[]
keyword[for] identifier[value] keyword[in] identifier[values] :
identifier[items] . identifier[append] ( identifier[_set_value] ( identifier[value] ))
keyword[return] identifier[items]
keyword[if] literal[string] keyword[in] identifier[value] keyword[and] literal[string] keyword[not] keyword[in] identifier[value] :
identifier[options] ={}
identifier[key_pairs] = identifier[value] . identifier[split] ( literal[string] )
keyword[for] identifier[key_pair] keyword[in] identifier[key_pairs] :
identifier[k] = identifier[key_pair] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[v] = identifier[key_pair] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[options] [ identifier[k] ]= identifier[v]
keyword[return] identifier[options]
keyword[elif] literal[string] keyword[in] identifier[value] keyword[and] literal[string] keyword[not] keyword[in] identifier[value] :
identifier[value_items] = identifier[value] . identifier[split] ( literal[string] )
keyword[return] identifier[value_items]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[value] | def _set_value(value):
"""
A function to detect if user is trying to pass a dictionary or list. parse it and return a
dictionary list or a string
"""
#don't continue if already an acceptable data-type
if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list):
return value # depends on [control=['if'], data=[]]
#check if json
if value.startswith('j{') and value.endswith('}j'):
value = value.replace('j{', '{')
value = value.replace('}j', '}')
try:
return salt.utils.json.loads(value) # depends on [control=['try'], data=[]]
except Exception:
raise salt.exceptions.CommandExecutionError # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
#detect list of dictionaries
if '|' in value and '\\|' not in value:
values = value.split('|')
items = []
for value in values:
items.append(_set_value(value)) # depends on [control=['for'], data=['value']]
return items # depends on [control=['if'], data=[]]
#parse out dictionary if detected
if ':' in value and '\\:' not in value:
options = {}
#split out pairs
key_pairs = value.split(',')
for key_pair in key_pairs:
k = key_pair.split(':')[0]
v = key_pair.split(':')[1]
options[k] = v # depends on [control=['for'], data=['key_pair']]
return options # depends on [control=['if'], data=[]]
#try making a list
elif ',' in value and '\\,' not in value:
value_items = value.split(',')
return value_items # depends on [control=['if'], data=[]]
else:
#just return a string
#remove escape chars if added
if '\\|' in value:
value = value.replace('\\|', '|') # depends on [control=['if'], data=['value']]
if '\\:' in value:
value = value.replace('\\:', ':') # depends on [control=['if'], data=['value']]
if '\\,' in value:
value = value.replace('\\,', ',') # depends on [control=['if'], data=['value']]
return value |
def __build_python_module_cache(self):
"""Recursively walks through the b2/src subdirectories and
creates an index of base module name to package name. The
index is stored within self.__python_module_cache and allows
for an O(1) module lookup.
For example, given the base module name `toolset`,
self.__python_module_cache['toolset'] will return
'b2.build.toolset'
pkgutil.walk_packages() will find any python package
provided a directory contains an __init__.py. This has the
added benefit of allowing libraries to be installed and
automatically avaiable within the contrib directory.
*Note*: pkgutil.walk_packages() will import any subpackage
in order to access its __path__variable. Meaning:
any initialization code will be run if the package hasn't
already been imported.
"""
cache = {}
for importer, mname, ispkg in pkgutil.walk_packages(b2.__path__, prefix='b2.'):
basename = mname.split('.')[-1]
# since the jam code is only going to have "import toolset ;"
# it doesn't matter if there are separately named "b2.build.toolset" and
# "b2.contrib.toolset" as it is impossible to know which the user is
# referring to.
if basename in cache:
self.manager.errors()('duplicate module name "{0}" '
'found in boost-build path'.format(basename))
cache[basename] = mname
self.__python_module_cache = cache | def function[__build_python_module_cache, parameter[self]]:
constant[Recursively walks through the b2/src subdirectories and
creates an index of base module name to package name. The
index is stored within self.__python_module_cache and allows
for an O(1) module lookup.
For example, given the base module name `toolset`,
self.__python_module_cache['toolset'] will return
'b2.build.toolset'
pkgutil.walk_packages() will find any python package
provided a directory contains an __init__.py. This has the
added benefit of allowing libraries to be installed and
automatically avaiable within the contrib directory.
*Note*: pkgutil.walk_packages() will import any subpackage
in order to access its __path__variable. Meaning:
any initialization code will be run if the package hasn't
already been imported.
]
variable[cache] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c9901c0>, <ast.Name object at 0x7da20c993fa0>, <ast.Name object at 0x7da20c990580>]]] in starred[call[name[pkgutil].walk_packages, parameter[name[b2].__path__]]] begin[:]
variable[basename] assign[=] call[call[name[mname].split, parameter[constant[.]]]][<ast.UnaryOp object at 0x7da20c990430>]
if compare[name[basename] in name[cache]] begin[:]
call[call[name[self].manager.errors, parameter[]], parameter[call[constant[duplicate module name "{0}" found in boost-build path].format, parameter[name[basename]]]]]
call[name[cache]][name[basename]] assign[=] name[mname]
name[self].__python_module_cache assign[=] name[cache] | keyword[def] identifier[__build_python_module_cache] ( identifier[self] ):
literal[string]
identifier[cache] ={}
keyword[for] identifier[importer] , identifier[mname] , identifier[ispkg] keyword[in] identifier[pkgutil] . identifier[walk_packages] ( identifier[b2] . identifier[__path__] , identifier[prefix] = literal[string] ):
identifier[basename] = identifier[mname] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] identifier[basename] keyword[in] identifier[cache] :
identifier[self] . identifier[manager] . identifier[errors] ()( literal[string]
literal[string] . identifier[format] ( identifier[basename] ))
identifier[cache] [ identifier[basename] ]= identifier[mname]
identifier[self] . identifier[__python_module_cache] = identifier[cache] | def __build_python_module_cache(self):
"""Recursively walks through the b2/src subdirectories and
creates an index of base module name to package name. The
index is stored within self.__python_module_cache and allows
for an O(1) module lookup.
For example, given the base module name `toolset`,
self.__python_module_cache['toolset'] will return
'b2.build.toolset'
pkgutil.walk_packages() will find any python package
provided a directory contains an __init__.py. This has the
added benefit of allowing libraries to be installed and
automatically avaiable within the contrib directory.
*Note*: pkgutil.walk_packages() will import any subpackage
in order to access its __path__variable. Meaning:
any initialization code will be run if the package hasn't
already been imported.
"""
cache = {}
for (importer, mname, ispkg) in pkgutil.walk_packages(b2.__path__, prefix='b2.'):
basename = mname.split('.')[-1]
# since the jam code is only going to have "import toolset ;"
# it doesn't matter if there are separately named "b2.build.toolset" and
# "b2.contrib.toolset" as it is impossible to know which the user is
# referring to.
if basename in cache:
self.manager.errors()('duplicate module name "{0}" found in boost-build path'.format(basename)) # depends on [control=['if'], data=['basename']]
cache[basename] = mname # depends on [control=['for'], data=[]]
self.__python_module_cache = cache |
def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({
'show_delete_link': (not is_popup and
can_delete and
change and
context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (can_add and
not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': (not is_popup and can_change and show_save_and_continue),
'show_save': show_save,
})
return ctx | def function[channel_submit_row, parameter[context]]:
constant[
Display the row of buttons for delete and save.
]
variable[change] assign[=] call[name[context]][constant[change]]
variable[is_popup] assign[=] call[name[context]][constant[is_popup]]
variable[save_as] assign[=] call[name[context]][constant[save_as]]
variable[show_save] assign[=] call[name[context].get, parameter[constant[show_save], constant[True]]]
variable[show_save_and_continue] assign[=] call[name[context].get, parameter[constant[show_save_and_continue], constant[True]]]
variable[can_delete] assign[=] call[name[context]][constant[has_delete_permission]]
variable[can_add] assign[=] call[name[context]][constant[has_add_permission]]
variable[can_change] assign[=] call[name[context]][constant[has_change_permission]]
variable[ctx] assign[=] call[name[Context], parameter[name[context]]]
call[name[ctx].update, parameter[dictionary[[<ast.Constant object at 0x7da1b12970a0>, <ast.Constant object at 0x7da1b1294d90>, <ast.Constant object at 0x7da1b1297d30>, <ast.Constant object at 0x7da1b1295c90>, <ast.Constant object at 0x7da1b1297220>], [<ast.BoolOp object at 0x7da1b1296590>, <ast.BoolOp object at 0x7da1b1297490>, <ast.BoolOp object at 0x7da1b12958d0>, <ast.BoolOp object at 0x7da1b1294880>, <ast.Name object at 0x7da1b12958a0>]]]]
return[name[ctx]] | keyword[def] identifier[channel_submit_row] ( identifier[context] ):
literal[string]
identifier[change] = identifier[context] [ literal[string] ]
identifier[is_popup] = identifier[context] [ literal[string] ]
identifier[save_as] = identifier[context] [ literal[string] ]
identifier[show_save] = identifier[context] . identifier[get] ( literal[string] , keyword[True] )
identifier[show_save_and_continue] = identifier[context] . identifier[get] ( literal[string] , keyword[True] )
identifier[can_delete] = identifier[context] [ literal[string] ]
identifier[can_add] = identifier[context] [ literal[string] ]
identifier[can_change] = identifier[context] [ literal[string] ]
identifier[ctx] = identifier[Context] ( identifier[context] )
identifier[ctx] . identifier[update] ({
literal[string] :( keyword[not] identifier[is_popup] keyword[and]
identifier[can_delete] keyword[and]
identifier[change] keyword[and]
identifier[context] . identifier[get] ( literal[string] , keyword[True] )
),
literal[string] : keyword[not] identifier[is_popup] keyword[and] identifier[change] keyword[and] identifier[save_as] ,
literal[string] :( identifier[can_add] keyword[and]
keyword[not] identifier[is_popup] keyword[and]
( keyword[not] identifier[save_as] keyword[or] identifier[context] [ literal[string] ])
),
literal[string] :( keyword[not] identifier[is_popup] keyword[and] identifier[can_change] keyword[and] identifier[show_save_and_continue] ),
literal[string] : identifier[show_save] ,
})
keyword[return] identifier[ctx] | def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({'show_delete_link': not is_popup and can_delete and change and context.get('show_delete', True), 'show_save_as_new': not is_popup and change and save_as, 'show_save_and_add_another': can_add and (not is_popup) and (not save_as or context['add']), 'show_save_and_continue': not is_popup and can_change and show_save_and_continue, 'show_save': show_save})
return ctx |
def run_algorithms(file_struct, boundaries_id, labels_id, config,
annotator_id=0):
"""Runs the algorithms with the specified identifiers on the audio_file.
Parameters
----------
file_struct: `msaf.io.FileStruct`
Object with the file paths.
boundaries_id: str
Identifier of the boundaries algorithm to use ("gt" for ground truth).
labels_id: str
Identifier of the labels algorithm to use (None for not labeling).
config: dict
Dictionary containing the custom parameters of the algorithms to use.
annotator_id: int
Annotator identificator in the ground truth.
Returns
-------
est_times: np.array or list
List of estimated times for the segment boundaries.
If `list`, it will be a list of np.arrays, sorted by segmentation
layer.
est_labels: np.array or list
List of all the labels associated segments.
If `list`, it will be a list of np.arrays, sorted by segmentation
layer.
"""
# Check that there are enough audio frames
if config["features"].features.shape[0] <= msaf.config.minimum_frames:
logging.warning("Audio file too short, or too many few beats "
"estimated. Returning empty estimations.")
return np.asarray([0, config["features"].dur]), \
np.asarray([0], dtype=int)
# Get the corresponding modules
bounds_module = get_boundaries_module(boundaries_id)
labels_module = get_labels_module(labels_id)
# Get the correct frame times
frame_times = config["features"].frame_times
# Segment audio based on type of segmentation
run_fun = run_hierarchical if config["hier"] else run_flat
est_times, est_labels = run_fun(file_struct, bounds_module, labels_module,
frame_times, config, annotator_id)
return est_times, est_labels | def function[run_algorithms, parameter[file_struct, boundaries_id, labels_id, config, annotator_id]]:
constant[Runs the algorithms with the specified identifiers on the audio_file.
Parameters
----------
file_struct: `msaf.io.FileStruct`
Object with the file paths.
boundaries_id: str
Identifier of the boundaries algorithm to use ("gt" for ground truth).
labels_id: str
Identifier of the labels algorithm to use (None for not labeling).
config: dict
Dictionary containing the custom parameters of the algorithms to use.
annotator_id: int
Annotator identificator in the ground truth.
Returns
-------
est_times: np.array or list
List of estimated times for the segment boundaries.
If `list`, it will be a list of np.arrays, sorted by segmentation
layer.
est_labels: np.array or list
List of all the labels associated segments.
If `list`, it will be a list of np.arrays, sorted by segmentation
layer.
]
if compare[call[call[name[config]][constant[features]].features.shape][constant[0]] less_or_equal[<=] name[msaf].config.minimum_frames] begin[:]
call[name[logging].warning, parameter[constant[Audio file too short, or too many few beats estimated. Returning empty estimations.]]]
return[tuple[[<ast.Call object at 0x7da1b02858d0>, <ast.Call object at 0x7da1b0286830>]]]
variable[bounds_module] assign[=] call[name[get_boundaries_module], parameter[name[boundaries_id]]]
variable[labels_module] assign[=] call[name[get_labels_module], parameter[name[labels_id]]]
variable[frame_times] assign[=] call[name[config]][constant[features]].frame_times
variable[run_fun] assign[=] <ast.IfExp object at 0x7da1b03b8490>
<ast.Tuple object at 0x7da1b03b9030> assign[=] call[name[run_fun], parameter[name[file_struct], name[bounds_module], name[labels_module], name[frame_times], name[config], name[annotator_id]]]
return[tuple[[<ast.Name object at 0x7da1b03bbdc0>, <ast.Name object at 0x7da1b03bb010>]]] | keyword[def] identifier[run_algorithms] ( identifier[file_struct] , identifier[boundaries_id] , identifier[labels_id] , identifier[config] ,
identifier[annotator_id] = literal[int] ):
literal[string]
keyword[if] identifier[config] [ literal[string] ]. identifier[features] . identifier[shape] [ literal[int] ]<= identifier[msaf] . identifier[config] . identifier[minimum_frames] :
identifier[logging] . identifier[warning] ( literal[string]
literal[string] )
keyword[return] identifier[np] . identifier[asarray] ([ literal[int] , identifier[config] [ literal[string] ]. identifier[dur] ]), identifier[np] . identifier[asarray] ([ literal[int] ], identifier[dtype] = identifier[int] )
identifier[bounds_module] = identifier[get_boundaries_module] ( identifier[boundaries_id] )
identifier[labels_module] = identifier[get_labels_module] ( identifier[labels_id] )
identifier[frame_times] = identifier[config] [ literal[string] ]. identifier[frame_times]
identifier[run_fun] = identifier[run_hierarchical] keyword[if] identifier[config] [ literal[string] ] keyword[else] identifier[run_flat]
identifier[est_times] , identifier[est_labels] = identifier[run_fun] ( identifier[file_struct] , identifier[bounds_module] , identifier[labels_module] ,
identifier[frame_times] , identifier[config] , identifier[annotator_id] )
keyword[return] identifier[est_times] , identifier[est_labels] | def run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=0):
"""Runs the algorithms with the specified identifiers on the audio_file.
Parameters
----------
file_struct: `msaf.io.FileStruct`
Object with the file paths.
boundaries_id: str
Identifier of the boundaries algorithm to use ("gt" for ground truth).
labels_id: str
Identifier of the labels algorithm to use (None for not labeling).
config: dict
Dictionary containing the custom parameters of the algorithms to use.
annotator_id: int
Annotator identificator in the ground truth.
Returns
-------
est_times: np.array or list
List of estimated times for the segment boundaries.
If `list`, it will be a list of np.arrays, sorted by segmentation
layer.
est_labels: np.array or list
List of all the labels associated segments.
If `list`, it will be a list of np.arrays, sorted by segmentation
layer.
"""
# Check that there are enough audio frames
if config['features'].features.shape[0] <= msaf.config.minimum_frames:
logging.warning('Audio file too short, or too many few beats estimated. Returning empty estimations.')
return (np.asarray([0, config['features'].dur]), np.asarray([0], dtype=int)) # depends on [control=['if'], data=[]]
# Get the corresponding modules
bounds_module = get_boundaries_module(boundaries_id)
labels_module = get_labels_module(labels_id)
# Get the correct frame times
frame_times = config['features'].frame_times
# Segment audio based on type of segmentation
run_fun = run_hierarchical if config['hier'] else run_flat
(est_times, est_labels) = run_fun(file_struct, bounds_module, labels_module, frame_times, config, annotator_id)
return (est_times, est_labels) |
def add_to_watched(self, watched):
"""
:calls: `PUT /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/repos/" + watched._identity + "/subscription",
input={"subscribed": True}
) | def function[add_to_watched, parameter[self, watched]]:
constant[
:calls: `PUT /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
]
assert[call[name[isinstance], parameter[name[watched], name[github].Repository.Repository]]]
<ast.Tuple object at 0x7da1b1f4bd00> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[PUT], binary_operation[binary_operation[constant[/repos/] + name[watched]._identity] + constant[/subscription]]]] | keyword[def] identifier[add_to_watched] ( identifier[self] , identifier[watched] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[watched] , identifier[github] . identifier[Repository] . identifier[Repository] ), identifier[watched]
identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] (
literal[string] ,
literal[string] + identifier[watched] . identifier[_identity] + literal[string] ,
identifier[input] ={ literal[string] : keyword[True] }
) | def add_to_watched(self, watched):
"""
:calls: `PUT /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
(headers, data) = self._requester.requestJsonAndCheck('PUT', '/repos/' + watched._identity + '/subscription', input={'subscribed': True}) |
def plotLightCurves(
log,
lightCurves,
polyOrder,
pathToOutputDirectory):
"""
*plot lightcurve(s) given an list of magnitude, time pairs*
**Key Arguments:**
- ``log`` -- logger
- ``lightCurves`` -- list of magnitude, time numPy arrays
- ``polyOrder`` -- order of polynomial used to fit the model lightcurves extracted from spectra
- ``pathToOutputDirectory`` -- path to the output directory
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
resultsDict = {}
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
ax = plt.subplot(111)
curveDict = {}
for curve in lightCurves:
x = curve[1]
y = curve[0]
# CAUSE LIGHTCURVE GENERATION TO FAIL IF LESS THAN 5 POINTS EXTRACTED
# FROM THE SPECTRA
if len(x) <= 4:
curveDict['poly'] = None
continue
order = polyOrder
poly = np.polyfit(x, y, order)
curveDict['poly'] = poly
pOrder = np.poly1d(poly)
polyString = "mxxx[i] = "
polyStringMd = "\\\\(mag = "
for i in range(0, order + 1):
if i > 0 and poly[i] > 0:
polyString += "+"
polyStringMd += "+"
polyString += """%s*pow(i,%s) """ % (poly[i], order - i)
polyStringMd += """%s*time^{%s} """ % (poly[i], order - i)
polyStringMd += "\\\\)"
xp = np.arange(int(min(x)), int(max(x)), 0.2)
ax.plot(x, y, '.', xp, pOrder(xp), '--')
title = curve[5]
# Shink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 8})
ax.titlesize = 'medium' # fontsize of the axes title
ax.labelsize = 'medium' # fontsize of the x any y labels
plt.xlabel("Days Relative to Peak")
plt.ylabel("Magnitude")
plt.title(title, fontsize='small',
verticalalignment='bottom', linespacing=0.2)
ax.invert_yaxis()
fileName = pathToOutputDirectory + title.replace(" ", "_") + ".png"
# mdPlotLink = """![%s_plot]\n\n[%s_plot]: %s\n\n""" % (title.replace(" ", "_"), title.replace(" ", "_"), fileName,)
# curveDict['mdLink'] = mdPlotLink
plt.savefig(fileName)
plt.clf() # clear figure
return curveDict | def function[plotLightCurves, parameter[log, lightCurves, polyOrder, pathToOutputDirectory]]:
constant[
*plot lightcurve(s) given an list of magnitude, time pairs*
**Key Arguments:**
- ``log`` -- logger
- ``lightCurves`` -- list of magnitude, time numPy arrays
- ``polyOrder`` -- order of polynomial used to fit the model lightcurves extracted from spectra
- ``pathToOutputDirectory`` -- path to the output directory
**Return:**
- None
]
import module[matplotlib.pyplot] as alias[plt]
import module[numpy] as alias[np]
variable[resultsDict] assign[=] dictionary[[], []]
variable[ax] assign[=] call[name[plt].subplot, parameter[constant[111]]]
variable[curveDict] assign[=] dictionary[[], []]
for taget[name[curve]] in starred[name[lightCurves]] begin[:]
variable[x] assign[=] call[name[curve]][constant[1]]
variable[y] assign[=] call[name[curve]][constant[0]]
if compare[call[name[len], parameter[name[x]]] less_or_equal[<=] constant[4]] begin[:]
call[name[curveDict]][constant[poly]] assign[=] constant[None]
continue
variable[order] assign[=] name[polyOrder]
variable[poly] assign[=] call[name[np].polyfit, parameter[name[x], name[y], name[order]]]
call[name[curveDict]][constant[poly]] assign[=] name[poly]
variable[pOrder] assign[=] call[name[np].poly1d, parameter[name[poly]]]
variable[polyString] assign[=] constant[mxxx[i] = ]
variable[polyStringMd] assign[=] constant[\\(mag = ]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[name[order] + constant[1]]]]] begin[:]
if <ast.BoolOp object at 0x7da204961c60> begin[:]
<ast.AugAssign object at 0x7da204963130>
<ast.AugAssign object at 0x7da2049623e0>
<ast.AugAssign object at 0x7da204961570>
<ast.AugAssign object at 0x7da204963d30>
<ast.AugAssign object at 0x7da204961390>
variable[xp] assign[=] call[name[np].arange, parameter[call[name[int], parameter[call[name[min], parameter[name[x]]]]], call[name[int], parameter[call[name[max], parameter[name[x]]]]], constant[0.2]]]
call[name[ax].plot, parameter[name[x], name[y], constant[.], name[xp], call[name[pOrder], parameter[name[xp]]], constant[--]]]
variable[title] assign[=] call[name[curve]][constant[5]]
variable[box] assign[=] call[name[ax].get_position, parameter[]]
call[name[ax].set_position, parameter[list[[<ast.Attribute object at 0x7da204961db0>, <ast.Attribute object at 0x7da204962230>, <ast.BinOp object at 0x7da2049608e0>, <ast.Attribute object at 0x7da2049638e0>]]]]
call[name[ax].legend, parameter[]]
name[ax].titlesize assign[=] constant[medium]
name[ax].labelsize assign[=] constant[medium]
call[name[plt].xlabel, parameter[constant[Days Relative to Peak]]]
call[name[plt].ylabel, parameter[constant[Magnitude]]]
call[name[plt].title, parameter[name[title]]]
call[name[ax].invert_yaxis, parameter[]]
variable[fileName] assign[=] binary_operation[binary_operation[name[pathToOutputDirectory] + call[name[title].replace, parameter[constant[ ], constant[_]]]] + constant[.png]]
call[name[plt].savefig, parameter[name[fileName]]]
call[name[plt].clf, parameter[]]
return[name[curveDict]] | keyword[def] identifier[plotLightCurves] (
identifier[log] ,
identifier[lightCurves] ,
identifier[polyOrder] ,
identifier[pathToOutputDirectory] ):
literal[string]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[import] identifier[numpy] keyword[as] identifier[np]
identifier[resultsDict] ={}
identifier[ax] = identifier[plt] . identifier[subplot] ( literal[int] )
identifier[curveDict] ={}
keyword[for] identifier[curve] keyword[in] identifier[lightCurves] :
identifier[x] = identifier[curve] [ literal[int] ]
identifier[y] = identifier[curve] [ literal[int] ]
keyword[if] identifier[len] ( identifier[x] )<= literal[int] :
identifier[curveDict] [ literal[string] ]= keyword[None]
keyword[continue]
identifier[order] = identifier[polyOrder]
identifier[poly] = identifier[np] . identifier[polyfit] ( identifier[x] , identifier[y] , identifier[order] )
identifier[curveDict] [ literal[string] ]= identifier[poly]
identifier[pOrder] = identifier[np] . identifier[poly1d] ( identifier[poly] )
identifier[polyString] = literal[string]
identifier[polyStringMd] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[order] + literal[int] ):
keyword[if] identifier[i] > literal[int] keyword[and] identifier[poly] [ identifier[i] ]> literal[int] :
identifier[polyString] += literal[string]
identifier[polyStringMd] += literal[string]
identifier[polyString] += literal[string] %( identifier[poly] [ identifier[i] ], identifier[order] - identifier[i] )
identifier[polyStringMd] += literal[string] %( identifier[poly] [ identifier[i] ], identifier[order] - identifier[i] )
identifier[polyStringMd] += literal[string]
identifier[xp] = identifier[np] . identifier[arange] ( identifier[int] ( identifier[min] ( identifier[x] )), identifier[int] ( identifier[max] ( identifier[x] )), literal[int] )
identifier[ax] . identifier[plot] ( identifier[x] , identifier[y] , literal[string] , identifier[xp] , identifier[pOrder] ( identifier[xp] ), literal[string] )
identifier[title] = identifier[curve] [ literal[int] ]
identifier[box] = identifier[ax] . identifier[get_position] ()
identifier[ax] . identifier[set_position] ([ identifier[box] . identifier[x0] , identifier[box] . identifier[y0] , identifier[box] . identifier[width] * literal[int] , identifier[box] . identifier[height] ])
identifier[ax] . identifier[legend] ( identifier[loc] = literal[string] , identifier[bbox_to_anchor] =( literal[int] , literal[int] ), identifier[prop] ={ literal[string] : literal[int] })
identifier[ax] . identifier[titlesize] = literal[string]
identifier[ax] . identifier[labelsize] = literal[string]
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( identifier[title] , identifier[fontsize] = literal[string] ,
identifier[verticalalignment] = literal[string] , identifier[linespacing] = literal[int] )
identifier[ax] . identifier[invert_yaxis] ()
identifier[fileName] = identifier[pathToOutputDirectory] + identifier[title] . identifier[replace] ( literal[string] , literal[string] )+ literal[string]
identifier[plt] . identifier[savefig] ( identifier[fileName] )
identifier[plt] . identifier[clf] ()
keyword[return] identifier[curveDict] | def plotLightCurves(log, lightCurves, polyOrder, pathToOutputDirectory):
"""
*plot lightcurve(s) given an list of magnitude, time pairs*
**Key Arguments:**
- ``log`` -- logger
- ``lightCurves`` -- list of magnitude, time numPy arrays
- ``polyOrder`` -- order of polynomial used to fit the model lightcurves extracted from spectra
- ``pathToOutputDirectory`` -- path to the output directory
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
resultsDict = {}
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
ax = plt.subplot(111)
curveDict = {}
for curve in lightCurves:
x = curve[1]
y = curve[0]
# CAUSE LIGHTCURVE GENERATION TO FAIL IF LESS THAN 5 POINTS EXTRACTED
# FROM THE SPECTRA
if len(x) <= 4:
curveDict['poly'] = None
continue # depends on [control=['if'], data=[]]
order = polyOrder
poly = np.polyfit(x, y, order)
curveDict['poly'] = poly
pOrder = np.poly1d(poly)
polyString = 'mxxx[i] = '
polyStringMd = '\\\\(mag = '
for i in range(0, order + 1):
if i > 0 and poly[i] > 0:
polyString += '+'
polyStringMd += '+' # depends on [control=['if'], data=[]]
polyString += '%s*pow(i,%s) ' % (poly[i], order - i)
polyStringMd += '%s*time^{%s} ' % (poly[i], order - i) # depends on [control=['for'], data=['i']]
polyStringMd += '\\\\)'
xp = np.arange(int(min(x)), int(max(x)), 0.2)
ax.plot(x, y, '.', xp, pOrder(xp), '--') # depends on [control=['for'], data=['curve']]
title = curve[5]
# Shink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 8})
ax.titlesize = 'medium' # fontsize of the axes title
ax.labelsize = 'medium' # fontsize of the x any y labels
plt.xlabel('Days Relative to Peak')
plt.ylabel('Magnitude')
plt.title(title, fontsize='small', verticalalignment='bottom', linespacing=0.2)
ax.invert_yaxis()
fileName = pathToOutputDirectory + title.replace(' ', '_') + '.png'
# mdPlotLink = """![%s_plot]\n\n[%s_plot]: %s\n\n""" % (title.replace(" ", "_"), title.replace(" ", "_"), fileName,)
# curveDict['mdLink'] = mdPlotLink
plt.savefig(fileName)
plt.clf() # clear figure
return curveDict |
def get_parse(self, uri, params=None):
    '''Convenience method to call get() on an arbitrary URI and parse the response
    into a JSON object. Raises an error on non-200 response status.
    Args:
        uri: request URI, passed through unchanged to ``get()``.
        params: optional query parameters; defaults to an empty dict.
    '''
    # ``params`` used to default to a shared mutable ``{}``: any downstream
    # mutation of that dict would leak into every later call. Use ``None`` as
    # the sentinel and hand out a fresh dict per call instead.
    if params is None:
        params = {}
    return self._request_parse(self.get, uri, params)
constant[Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
]
return[call[name[self]._request_parse, parameter[name[self].get, name[uri], name[params]]]] | keyword[def] identifier[get_parse] ( identifier[self] , identifier[uri] , identifier[params] ={}):
literal[string]
keyword[return] identifier[self] . identifier[_request_parse] ( identifier[self] . identifier[get] , identifier[uri] , identifier[params] ) | def get_parse(self, uri, params={}):
"""Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
"""
return self._request_parse(self.get, uri, params) |
def decode_conjure_union_type(cls, obj, conjure_type):
    """Decode a json object into a conjure union type.
    Args:
        obj: the json dict to decode; ``obj["type"]`` names the active variant
            and ``obj[<variant>]`` (when present) holds its payload.
        conjure_type: the union class being decoded into.
    Returns:
        An instance of ``conjure_type`` with the active variant populated.
    Raises:
        ValueError: if ``obj["type"]`` matches no option of the union.
    """
    type_of_union = obj["type"]  # type: str
    # Locate the (attribute name, field definition) pair whose wire
    # identifier matches the incoming variant tag.
    match = next(
        (
            (attr, field)
            for attr, field in conjure_type._options().items()
            if field.identifier == type_of_union
        ),
        None,
    )
    if match is None:
        raise ValueError(
            "unknown union type {0} for {1}".format(
                type_of_union, conjure_type
            )
        )
    attribute, conjure_field_definition = match
    deserialized = {}  # type: Dict[str, Any]
    # dict.get covers both "key missing" and "key present but null".
    if obj.get(type_of_union) is None:
        cls.check_null_field(obj, deserialized, conjure_field_definition)
    else:
        deserialized[attribute] = cls.do_decode(
            obj[type_of_union], conjure_field_definition.field_type
        )
    return conjure_type(**deserialized)
constant[Decodes json into a conjure union type.
Args:
obj: the json object to decode
conjure_type: a class object which is the union type
we're decoding into
Returns:
An instance of type conjure_type.
]
variable[type_of_union] assign[=] call[name[obj]][constant[type]]
for taget[tuple[[<ast.Name object at 0x7da1b0e15d50>, <ast.Name object at 0x7da1b0e14a60>]]] in starred[call[call[name[conjure_type]._options, parameter[]].items, parameter[]]] begin[:]
if compare[name[conjure_field].identifier equal[==] name[type_of_union]] begin[:]
variable[attribute] assign[=] name[attr]
variable[conjure_field_definition] assign[=] name[conjure_field]
break
variable[deserialized] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b0c40220> begin[:]
call[name[cls].check_null_field, parameter[name[obj], name[deserialized], name[conjure_field_definition]]]
return[call[name[conjure_type], parameter[]]] | keyword[def] identifier[decode_conjure_union_type] ( identifier[cls] , identifier[obj] , identifier[conjure_type] ):
literal[string]
identifier[type_of_union] = identifier[obj] [ literal[string] ]
keyword[for] identifier[attr] , identifier[conjure_field] keyword[in] identifier[conjure_type] . identifier[_options] (). identifier[items] ():
keyword[if] identifier[conjure_field] . identifier[identifier] == identifier[type_of_union] :
identifier[attribute] = identifier[attr]
identifier[conjure_field_definition] = identifier[conjure_field]
keyword[break]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[type_of_union] , identifier[conjure_type]
)
)
identifier[deserialized] ={}
keyword[if] identifier[type_of_union] keyword[not] keyword[in] identifier[obj] keyword[or] identifier[obj] [ identifier[type_of_union] ] keyword[is] keyword[None] :
identifier[cls] . identifier[check_null_field] ( identifier[obj] , identifier[deserialized] , identifier[conjure_field_definition] )
keyword[else] :
identifier[value] = identifier[obj] [ identifier[type_of_union] ]
identifier[field_type] = identifier[conjure_field_definition] . identifier[field_type]
identifier[deserialized] [ identifier[attribute] ]= identifier[cls] . identifier[do_decode] ( identifier[value] , identifier[field_type] )
keyword[return] identifier[conjure_type] (** identifier[deserialized] ) | def decode_conjure_union_type(cls, obj, conjure_type):
"""Decodes json into a conjure union type.
Args:
obj: the json object to decode
conjure_type: a class object which is the union type
we're decoding into
Returns:
An instance of type conjure_type.
"""
type_of_union = obj['type'] # type: str
for (attr, conjure_field) in conjure_type._options().items():
if conjure_field.identifier == type_of_union:
attribute = attr
conjure_field_definition = conjure_field
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
raise ValueError('unknown union type {0} for {1}'.format(type_of_union, conjure_type))
deserialized = {} # type: Dict[str, Any]
if type_of_union not in obj or obj[type_of_union] is None:
cls.check_null_field(obj, deserialized, conjure_field_definition) # depends on [control=['if'], data=[]]
else:
value = obj[type_of_union]
field_type = conjure_field_definition.field_type
deserialized[attribute] = cls.do_decode(value, field_type)
return conjure_type(**deserialized) |
def obfn_g0var(self):
    """Variable used when evaluating
    :meth:`.ADMMTwoBlockCnstrnt.obfn_g0`; which one is returned is
    selected by the ``AuxVarObj`` option.
    """
    if self.opt['AuxVarObj']:
        return self.var_y0()
    return self.cnst_A0(None, self.Xf) - self.cnst_c0()
constant[Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value.
]
return[<ast.IfExp object at 0x7da1b06eae30>] | keyword[def] identifier[obfn_g0var] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[var_y0] () keyword[if] identifier[self] . identifier[opt] [ literal[string] ] keyword[else] identifier[self] . identifier[cnst_A0] ( keyword[None] , identifier[self] . identifier[Xf] )- identifier[self] . identifier[cnst_c0] () | def obfn_g0var(self):
"""Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value.
"""
return self.var_y0() if self.opt['AuxVarObj'] else self.cnst_A0(None, self.Xf) - self.cnst_c0() |
def guard(func):
    """ Prevents the decorated function from parallel execution.
    Internally, this decorator creates a Lock object and transparently
    obtains/releases it when calling the function.
    """
    lock = threading.Lock()
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # ``with`` releases the lock even if ``func`` raises — same
        # guarantee as the former acquire/try/finally, but idiomatic.
        with lock:
            return func(*args, **kwargs)
    return wrapper
constant[ Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
]
variable[semaphore] assign[=] call[name[threading].Lock, parameter[]]
def function[wrapper, parameter[]]:
call[name[semaphore].acquire, parameter[]]
<ast.Try object at 0x7da1b27f6e30>
return[name[wrapper]] | keyword[def] identifier[guard] ( identifier[func] ):
literal[string]
identifier[semaphore] = identifier[threading] . identifier[Lock] ()
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[semaphore] . identifier[acquire] ()
keyword[try] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[finally] :
identifier[semaphore] . identifier[release] ()
keyword[return] identifier[wrapper] | def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs) # depends on [control=['try'], data=[]]
finally:
semaphore.release()
return wrapper |
def cmd_gethome(self, args):
    '''get home position'''
    # Fire-and-forget request: sends MAV_CMD_GET_HOME_POSITION via
    # COMMAND_LONG to the configured target system. The second argument
    # (0) is the target component; after the command id come the
    # confirmation counter plus param1..param7, all unused here.
    # NOTE(review): the reply presumably arrives asynchronously as a
    # HOME_POSITION message handled elsewhere — confirm against the
    # message dispatcher. ``args`` is unused, apparently kept only to
    # match the module's command-handler signature.
    self.master.mav.command_long_send(self.settings.target_system,
                                      0,
                                      mavutil.mavlink.MAV_CMD_GET_HOME_POSITION,
                                      0, 0, 0, 0, 0, 0, 0, 0)
constant[get home position]
call[name[self].master.mav.command_long_send, parameter[name[self].settings.target_system, constant[0], name[mavutil].mavlink.MAV_CMD_GET_HOME_POSITION, constant[0], constant[0], constant[0], constant[0], constant[0], constant[0], constant[0], constant[0]]] | keyword[def] identifier[cmd_gethome] ( identifier[self] , identifier[args] ):
literal[string]
identifier[self] . identifier[master] . identifier[mav] . identifier[command_long_send] ( identifier[self] . identifier[settings] . identifier[target_system] ,
literal[int] ,
identifier[mavutil] . identifier[mavlink] . identifier[MAV_CMD_GET_HOME_POSITION] ,
literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ) | def cmd_gethome(self, args):
"""get home position"""
self.master.mav.command_long_send(self.settings.target_system, 0, mavutil.mavlink.MAV_CMD_GET_HOME_POSITION, 0, 0, 0, 0, 0, 0, 0, 0) |
def put_file(client, source_file, destination_file):
    """
    Copy file to instance using Paramiko client connection.
    Args:
        client: connected Paramiko SSH client.
        source_file: local path of the file to upload.
        destination_file: remote path to write to.
    Raises:
        IpaUtilsException: if opening the SFTP channel or the transfer fails.
    """
    # Pre-initialize so the ``finally`` block is well-defined even when
    # ``open_sftp()`` itself raises. Previously ``sftp_client`` was unbound
    # in that case and the resulting NameError was silently swallowed by
    # ``ignored(Exception)``, masking the real cleanup path.
    sftp_client = None
    try:
        sftp_client = client.open_sftp()
        sftp_client.put(source_file, destination_file)
    except Exception as error:
        raise IpaUtilsException(
            'Error copying file to instance: {0}.'.format(error)
        )
    finally:
        if sftp_client is not None:
            with ignored(Exception):
                sftp_client.close()
constant[
Copy file to instance using Paramiko client connection.
]
<ast.Try object at 0x7da1b1adb280> | keyword[def] identifier[put_file] ( identifier[client] , identifier[source_file] , identifier[destination_file] ):
literal[string]
keyword[try] :
identifier[sftp_client] = identifier[client] . identifier[open_sftp] ()
identifier[sftp_client] . identifier[put] ( identifier[source_file] , identifier[destination_file] )
keyword[except] identifier[Exception] keyword[as] identifier[error] :
keyword[raise] identifier[IpaUtilsException] (
literal[string] . identifier[format] ( identifier[error] )
)
keyword[finally] :
keyword[with] identifier[ignored] ( identifier[Exception] ):
identifier[sftp_client] . identifier[close] () | def put_file(client, source_file, destination_file):
"""
Copy file to instance using Paramiko client connection.
"""
try:
sftp_client = client.open_sftp()
sftp_client.put(source_file, destination_file) # depends on [control=['try'], data=[]]
except Exception as error:
raise IpaUtilsException('Error copying file to instance: {0}.'.format(error)) # depends on [control=['except'], data=['error']]
finally:
with ignored(Exception):
sftp_client.close() # depends on [control=['with'], data=[]] |
def register(self, module):
    """
    Function registers into self.commands from module.
    Args
    ----
    module (module): The module name.
    """
    # Nothing to do when no module was supplied.
    if module is None:
        return
    for command_cls in self.retrieve_commands(module):
        if not self.valid_name(command_cls.name):
            print(colored("Warning: Command %s has empty name. It won't be registered"
                          % command_cls, 'yellow'))
            continue
        # Instantiate against our subparsers and keep the command around.
        self.commands.append(command_cls(self.subparsers))
constant[
Function registers into self.commands from module.
Args
----
module (module): The module name.
]
if compare[name[module] is_not constant[None]] begin[:]
variable[cmds] assign[=] call[name[self].retrieve_commands, parameter[name[module]]]
for taget[name[c]] in starred[name[cmds]] begin[:]
if call[name[self].valid_name, parameter[name[c].name]] begin[:]
variable[cmd] assign[=] call[name[c], parameter[name[self].subparsers]]
call[name[self].commands.append, parameter[name[cmd]]] | keyword[def] identifier[register] ( identifier[self] , identifier[module] ):
literal[string]
keyword[if] identifier[module] keyword[is] keyword[not] keyword[None] :
identifier[cmds] = identifier[self] . identifier[retrieve_commands] ( identifier[module] )
keyword[for] identifier[c] keyword[in] identifier[cmds] :
keyword[if] identifier[self] . identifier[valid_name] ( identifier[c] . identifier[name] ):
identifier[cmd] = identifier[c] ( identifier[self] . identifier[subparsers] )
identifier[self] . identifier[commands] . identifier[append] ( identifier[cmd] )
keyword[else] :
identifier[print] ( identifier[colored] ( literal[string]
% identifier[c] , literal[string] )) | def register(self, module):
"""
Function registers into self.commands from module.
Args
----
module (module): The module name.
"""
if module is not None:
cmds = self.retrieve_commands(module)
for c in cmds:
if self.valid_name(c.name):
cmd = c(self.subparsers)
self.commands.append(cmd) # depends on [control=['if'], data=[]]
else:
print(colored("Warning: Command %s has empty name. It won't be registered" % c, 'yellow')) # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=['module']] |
def smallest_flagged(heap, row):
    """Find the still-flagged element with the smallest distance in one heap.
    Parameters
    ----------
    heap: array of shape (3, n_samples, n_neighbors)
        Stacked (index, distance, flag) heaps.
    row: int
        Which heap (sample) to search.
    Returns
    -------
    index: int
        The stored index of the smallest flagged element of the ``row``-th
        heap (its flag is cleared as a side effect), or -1 if nothing in
        the heap is still flagged.
    """
    indices = heap[0, row]
    distances = heap[1, row]
    flags = heap[2, row]
    best_pos = -1
    best_dist = np.inf
    # Linear scan over the heap slots, keeping the flagged minimum.
    for pos in range(indices.shape[0]):
        if flags[pos] == 1 and distances[pos] < best_dist:
            best_dist = distances[pos]
            best_pos = pos
    if best_pos < 0:
        return -1
    flags[best_pos] = 0.0
    return int(indices[best_pos])
constant[Search the heap for the smallest element that is
still flagged.
Parameters
----------
heap: array of shape (3, n_samples, n_neighbors)
The heaps to search
row: int
Which of the heaps to search
Returns
-------
index: int
The index of the smallest flagged element
of the ``row``th heap, or -1 if no flagged
elements remain in the heap.
]
variable[ind] assign[=] call[name[heap]][tuple[[<ast.Constant object at 0x7da204623fa0>, <ast.Name object at 0x7da2046205e0>]]]
variable[dist] assign[=] call[name[heap]][tuple[[<ast.Constant object at 0x7da2046231f0>, <ast.Name object at 0x7da204620100>]]]
variable[flag] assign[=] call[name[heap]][tuple[[<ast.Constant object at 0x7da204620040>, <ast.Name object at 0x7da204623040>]]]
variable[min_dist] assign[=] name[np].inf
variable[result_index] assign[=] <ast.UnaryOp object at 0x7da204621750>
for taget[name[i]] in starred[call[name[range], parameter[call[name[ind].shape][constant[0]]]]] begin[:]
if <ast.BoolOp object at 0x7da2046209a0> begin[:]
variable[min_dist] assign[=] call[name[dist]][name[i]]
variable[result_index] assign[=] name[i]
if compare[name[result_index] greater_or_equal[>=] constant[0]] begin[:]
call[name[flag]][name[result_index]] assign[=] constant[0.0]
return[call[name[int], parameter[call[name[ind]][name[result_index]]]]] | keyword[def] identifier[smallest_flagged] ( identifier[heap] , identifier[row] ):
literal[string]
identifier[ind] = identifier[heap] [ literal[int] , identifier[row] ]
identifier[dist] = identifier[heap] [ literal[int] , identifier[row] ]
identifier[flag] = identifier[heap] [ literal[int] , identifier[row] ]
identifier[min_dist] = identifier[np] . identifier[inf]
identifier[result_index] =- literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ind] . identifier[shape] [ literal[int] ]):
keyword[if] identifier[flag] [ identifier[i] ]== literal[int] keyword[and] identifier[dist] [ identifier[i] ]< identifier[min_dist] :
identifier[min_dist] = identifier[dist] [ identifier[i] ]
identifier[result_index] = identifier[i]
keyword[if] identifier[result_index] >= literal[int] :
identifier[flag] [ identifier[result_index] ]= literal[int]
keyword[return] identifier[int] ( identifier[ind] [ identifier[result_index] ])
keyword[else] :
keyword[return] - literal[int] | def smallest_flagged(heap, row):
"""Search the heap for the smallest element that is
still flagged.
Parameters
----------
heap: array of shape (3, n_samples, n_neighbors)
The heaps to search
row: int
Which of the heaps to search
Returns
-------
index: int
The index of the smallest flagged element
of the ``row``th heap, or -1 if no flagged
elements remain in the heap.
"""
ind = heap[0, row]
dist = heap[1, row]
flag = heap[2, row]
min_dist = np.inf
result_index = -1
for i in range(ind.shape[0]):
if flag[i] == 1 and dist[i] < min_dist:
min_dist = dist[i]
result_index = i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if result_index >= 0:
flag[result_index] = 0.0
return int(ind[result_index]) # depends on [control=['if'], data=['result_index']]
else:
return -1 |
def _diff_group_position(group):
    """Generate a unified diff position line for a diff group"""
    first = group[0]
    old_begin = first[0]
    new_begin = first[1]
    removed = 0
    added = 0
    for _old_line, _new_line, entry in group:
        if isinstance(entry, tuple):
            # A conflict contributes the length of each side.
            old_side, new_side = entry
            removed += len(old_side)
            added += len(new_side)
        else:
            # A plain line counts once on both sides.
            removed += 1
            added += 1
    # Unified diff positions are 1-based only for non-empty ranges.
    if removed:
        old_begin += 1
    if added:
        new_begin += 1
    return color.LineNumber('@@ -%s,%s +%s,%s @@' % (old_begin, removed, new_begin, added))
constant[Generate a unified diff position line for a diff group]
variable[old_start] assign[=] call[call[name[group]][constant[0]]][constant[0]]
variable[new_start] assign[=] call[call[name[group]][constant[0]]][constant[1]]
variable[old_length] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b08042e0>, <ast.Name object at 0x7da1b0805090>, <ast.Name object at 0x7da1b0804d60>]]] in starred[name[group]] begin[:]
if call[name[isinstance], parameter[name[line_or_conflict], name[tuple]]] begin[:]
<ast.Tuple object at 0x7da1b0804100> assign[=] name[line_or_conflict]
<ast.AugAssign object at 0x7da1b08048b0>
<ast.AugAssign object at 0x7da1b0804cd0>
if name[old_length] begin[:]
<ast.AugAssign object at 0x7da1b08040a0>
if name[new_length] begin[:]
<ast.AugAssign object at 0x7da1b08040d0>
return[call[name[color].LineNumber, parameter[binary_operation[constant[@@ -%s,%s +%s,%s @@] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b08049a0>, <ast.Name object at 0x7da1b08044c0>, <ast.Name object at 0x7da1b0804ee0>, <ast.Name object at 0x7da1b0804c40>]]]]]] | keyword[def] identifier[_diff_group_position] ( identifier[group] ):
literal[string]
identifier[old_start] = identifier[group] [ literal[int] ][ literal[int] ]
identifier[new_start] = identifier[group] [ literal[int] ][ literal[int] ]
identifier[old_length] = identifier[new_length] = literal[int]
keyword[for] identifier[old_line] , identifier[new_line] , identifier[line_or_conflict] keyword[in] identifier[group] :
keyword[if] identifier[isinstance] ( identifier[line_or_conflict] , identifier[tuple] ):
identifier[old] , identifier[new] = identifier[line_or_conflict]
identifier[old_length] += identifier[len] ( identifier[old] )
identifier[new_length] += identifier[len] ( identifier[new] )
keyword[else] :
identifier[old_length] += literal[int]
identifier[new_length] += literal[int]
keyword[if] identifier[old_length] :
identifier[old_start] += literal[int]
keyword[if] identifier[new_length] :
identifier[new_start] += literal[int]
keyword[return] identifier[color] . identifier[LineNumber] ( literal[string] %( identifier[old_start] , identifier[old_length] , identifier[new_start] , identifier[new_length] )) | def _diff_group_position(group):
"""Generate a unified diff position line for a diff group"""
old_start = group[0][0]
new_start = group[0][1]
old_length = new_length = 0
for (old_line, new_line, line_or_conflict) in group:
if isinstance(line_or_conflict, tuple):
(old, new) = line_or_conflict
old_length += len(old)
new_length += len(new) # depends on [control=['if'], data=[]]
else:
old_length += 1
new_length += 1 # depends on [control=['for'], data=[]]
if old_length:
old_start += 1 # depends on [control=['if'], data=[]]
if new_length:
new_start += 1 # depends on [control=['if'], data=[]]
return color.LineNumber('@@ -%s,%s +%s,%s @@' % (old_start, old_length, new_start, new_length)) |
def iter_multichunks(iterable, chunksizes, bordermode=None):
    """Yield nested chunks of ``iterable``.
    Each yielded item is a nested list whose depth profile matches
    ``chunksizes``: the iterable is first cut into flat chunks of
    ``prod(chunksizes)`` elements (border handling per ``bordermode``),
    then each flat chunk is re-chunked from the innermost size outward.
    With a single-element ``chunksizes`` this degenerates to ``ichunks``.
    """
    flat_size = reduce(operator.mul, chunksizes)
    for flat_chunk in ichunks(iterable, flat_size, bordermode=bordermode):
        nested = flat_chunk
        # Re-chunk innermost-first so the final nesting matches chunksizes.
        for inner_size in reversed(chunksizes[1:]):
            nested = list(ichunks(nested, inner_size))
        yield nested
constant[
CommandLine:
python -m utool.util_iter --test-iter_multichunks
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> iterable = list(range(20))
>>> chunksizes = (3, 2, 3)
>>> bordermode = 'cycle'
>>> genresult = iter_multichunks(iterable, chunksizes, bordermode)
>>> multichunks = list(genresult)
>>> depthprofile = ut.depth_profile(multichunks)
>>> assert depthprofile[1:] == chunksizes, 'did not generate chunks correctly'
>>> result = ut.repr4(list(map(str, multichunks)), nobr=True)
>>> print(result)
'[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]]',
'[[[18, 19, 0], [1, 2, 3]], [[4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15]]]',
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> iterable = list(range(7))
>>> # when chunksizes is len == 1, then equlivalent to ichunks
>>> chunksizes = (3,)
>>> bordermode = 'cycle'
>>> genresult = iter_multichunks(iterable, chunksizes, bordermode)
>>> multichunks = list(genresult)
>>> depthprofile = ut.depth_profile(multichunks)
>>> assert depthprofile[1:] == chunksizes, 'did not generate chunks correctly'
>>> result = str(multichunks)
>>> print(result)
[[0, 1, 2], [3, 4, 5], [6, 0, 1]]
]
variable[chunksize] assign[=] call[name[reduce], parameter[name[operator].mul, name[chunksizes]]]
for taget[name[chunk]] in starred[call[name[ichunks], parameter[name[iterable], name[chunksize]]]] begin[:]
variable[reshaped_chunk] assign[=] name[chunk]
for taget[name[d]] in starred[call[call[name[chunksizes]][<ast.Slice object at 0x7da1b253a290>]][<ast.Slice object at 0x7da1b2538040>]] begin[:]
variable[reshaped_chunk] assign[=] call[name[list], parameter[call[name[ichunks], parameter[name[reshaped_chunk], name[d]]]]]
<ast.Yield object at 0x7da1b24eac80> | keyword[def] identifier[iter_multichunks] ( identifier[iterable] , identifier[chunksizes] , identifier[bordermode] = keyword[None] ):
literal[string]
identifier[chunksize] = identifier[reduce] ( identifier[operator] . identifier[mul] , identifier[chunksizes] )
keyword[for] identifier[chunk] keyword[in] identifier[ichunks] ( identifier[iterable] , identifier[chunksize] , identifier[bordermode] = identifier[bordermode] ):
identifier[reshaped_chunk] = identifier[chunk]
keyword[for] identifier[d] keyword[in] identifier[chunksizes] [ literal[int] :][::- literal[int] ]:
identifier[reshaped_chunk] = identifier[list] ( identifier[ichunks] ( identifier[reshaped_chunk] , identifier[d] ))
keyword[yield] identifier[reshaped_chunk] | def iter_multichunks(iterable, chunksizes, bordermode=None):
"""
CommandLine:
python -m utool.util_iter --test-iter_multichunks
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> iterable = list(range(20))
>>> chunksizes = (3, 2, 3)
>>> bordermode = 'cycle'
>>> genresult = iter_multichunks(iterable, chunksizes, bordermode)
>>> multichunks = list(genresult)
>>> depthprofile = ut.depth_profile(multichunks)
>>> assert depthprofile[1:] == chunksizes, 'did not generate chunks correctly'
>>> result = ut.repr4(list(map(str, multichunks)), nobr=True)
>>> print(result)
'[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]]',
'[[[18, 19, 0], [1, 2, 3]], [[4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15]]]',
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> iterable = list(range(7))
>>> # when chunksizes is len == 1, then equlivalent to ichunks
>>> chunksizes = (3,)
>>> bordermode = 'cycle'
>>> genresult = iter_multichunks(iterable, chunksizes, bordermode)
>>> multichunks = list(genresult)
>>> depthprofile = ut.depth_profile(multichunks)
>>> assert depthprofile[1:] == chunksizes, 'did not generate chunks correctly'
>>> result = str(multichunks)
>>> print(result)
[[0, 1, 2], [3, 4, 5], [6, 0, 1]]
"""
chunksize = reduce(operator.mul, chunksizes)
for chunk in ichunks(iterable, chunksize, bordermode=bordermode):
reshaped_chunk = chunk
for d in chunksizes[1:][::-1]:
reshaped_chunk = list(ichunks(reshaped_chunk, d)) # depends on [control=['for'], data=['d']]
yield reshaped_chunk # depends on [control=['for'], data=['chunk']] |
def from_ZNM(cls, Z, N, M, name=''):
"""
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
"""
df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
df.name = name
return cls(df=df, name=name) | def function[from_ZNM, parameter[cls, Z, N, M, name]]:
constant[
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
]
variable[df] assign[=] call[call[call[name[pd].DataFrame.from_dict, parameter[dictionary[[<ast.Constant object at 0x7da1b021cd30>, <ast.Constant object at 0x7da1b021c550>, <ast.Constant object at 0x7da1b021cc10>], [<ast.Name object at 0x7da18f00c100>, <ast.Name object at 0x7da18f00caf0>, <ast.Name object at 0x7da2054a7bb0>]]]].set_index, parameter[list[[<ast.Constant object at 0x7da2054a59f0>, <ast.Constant object at 0x7da2054a6320>]]]]][constant[M]]
name[df].name assign[=] name[name]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_ZNM] ( identifier[cls] , identifier[Z] , identifier[N] , identifier[M] , identifier[name] = literal[string] ):
literal[string]
identifier[df] = identifier[pd] . identifier[DataFrame] . identifier[from_dict] ({ literal[string] : identifier[Z] , literal[string] : identifier[N] , literal[string] : identifier[M] }). identifier[set_index] ([ literal[string] , literal[string] ])[ literal[string] ]
identifier[df] . identifier[name] = identifier[name]
keyword[return] identifier[cls] ( identifier[df] = identifier[df] , identifier[name] = identifier[name] ) | def from_ZNM(cls, Z, N, M, name=''):
"""
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
"""
df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
df.name = name
return cls(df=df, name=name) |
def usages_list(location, **kwargs):
'''
.. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
result = __utils__['azurearm.paged_object_to_list'](netconn.usages.list(location))
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | def function[usages_list, parameter[location]]:
constant[
.. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus
]
variable[netconn] assign[=] call[call[name[__utils__]][constant[azurearm.get_client]], parameter[constant[network]]]
<ast.Try object at 0x7da204962ef0>
return[name[result]] | keyword[def] identifier[usages_list] ( identifier[location] ,** identifier[kwargs] ):
literal[string]
identifier[netconn] = identifier[__utils__] [ literal[string] ]( literal[string] ,** identifier[kwargs] )
keyword[try] :
identifier[result] = identifier[__utils__] [ literal[string] ]( identifier[netconn] . identifier[usages] . identifier[list] ( identifier[location] ))
keyword[except] identifier[CloudError] keyword[as] identifier[exc] :
identifier[__utils__] [ literal[string] ]( literal[string] , identifier[str] ( identifier[exc] ),** identifier[kwargs] )
identifier[result] ={ literal[string] : identifier[str] ( identifier[exc] )}
keyword[return] identifier[result] | def usages_list(location, **kwargs):
"""
.. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus
"""
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
result = __utils__['azurearm.paged_object_to_list'](netconn.usages.list(location)) # depends on [control=['try'], data=[]]
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)} # depends on [control=['except'], data=['exc']]
return result |
def check_repo_exists(deploy_repo, service='github', *, auth=None,
headers=None, ask=False):
"""
Checks that the repository exists on GitHub.
This should be done before attempting generate a key to deploy to that
repo.
Raises ``RuntimeError`` if the repo is not valid.
Returns a dictionary with the following keys:
- 'private': Indicates whether or not the repo requires authorization to
access. Private repos require authorization.
- 'service': For service='travis', is 'travis-ci.com' or 'travis-ci.org',
depending on which should be used. Otherwise it is just equal to ``service``.
For service='travis', if ask=True, it will ask at the command line if both
travis-ci.org and travis-ci.com exist. If ask=False, service='travis' will
check travis-ci.com first and only check travis-ci.org if it doesn't
exist. ask=True does nothing for service='github',
service='travis-ci.com', service='travis-ci.org'.
"""
headers = headers or {}
if deploy_repo.count("/") != 1:
raise RuntimeError('"{deploy_repo}" should be in the form username/repo'.format(deploy_repo=deploy_repo))
user, repo = deploy_repo.split('/')
if service == 'github':
REPO_URL = 'https://api.github.com/repos/{user}/{repo}'
elif service == 'travis' or service == 'travis-ci.com':
REPO_URL = 'https://api.travis-ci.com/repo/{user}%2F{repo}'
headers = {**headers, **Travis_APIv3}
elif service == 'travis-ci.org':
REPO_URL = 'https://api.travis-ci.org/repo/{user}%2F{repo}'
headers = {**headers, **Travis_APIv3}
else:
raise RuntimeError('Invalid service specified for repo check (should be one of {"github", "travis", "travis-ci.com", "travis-ci.org"}')
wiki = False
if repo.endswith('.wiki') and service == 'github':
wiki = True
repo = repo[:-5]
def _try(url):
r = requests.get(url, auth=auth, headers=headers)
if r.status_code in [requests.codes.not_found, requests.codes.forbidden]:
return False
if service == 'github':
GitHub_raise_for_status(r)
else:
r.raise_for_status()
return r
r = _try(REPO_URL.format(user=urllib.parse.quote(user),
repo=urllib.parse.quote(repo)))
r_active = r and (service == 'github' or r.json().get('active', False))
if service == 'travis':
REPO_URL = 'https://api.travis-ci.org/repo/{user}%2F{repo}'
r_org = _try(REPO_URL.format(user=urllib.parse.quote(user),
repo=urllib.parse.quote(repo)))
r_org_active = r_org and r_org.json().get('active', False)
if not r_active:
if not r_org_active:
raise RuntimeError('"{user}/{repo}" not found on travis-ci.org or travis-ci.com'.format(user=user, repo=repo))
r = r_org
r_active = r_org_active
service = 'travis-ci.org'
else:
if r_active and r_org_active:
if ask:
while True:
print(green("{user}/{repo} appears to exist on both travis-ci.org and travis-ci.com.".format(user=user, repo=repo)))
preferred = input("Which do you want to use? [{default}/travis-ci.org] ".format(default=blue("travis-ci.com")))
preferred = preferred.lower().strip()
if preferred in ['o', 'org', '.org', 'travis-ci.org']:
r = r_org
service = 'travis-ci.org'
break
elif preferred in ['c', 'com', '.com', 'travis-ci.com', '']:
service = 'travis-ci.com'
break
else:
print(red("Please type 'travis-ci.com' or 'travis-ci.org'."))
else:
service = 'travis-ci.com'
else:
# .com but not .org.
service = 'travis-ci.com'
if not r_active:
msg = '' if auth else '. If the repo is private, then you need to authenticate.'
raise RuntimeError('"{user}/{repo}" not found on {service}{msg}'.format(user=user,
repo=repo,
service=service,
msg=msg))
private = r.json().get('private', False)
if wiki and not private:
# private wiki needs authentication, so skip check for existence
p = subprocess.run(['git', 'ls-remote', '-h', 'https://github.com/{user}/{repo}.wiki'.format(
user=user, repo=repo)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
if p.stderr or p.returncode:
raise RuntimeError('Wiki not found. Please create a wiki')
return {
'private': private,
'service': service,
} | def function[check_repo_exists, parameter[deploy_repo, service]]:
constant[
Checks that the repository exists on GitHub.
This should be done before attempting generate a key to deploy to that
repo.
Raises ``RuntimeError`` if the repo is not valid.
Returns a dictionary with the following keys:
- 'private': Indicates whether or not the repo requires authorization to
access. Private repos require authorization.
- 'service': For service='travis', is 'travis-ci.com' or 'travis-ci.org',
depending on which should be used. Otherwise it is just equal to ``service``.
For service='travis', if ask=True, it will ask at the command line if both
travis-ci.org and travis-ci.com exist. If ask=False, service='travis' will
check travis-ci.com first and only check travis-ci.org if it doesn't
exist. ask=True does nothing for service='github',
service='travis-ci.com', service='travis-ci.org'.
]
variable[headers] assign[=] <ast.BoolOp object at 0x7da1b100cdf0>
if compare[call[name[deploy_repo].count, parameter[constant[/]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b100e5c0>
<ast.Tuple object at 0x7da1b100f2b0> assign[=] call[name[deploy_repo].split, parameter[constant[/]]]
if compare[name[service] equal[==] constant[github]] begin[:]
variable[REPO_URL] assign[=] constant[https://api.github.com/repos/{user}/{repo}]
variable[wiki] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b100dfc0> begin[:]
variable[wiki] assign[=] constant[True]
variable[repo] assign[=] call[name[repo]][<ast.Slice object at 0x7da1b100f4f0>]
def function[_try, parameter[url]]:
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[r].status_code in list[[<ast.Attribute object at 0x7da1b100ed40>, <ast.Attribute object at 0x7da1b100d720>]]] begin[:]
return[constant[False]]
if compare[name[service] equal[==] constant[github]] begin[:]
call[name[GitHub_raise_for_status], parameter[name[r]]]
return[name[r]]
variable[r] assign[=] call[name[_try], parameter[call[name[REPO_URL].format, parameter[]]]]
variable[r_active] assign[=] <ast.BoolOp object at 0x7da1b102bca0>
if compare[name[service] equal[==] constant[travis]] begin[:]
variable[REPO_URL] assign[=] constant[https://api.travis-ci.org/repo/{user}%2F{repo}]
variable[r_org] assign[=] call[name[_try], parameter[call[name[REPO_URL].format, parameter[]]]]
variable[r_org_active] assign[=] <ast.BoolOp object at 0x7da1b106a980>
if <ast.UnaryOp object at 0x7da1b106bcd0> begin[:]
if <ast.UnaryOp object at 0x7da1b106b880> begin[:]
<ast.Raise object at 0x7da1b106ae60>
variable[r] assign[=] name[r_org]
variable[r_active] assign[=] name[r_org_active]
variable[service] assign[=] constant[travis-ci.org]
if <ast.UnaryOp object at 0x7da1b1029180> begin[:]
variable[msg] assign[=] <ast.IfExp object at 0x7da1b102b010>
<ast.Raise object at 0x7da1b102b3a0>
variable[private] assign[=] call[call[name[r].json, parameter[]].get, parameter[constant[private], constant[False]]]
if <ast.BoolOp object at 0x7da1b102b880> begin[:]
variable[p] assign[=] call[name[subprocess].run, parameter[list[[<ast.Constant object at 0x7da1b10289a0>, <ast.Constant object at 0x7da1b102b340>, <ast.Constant object at 0x7da1b102b460>, <ast.Call object at 0x7da1b1028160>]]]]
if <ast.BoolOp object at 0x7da1b1028af0> begin[:]
<ast.Raise object at 0x7da1b102bfd0>
return[dictionary[[<ast.Constant object at 0x7da1b102b700>, <ast.Constant object at 0x7da1b102ac80>], [<ast.Name object at 0x7da1b1029660>, <ast.Name object at 0x7da1b1038a30>]]] | keyword[def] identifier[check_repo_exists] ( identifier[deploy_repo] , identifier[service] = literal[string] ,*, identifier[auth] = keyword[None] ,
identifier[headers] = keyword[None] , identifier[ask] = keyword[False] ):
literal[string]
identifier[headers] = identifier[headers] keyword[or] {}
keyword[if] identifier[deploy_repo] . identifier[count] ( literal[string] )!= literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[deploy_repo] = identifier[deploy_repo] ))
identifier[user] , identifier[repo] = identifier[deploy_repo] . identifier[split] ( literal[string] )
keyword[if] identifier[service] == literal[string] :
identifier[REPO_URL] = literal[string]
keyword[elif] identifier[service] == literal[string] keyword[or] identifier[service] == literal[string] :
identifier[REPO_URL] = literal[string]
identifier[headers] ={** identifier[headers] ,** identifier[Travis_APIv3] }
keyword[elif] identifier[service] == literal[string] :
identifier[REPO_URL] = literal[string]
identifier[headers] ={** identifier[headers] ,** identifier[Travis_APIv3] }
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[wiki] = keyword[False]
keyword[if] identifier[repo] . identifier[endswith] ( literal[string] ) keyword[and] identifier[service] == literal[string] :
identifier[wiki] = keyword[True]
identifier[repo] = identifier[repo] [:- literal[int] ]
keyword[def] identifier[_try] ( identifier[url] ):
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[headers] )
keyword[if] identifier[r] . identifier[status_code] keyword[in] [ identifier[requests] . identifier[codes] . identifier[not_found] , identifier[requests] . identifier[codes] . identifier[forbidden] ]:
keyword[return] keyword[False]
keyword[if] identifier[service] == literal[string] :
identifier[GitHub_raise_for_status] ( identifier[r] )
keyword[else] :
identifier[r] . identifier[raise_for_status] ()
keyword[return] identifier[r]
identifier[r] = identifier[_try] ( identifier[REPO_URL] . identifier[format] ( identifier[user] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[user] ),
identifier[repo] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[repo] )))
identifier[r_active] = identifier[r] keyword[and] ( identifier[service] == literal[string] keyword[or] identifier[r] . identifier[json] (). identifier[get] ( literal[string] , keyword[False] ))
keyword[if] identifier[service] == literal[string] :
identifier[REPO_URL] = literal[string]
identifier[r_org] = identifier[_try] ( identifier[REPO_URL] . identifier[format] ( identifier[user] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[user] ),
identifier[repo] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[repo] )))
identifier[r_org_active] = identifier[r_org] keyword[and] identifier[r_org] . identifier[json] (). identifier[get] ( literal[string] , keyword[False] )
keyword[if] keyword[not] identifier[r_active] :
keyword[if] keyword[not] identifier[r_org_active] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[user] = identifier[user] , identifier[repo] = identifier[repo] ))
identifier[r] = identifier[r_org]
identifier[r_active] = identifier[r_org_active]
identifier[service] = literal[string]
keyword[else] :
keyword[if] identifier[r_active] keyword[and] identifier[r_org_active] :
keyword[if] identifier[ask] :
keyword[while] keyword[True] :
identifier[print] ( identifier[green] ( literal[string] . identifier[format] ( identifier[user] = identifier[user] , identifier[repo] = identifier[repo] )))
identifier[preferred] = identifier[input] ( literal[string] . identifier[format] ( identifier[default] = identifier[blue] ( literal[string] )))
identifier[preferred] = identifier[preferred] . identifier[lower] (). identifier[strip] ()
keyword[if] identifier[preferred] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[r] = identifier[r_org]
identifier[service] = literal[string]
keyword[break]
keyword[elif] identifier[preferred] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[service] = literal[string]
keyword[break]
keyword[else] :
identifier[print] ( identifier[red] ( literal[string] ))
keyword[else] :
identifier[service] = literal[string]
keyword[else] :
identifier[service] = literal[string]
keyword[if] keyword[not] identifier[r_active] :
identifier[msg] = literal[string] keyword[if] identifier[auth] keyword[else] literal[string]
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[user] = identifier[user] ,
identifier[repo] = identifier[repo] ,
identifier[service] = identifier[service] ,
identifier[msg] = identifier[msg] ))
identifier[private] = identifier[r] . identifier[json] (). identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[wiki] keyword[and] keyword[not] identifier[private] :
identifier[p] = identifier[subprocess] . identifier[run] ([ literal[string] , literal[string] , literal[string] , literal[string] . identifier[format] (
identifier[user] = identifier[user] , identifier[repo] = identifier[repo] )], identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] , identifier[check] = keyword[False] )
keyword[if] identifier[p] . identifier[stderr] keyword[or] identifier[p] . identifier[returncode] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] {
literal[string] : identifier[private] ,
literal[string] : identifier[service] ,
} | def check_repo_exists(deploy_repo, service='github', *, auth=None, headers=None, ask=False):
"""
Checks that the repository exists on GitHub.
This should be done before attempting generate a key to deploy to that
repo.
Raises ``RuntimeError`` if the repo is not valid.
Returns a dictionary with the following keys:
- 'private': Indicates whether or not the repo requires authorization to
access. Private repos require authorization.
- 'service': For service='travis', is 'travis-ci.com' or 'travis-ci.org',
depending on which should be used. Otherwise it is just equal to ``service``.
For service='travis', if ask=True, it will ask at the command line if both
travis-ci.org and travis-ci.com exist. If ask=False, service='travis' will
check travis-ci.com first and only check travis-ci.org if it doesn't
exist. ask=True does nothing for service='github',
service='travis-ci.com', service='travis-ci.org'.
"""
headers = headers or {}
if deploy_repo.count('/') != 1:
raise RuntimeError('"{deploy_repo}" should be in the form username/repo'.format(deploy_repo=deploy_repo)) # depends on [control=['if'], data=[]]
(user, repo) = deploy_repo.split('/')
if service == 'github':
REPO_URL = 'https://api.github.com/repos/{user}/{repo}' # depends on [control=['if'], data=[]]
elif service == 'travis' or service == 'travis-ci.com':
REPO_URL = 'https://api.travis-ci.com/repo/{user}%2F{repo}'
headers = {**headers, **Travis_APIv3} # depends on [control=['if'], data=[]]
elif service == 'travis-ci.org':
REPO_URL = 'https://api.travis-ci.org/repo/{user}%2F{repo}'
headers = {**headers, **Travis_APIv3} # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Invalid service specified for repo check (should be one of {"github", "travis", "travis-ci.com", "travis-ci.org"}')
wiki = False
if repo.endswith('.wiki') and service == 'github':
wiki = True
repo = repo[:-5] # depends on [control=['if'], data=[]]
def _try(url):
r = requests.get(url, auth=auth, headers=headers)
if r.status_code in [requests.codes.not_found, requests.codes.forbidden]:
return False # depends on [control=['if'], data=[]]
if service == 'github':
GitHub_raise_for_status(r) # depends on [control=['if'], data=[]]
else:
r.raise_for_status()
return r
r = _try(REPO_URL.format(user=urllib.parse.quote(user), repo=urllib.parse.quote(repo)))
r_active = r and (service == 'github' or r.json().get('active', False))
if service == 'travis':
REPO_URL = 'https://api.travis-ci.org/repo/{user}%2F{repo}'
r_org = _try(REPO_URL.format(user=urllib.parse.quote(user), repo=urllib.parse.quote(repo)))
r_org_active = r_org and r_org.json().get('active', False)
if not r_active:
if not r_org_active:
raise RuntimeError('"{user}/{repo}" not found on travis-ci.org or travis-ci.com'.format(user=user, repo=repo)) # depends on [control=['if'], data=[]]
r = r_org
r_active = r_org_active
service = 'travis-ci.org' # depends on [control=['if'], data=[]]
elif r_active and r_org_active:
if ask:
while True:
print(green('{user}/{repo} appears to exist on both travis-ci.org and travis-ci.com.'.format(user=user, repo=repo)))
preferred = input('Which do you want to use? [{default}/travis-ci.org] '.format(default=blue('travis-ci.com')))
preferred = preferred.lower().strip()
if preferred in ['o', 'org', '.org', 'travis-ci.org']:
r = r_org
service = 'travis-ci.org'
break # depends on [control=['if'], data=[]]
elif preferred in ['c', 'com', '.com', 'travis-ci.com', '']:
service = 'travis-ci.com'
break # depends on [control=['if'], data=[]]
else:
print(red("Please type 'travis-ci.com' or 'travis-ci.org'.")) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
service = 'travis-ci.com' # depends on [control=['if'], data=[]]
else:
# .com but not .org.
service = 'travis-ci.com' # depends on [control=['if'], data=['service']]
if not r_active:
msg = '' if auth else '. If the repo is private, then you need to authenticate.'
raise RuntimeError('"{user}/{repo}" not found on {service}{msg}'.format(user=user, repo=repo, service=service, msg=msg)) # depends on [control=['if'], data=[]]
private = r.json().get('private', False)
if wiki and (not private):
# private wiki needs authentication, so skip check for existence
p = subprocess.run(['git', 'ls-remote', '-h', 'https://github.com/{user}/{repo}.wiki'.format(user=user, repo=repo)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
if p.stderr or p.returncode:
raise RuntimeError('Wiki not found. Please create a wiki') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return {'private': private, 'service': service} |
async def emit(self, record: LogRecord): # type: ignore
"""
Emit a record.
Output the record to the file, catering for rollover as described
in `do_rollover`.
"""
try:
if self.should_rollover(record):
async with self._rollover_lock:
if self.should_rollover(record):
await self.do_rollover()
await super().emit(record)
except Exception as e:
await self.handleError(record) | <ast.AsyncFunctionDef object at 0x7da2054a5ab0> | keyword[async] keyword[def] identifier[emit] ( identifier[self] , identifier[record] : identifier[LogRecord] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[should_rollover] ( identifier[record] ):
keyword[async] keyword[with] identifier[self] . identifier[_rollover_lock] :
keyword[if] identifier[self] . identifier[should_rollover] ( identifier[record] ):
keyword[await] identifier[self] . identifier[do_rollover] ()
keyword[await] identifier[super] (). identifier[emit] ( identifier[record] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[await] identifier[self] . identifier[handleError] ( identifier[record] ) | async def emit(self, record: LogRecord): # type: ignore
'\n Emit a record.\n\n Output the record to the file, catering for rollover as described\n in `do_rollover`.\n '
try:
if self.should_rollover(record):
async with self._rollover_lock:
if self.should_rollover(record):
await self.do_rollover() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
await super().emit(record) # depends on [control=['try'], data=[]]
except Exception as e:
await self.handleError(record) # depends on [control=['except'], data=[]] |
def add_firmware_manifest(self, name, datafile, key_table_file=None, **kwargs):
"""Add a new manifest reference.
:param str name: Manifest file short name (Required)
:param str datafile: The file object or path to the manifest file (Required)
:param str key_table_file: The file object or path to the key_table file (Optional)
:param str description: Manifest file description
:return: the newly created manifest file object
:rtype: FirmwareManifest
"""
kwargs.update({
'name': name,
'url': datafile, # really it's the datafile
})
if key_table_file is not None:
kwargs.update({'key_table_url': key_table_file}) # really it's the key_table
firmware_manifest = FirmwareManifest._create_request_map(kwargs)
api = self._get_api(update_service.DefaultApi)
return FirmwareManifest(
api.firmware_manifest_create(**firmware_manifest)
) | def function[add_firmware_manifest, parameter[self, name, datafile, key_table_file]]:
constant[Add a new manifest reference.
:param str name: Manifest file short name (Required)
:param str datafile: The file object or path to the manifest file (Required)
:param str key_table_file: The file object or path to the key_table file (Optional)
:param str description: Manifest file description
:return: the newly created manifest file object
:rtype: FirmwareManifest
]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da20c991030>, <ast.Constant object at 0x7da20c990b80>], [<ast.Name object at 0x7da20c992080>, <ast.Name object at 0x7da20c993a90>]]]]
if compare[name[key_table_file] is_not constant[None]] begin[:]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da20c991fc0>], [<ast.Name object at 0x7da20c992dd0>]]]]
variable[firmware_manifest] assign[=] call[name[FirmwareManifest]._create_request_map, parameter[name[kwargs]]]
variable[api] assign[=] call[name[self]._get_api, parameter[name[update_service].DefaultApi]]
return[call[name[FirmwareManifest], parameter[call[name[api].firmware_manifest_create, parameter[]]]]] | keyword[def] identifier[add_firmware_manifest] ( identifier[self] , identifier[name] , identifier[datafile] , identifier[key_table_file] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] . identifier[update] ({
literal[string] : identifier[name] ,
literal[string] : identifier[datafile] ,
})
keyword[if] identifier[key_table_file] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] . identifier[update] ({ literal[string] : identifier[key_table_file] })
identifier[firmware_manifest] = identifier[FirmwareManifest] . identifier[_create_request_map] ( identifier[kwargs] )
identifier[api] = identifier[self] . identifier[_get_api] ( identifier[update_service] . identifier[DefaultApi] )
keyword[return] identifier[FirmwareManifest] (
identifier[api] . identifier[firmware_manifest_create] (** identifier[firmware_manifest] )
) | def add_firmware_manifest(self, name, datafile, key_table_file=None, **kwargs):
"""Add a new manifest reference.
:param str name: Manifest file short name (Required)
:param str datafile: The file object or path to the manifest file (Required)
:param str key_table_file: The file object or path to the key_table file (Optional)
:param str description: Manifest file description
:return: the newly created manifest file object
:rtype: FirmwareManifest
""" # really it's the datafile
kwargs.update({'name': name, 'url': datafile})
if key_table_file is not None:
kwargs.update({'key_table_url': key_table_file}) # really it's the key_table # depends on [control=['if'], data=['key_table_file']]
firmware_manifest = FirmwareManifest._create_request_map(kwargs)
api = self._get_api(update_service.DefaultApi)
return FirmwareManifest(api.firmware_manifest_create(**firmware_manifest)) |
def SoS_exec(script: str, _dict: dict = None,
return_result: bool = True) -> None:
'''Execute a statement.'''
if _dict is None:
_dict = env.sos_dict.dict()
if not return_result:
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
return None
try:
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return
if isinstance(stmts[-1], ast.Expr):
# the last one is an expression and we will try to return the results
# so we first execute the previous statements
if len(stmts) > 1:
exec(
compile(
ast.Module(body=stmts[:-1]),
filename=stmtHash.hash(script),
mode="exec"), _dict)
# then we eval the last one
res = eval(
compile(
ast.Expression(body=stmts[-1].value),
filename=stmtHash.hash(script),
mode="eval"), _dict)
else:
# otherwise we just execute the entire code
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'),
_dict)
res = None
except SyntaxError as e:
raise SyntaxError(f"Invalid code {script}: {e}")
# if check_readonly:
# env.sos_dict.check_readonly_vars()
return res | def function[SoS_exec, parameter[script, _dict, return_result]]:
constant[Execute a statement.]
if compare[name[_dict] is constant[None]] begin[:]
variable[_dict] assign[=] call[name[env].sos_dict.dict, parameter[]]
if <ast.UnaryOp object at 0x7da1b1320880> begin[:]
call[name[exec], parameter[call[name[compile], parameter[name[script]]], name[_dict]]]
return[constant[None]]
<ast.Try object at 0x7da1b1320970>
return[name[res]] | keyword[def] identifier[SoS_exec] ( identifier[script] : identifier[str] , identifier[_dict] : identifier[dict] = keyword[None] ,
identifier[return_result] : identifier[bool] = keyword[True] )-> keyword[None] :
literal[string]
keyword[if] identifier[_dict] keyword[is] keyword[None] :
identifier[_dict] = identifier[env] . identifier[sos_dict] . identifier[dict] ()
keyword[if] keyword[not] identifier[return_result] :
identifier[exec] (
identifier[compile] ( identifier[script] , identifier[filename] = identifier[stmtHash] . identifier[hash] ( identifier[script] ), identifier[mode] = literal[string] ), identifier[_dict] )
keyword[return] keyword[None]
keyword[try] :
identifier[stmts] = identifier[list] ( identifier[ast] . identifier[iter_child_nodes] ( identifier[ast] . identifier[parse] ( identifier[script] )))
keyword[if] keyword[not] identifier[stmts] :
keyword[return]
keyword[if] identifier[isinstance] ( identifier[stmts] [- literal[int] ], identifier[ast] . identifier[Expr] ):
keyword[if] identifier[len] ( identifier[stmts] )> literal[int] :
identifier[exec] (
identifier[compile] (
identifier[ast] . identifier[Module] ( identifier[body] = identifier[stmts] [:- literal[int] ]),
identifier[filename] = identifier[stmtHash] . identifier[hash] ( identifier[script] ),
identifier[mode] = literal[string] ), identifier[_dict] )
identifier[res] = identifier[eval] (
identifier[compile] (
identifier[ast] . identifier[Expression] ( identifier[body] = identifier[stmts] [- literal[int] ]. identifier[value] ),
identifier[filename] = identifier[stmtHash] . identifier[hash] ( identifier[script] ),
identifier[mode] = literal[string] ), identifier[_dict] )
keyword[else] :
identifier[exec] (
identifier[compile] ( identifier[script] , identifier[filename] = identifier[stmtHash] . identifier[hash] ( identifier[script] ), identifier[mode] = literal[string] ),
identifier[_dict] )
identifier[res] = keyword[None]
keyword[except] identifier[SyntaxError] keyword[as] identifier[e] :
keyword[raise] identifier[SyntaxError] ( literal[string] )
keyword[return] identifier[res] | def SoS_exec(script: str, _dict: dict=None, return_result: bool=True) -> None:
"""Execute a statement."""
if _dict is None:
_dict = env.sos_dict.dict() # depends on [control=['if'], data=['_dict']]
if not return_result:
exec(compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
return None # depends on [control=['if'], data=[]]
try:
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return # depends on [control=['if'], data=[]]
if isinstance(stmts[-1], ast.Expr):
# the last one is an expression and we will try to return the results
# so we first execute the previous statements
if len(stmts) > 1:
exec(compile(ast.Module(body=stmts[:-1]), filename=stmtHash.hash(script), mode='exec'), _dict) # depends on [control=['if'], data=[]]
# then we eval the last one
res = eval(compile(ast.Expression(body=stmts[-1].value), filename=stmtHash.hash(script), mode='eval'), _dict) # depends on [control=['if'], data=[]]
else:
# otherwise we just execute the entire code
exec(compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
res = None # depends on [control=['try'], data=[]]
except SyntaxError as e:
raise SyntaxError(f'Invalid code {script}: {e}') # depends on [control=['except'], data=['e']]
# if check_readonly:
# env.sos_dict.check_readonly_vars()
return res |
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json["current_key"])
result.current_values = _ReducerReader.decode_data(json["current_values"])
return result | def function[from_json, parameter[cls, json]]:
constant[Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
]
variable[result] assign[=] call[call[name[super], parameter[name[_ReducerReader], name[cls]]].from_json, parameter[name[json]]]
name[result].current_key assign[=] call[name[_ReducerReader].decode_data, parameter[call[name[json]][constant[current_key]]]]
name[result].current_values assign[=] call[name[_ReducerReader].decode_data, parameter[call[name[json]][constant[current_values]]]]
return[name[result]] | keyword[def] identifier[from_json] ( identifier[cls] , identifier[json] ):
literal[string]
identifier[result] = identifier[super] ( identifier[_ReducerReader] , identifier[cls] ). identifier[from_json] ( identifier[json] )
identifier[result] . identifier[current_key] = identifier[_ReducerReader] . identifier[decode_data] ( identifier[json] [ literal[string] ])
identifier[result] . identifier[current_values] = identifier[_ReducerReader] . identifier[decode_data] ( identifier[json] [ literal[string] ])
keyword[return] identifier[result] | def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json['current_key'])
result.current_values = _ReducerReader.decode_data(json['current_values'])
return result |
def fmt_border(self, dimensions, t = 'm', border_style = 'utf8.a', border_formating = {}):
"""
Format table separator line.
"""
cells = []
for column in dimensions:
cells.append(self.bchar('h', t, border_style) * (dimensions[column] + 2))
border = '{}{}{}'.format(self.bchar('l', t, border_style), self.bchar('m', t, border_style).join(cells), self.bchar('r', t, border_style))
return self.fmt_text(border, **border_formating) | def function[fmt_border, parameter[self, dimensions, t, border_style, border_formating]]:
constant[
Format table separator line.
]
variable[cells] assign[=] list[[]]
for taget[name[column]] in starred[name[dimensions]] begin[:]
call[name[cells].append, parameter[binary_operation[call[name[self].bchar, parameter[constant[h], name[t], name[border_style]]] * binary_operation[call[name[dimensions]][name[column]] + constant[2]]]]]
variable[border] assign[=] call[constant[{}{}{}].format, parameter[call[name[self].bchar, parameter[constant[l], name[t], name[border_style]]], call[call[name[self].bchar, parameter[constant[m], name[t], name[border_style]]].join, parameter[name[cells]]], call[name[self].bchar, parameter[constant[r], name[t], name[border_style]]]]]
return[call[name[self].fmt_text, parameter[name[border]]]] | keyword[def] identifier[fmt_border] ( identifier[self] , identifier[dimensions] , identifier[t] = literal[string] , identifier[border_style] = literal[string] , identifier[border_formating] ={}):
literal[string]
identifier[cells] =[]
keyword[for] identifier[column] keyword[in] identifier[dimensions] :
identifier[cells] . identifier[append] ( identifier[self] . identifier[bchar] ( literal[string] , identifier[t] , identifier[border_style] )*( identifier[dimensions] [ identifier[column] ]+ literal[int] ))
identifier[border] = literal[string] . identifier[format] ( identifier[self] . identifier[bchar] ( literal[string] , identifier[t] , identifier[border_style] ), identifier[self] . identifier[bchar] ( literal[string] , identifier[t] , identifier[border_style] ). identifier[join] ( identifier[cells] ), identifier[self] . identifier[bchar] ( literal[string] , identifier[t] , identifier[border_style] ))
keyword[return] identifier[self] . identifier[fmt_text] ( identifier[border] ,** identifier[border_formating] ) | def fmt_border(self, dimensions, t='m', border_style='utf8.a', border_formating={}):
"""
Format table separator line.
"""
cells = []
for column in dimensions:
cells.append(self.bchar('h', t, border_style) * (dimensions[column] + 2)) # depends on [control=['for'], data=['column']]
border = '{}{}{}'.format(self.bchar('l', t, border_style), self.bchar('m', t, border_style).join(cells), self.bchar('r', t, border_style))
return self.fmt_text(border, **border_formating) |
def _make_images(self, images):
"""
Takes an image dict from the giphy api and converts it to attributes.
Any fields expected to be int (width, height, size, frames) will be attempted
to be converted. Also, the keys of `data` serve as the attribute names, but
with special action taken. Keys are split by the last underscore; anything prior
becomes the attribute name, anything after becomes a sub-attribute. For example:
fixed_width_downsampled will end up at `self.fixed_width.downsampled`
"""
# Order matters :)
process = ('original',
'fixed_width',
'fixed_height',
'fixed_width_downsampled',
'fixed_width_still',
'fixed_height_downsampled',
'fixed_height_still',
'downsized')
for key in process:
data = images.get(key)
# Ignore empties
if not data:
continue
parts = key.split('_')
# attr/subattr style
if len(parts) > 2:
attr, subattr = '_'.join(parts[:-1]), parts[-1]
else:
attr, subattr = '_'.join(parts), None
# Normalize data
img = AttrDict(self._normalized(data))
if subattr is None:
setattr(self, attr, img)
else:
setattr(getattr(self, attr), subattr, img) | def function[_make_images, parameter[self, images]]:
constant[
Takes an image dict from the giphy api and converts it to attributes.
Any fields expected to be int (width, height, size, frames) will be attempted
to be converted. Also, the keys of `data` serve as the attribute names, but
with special action taken. Keys are split by the last underscore; anything prior
becomes the attribute name, anything after becomes a sub-attribute. For example:
fixed_width_downsampled will end up at `self.fixed_width.downsampled`
]
variable[process] assign[=] tuple[[<ast.Constant object at 0x7da20cabee60>, <ast.Constant object at 0x7da20cabe0b0>, <ast.Constant object at 0x7da20cabe380>, <ast.Constant object at 0x7da20cabf2e0>, <ast.Constant object at 0x7da20cabfe20>, <ast.Constant object at 0x7da20cabfa30>, <ast.Constant object at 0x7da20cabd0c0>, <ast.Constant object at 0x7da20cabea40>]]
for taget[name[key]] in starred[name[process]] begin[:]
variable[data] assign[=] call[name[images].get, parameter[name[key]]]
if <ast.UnaryOp object at 0x7da20cabcfd0> begin[:]
continue
variable[parts] assign[=] call[name[key].split, parameter[constant[_]]]
if compare[call[name[len], parameter[name[parts]]] greater[>] constant[2]] begin[:]
<ast.Tuple object at 0x7da20cabcb20> assign[=] tuple[[<ast.Call object at 0x7da20cabe5f0>, <ast.Subscript object at 0x7da20cabcee0>]]
variable[img] assign[=] call[name[AttrDict], parameter[call[name[self]._normalized, parameter[name[data]]]]]
if compare[name[subattr] is constant[None]] begin[:]
call[name[setattr], parameter[name[self], name[attr], name[img]]] | keyword[def] identifier[_make_images] ( identifier[self] , identifier[images] ):
literal[string]
identifier[process] =( literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] )
keyword[for] identifier[key] keyword[in] identifier[process] :
identifier[data] = identifier[images] . identifier[get] ( identifier[key] )
keyword[if] keyword[not] identifier[data] :
keyword[continue]
identifier[parts] = identifier[key] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )> literal[int] :
identifier[attr] , identifier[subattr] = literal[string] . identifier[join] ( identifier[parts] [:- literal[int] ]), identifier[parts] [- literal[int] ]
keyword[else] :
identifier[attr] , identifier[subattr] = literal[string] . identifier[join] ( identifier[parts] ), keyword[None]
identifier[img] = identifier[AttrDict] ( identifier[self] . identifier[_normalized] ( identifier[data] ))
keyword[if] identifier[subattr] keyword[is] keyword[None] :
identifier[setattr] ( identifier[self] , identifier[attr] , identifier[img] )
keyword[else] :
identifier[setattr] ( identifier[getattr] ( identifier[self] , identifier[attr] ), identifier[subattr] , identifier[img] ) | def _make_images(self, images):
"""
Takes an image dict from the giphy api and converts it to attributes.
Any fields expected to be int (width, height, size, frames) will be attempted
to be converted. Also, the keys of `data` serve as the attribute names, but
with special action taken. Keys are split by the last underscore; anything prior
becomes the attribute name, anything after becomes a sub-attribute. For example:
fixed_width_downsampled will end up at `self.fixed_width.downsampled`
"""
# Order matters :)
process = ('original', 'fixed_width', 'fixed_height', 'fixed_width_downsampled', 'fixed_width_still', 'fixed_height_downsampled', 'fixed_height_still', 'downsized')
for key in process:
data = images.get(key)
# Ignore empties
if not data:
continue # depends on [control=['if'], data=[]]
parts = key.split('_')
# attr/subattr style
if len(parts) > 2:
(attr, subattr) = ('_'.join(parts[:-1]), parts[-1]) # depends on [control=['if'], data=[]]
else:
(attr, subattr) = ('_'.join(parts), None)
# Normalize data
img = AttrDict(self._normalized(data))
if subattr is None:
setattr(self, attr, img) # depends on [control=['if'], data=[]]
else:
setattr(getattr(self, attr), subattr, img) # depends on [control=['for'], data=['key']] |
def permission_to_perm(permission):
"""
Convert a permission instance to a permission-string.
Examples
--------
>>> permission = Permission.objects.get(
... content_type__app_label='auth',
... codename='add_user',
... )
>>> permission_to_perm(permission)
'auth.add_user'
"""
app_label = permission.content_type.app_label
codename = permission.codename
return '%s.%s' % (app_label, codename) | def function[permission_to_perm, parameter[permission]]:
constant[
Convert a permission instance to a permission-string.
Examples
--------
>>> permission = Permission.objects.get(
... content_type__app_label='auth',
... codename='add_user',
... )
>>> permission_to_perm(permission)
'auth.add_user'
]
variable[app_label] assign[=] name[permission].content_type.app_label
variable[codename] assign[=] name[permission].codename
return[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b06be860>, <ast.Name object at 0x7da1b06bf6d0>]]]] | keyword[def] identifier[permission_to_perm] ( identifier[permission] ):
literal[string]
identifier[app_label] = identifier[permission] . identifier[content_type] . identifier[app_label]
identifier[codename] = identifier[permission] . identifier[codename]
keyword[return] literal[string] %( identifier[app_label] , identifier[codename] ) | def permission_to_perm(permission):
"""
Convert a permission instance to a permission-string.
Examples
--------
>>> permission = Permission.objects.get(
... content_type__app_label='auth',
... codename='add_user',
... )
>>> permission_to_perm(permission)
'auth.add_user'
"""
app_label = permission.content_type.app_label
codename = permission.codename
return '%s.%s' % (app_label, codename) |
def _archive_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown):
"""Write info about a job run by this `Link` to the job archive"""
self._register_self(logfile, key, status)
if self._job_archive is None:
return
self._job_archive.register_jobs(self.get_jobs()) | def function[_archive_self, parameter[self, logfile, key, status]]:
constant[Write info about a job run by this `Link` to the job archive]
call[name[self]._register_self, parameter[name[logfile], name[key], name[status]]]
if compare[name[self]._job_archive is constant[None]] begin[:]
return[None]
call[name[self]._job_archive.register_jobs, parameter[call[name[self].get_jobs, parameter[]]]] | keyword[def] identifier[_archive_self] ( identifier[self] , identifier[logfile] , identifier[key] = identifier[JobDetails] . identifier[topkey] , identifier[status] = identifier[JobStatus] . identifier[unknown] ):
literal[string]
identifier[self] . identifier[_register_self] ( identifier[logfile] , identifier[key] , identifier[status] )
keyword[if] identifier[self] . identifier[_job_archive] keyword[is] keyword[None] :
keyword[return]
identifier[self] . identifier[_job_archive] . identifier[register_jobs] ( identifier[self] . identifier[get_jobs] ()) | def _archive_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown):
"""Write info about a job run by this `Link` to the job archive"""
self._register_self(logfile, key, status)
if self._job_archive is None:
return # depends on [control=['if'], data=[]]
self._job_archive.register_jobs(self.get_jobs()) |
def delete_channel(self, channel_name, project_name, dataset_name):
"""
Deletes a channel given its name, name of its project
, and name of its dataset.
Arguments:
channel_name (str): Channel name
project_name (str): Project name
dataset_name (str): Dataset name
Returns:
bool: True if channel deleted, False if not
"""
return self.resources.delete_channel(channel_name, project_name,
dataset_name) | def function[delete_channel, parameter[self, channel_name, project_name, dataset_name]]:
constant[
Deletes a channel given its name, name of its project
, and name of its dataset.
Arguments:
channel_name (str): Channel name
project_name (str): Project name
dataset_name (str): Dataset name
Returns:
bool: True if channel deleted, False if not
]
return[call[name[self].resources.delete_channel, parameter[name[channel_name], name[project_name], name[dataset_name]]]] | keyword[def] identifier[delete_channel] ( identifier[self] , identifier[channel_name] , identifier[project_name] , identifier[dataset_name] ):
literal[string]
keyword[return] identifier[self] . identifier[resources] . identifier[delete_channel] ( identifier[channel_name] , identifier[project_name] ,
identifier[dataset_name] ) | def delete_channel(self, channel_name, project_name, dataset_name):
"""
Deletes a channel given its name, name of its project
, and name of its dataset.
Arguments:
channel_name (str): Channel name
project_name (str): Project name
dataset_name (str): Dataset name
Returns:
bool: True if channel deleted, False if not
"""
return self.resources.delete_channel(channel_name, project_name, dataset_name) |
def read_varint64(self):
"""Reads a varint from the stream, interprets this varint
as a signed, 64-bit integer, and returns the integer.
"""
i = self.read_var_uint64()
if i > wire_format.INT64_MAX:
i -= (1 << 64)
return i | def function[read_varint64, parameter[self]]:
constant[Reads a varint from the stream, interprets this varint
as a signed, 64-bit integer, and returns the integer.
]
variable[i] assign[=] call[name[self].read_var_uint64, parameter[]]
if compare[name[i] greater[>] name[wire_format].INT64_MAX] begin[:]
<ast.AugAssign object at 0x7da204564af0>
return[name[i]] | keyword[def] identifier[read_varint64] ( identifier[self] ):
literal[string]
identifier[i] = identifier[self] . identifier[read_var_uint64] ()
keyword[if] identifier[i] > identifier[wire_format] . identifier[INT64_MAX] :
identifier[i] -=( literal[int] << literal[int] )
keyword[return] identifier[i] | def read_varint64(self):
"""Reads a varint from the stream, interprets this varint
as a signed, 64-bit integer, and returns the integer.
"""
i = self.read_var_uint64()
if i > wire_format.INT64_MAX:
i -= 1 << 64 # depends on [control=['if'], data=['i']]
return i |
def is_member(self, m):
"""Check if a user is a member of the chatroom"""
if not m:
return False
elif isinstance(m, basestring):
jid = m
else:
jid = m['JID']
is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0
return is_member | def function[is_member, parameter[self, m]]:
constant[Check if a user is a member of the chatroom]
if <ast.UnaryOp object at 0x7da20c7c8430> begin[:]
return[constant[False]]
variable[is_member] assign[=] compare[call[name[len], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da20c7c9c90>, call[name[self].params][constant[MEMBERS]]]]]] greater[>] constant[0]]
return[name[is_member]] | keyword[def] identifier[is_member] ( identifier[self] , identifier[m] ):
literal[string]
keyword[if] keyword[not] identifier[m] :
keyword[return] keyword[False]
keyword[elif] identifier[isinstance] ( identifier[m] , identifier[basestring] ):
identifier[jid] = identifier[m]
keyword[else] :
identifier[jid] = identifier[m] [ literal[string] ]
identifier[is_member] = identifier[len] ( identifier[filter] ( keyword[lambda] identifier[m] : identifier[m] [ literal[string] ]== identifier[jid] keyword[and] identifier[m] . identifier[get] ( literal[string] ) keyword[in] ( literal[string] , literal[string] ), identifier[self] . identifier[params] [ literal[string] ]))> literal[int]
keyword[return] identifier[is_member] | def is_member(self, m):
"""Check if a user is a member of the chatroom"""
if not m:
return False # depends on [control=['if'], data=[]]
elif isinstance(m, basestring):
jid = m # depends on [control=['if'], data=[]]
else:
jid = m['JID']
is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0
return is_member |
def originate(self, data='', syn=False, ack=False, fin=False, rst=False):
"""
Create a packet, enqueue it to be sent, and return it.
"""
if self._ackTimer is not None:
self._ackTimer.cancel()
self._ackTimer = None
if syn:
# We really should be randomizing the ISN but until we finish the
# implementations of the various bits of wraparound logic that were
# started with relativeSequence
assert self.nextSendSeqNum == 0, (
"NSSN = " + repr(self.nextSendSeqNum))
assert self.hostSendISN == 0
p = PTCPPacket.create(self.hostPseudoPort,
self.peerPseudoPort,
seqNum=(self.nextSendSeqNum +
self.hostSendISN) % (2**32),
ackNum=self.currentAckNum(),
data=data,
window=self.recvWindow,
syn=syn, ack=ack, fin=fin, rst=rst,
destination=self.peerAddressTuple)
# do we want to enqueue this packet for retransmission?
sl = p.segmentLength()
self.nextSendSeqNum += sl
if p.mustRetransmit():
# print self, 'originating retransmittable packet', len(self.retransmissionQueue)
if self.retransmissionQueue:
if self.retransmissionQueue[-1].fin:
raise AssertionError("Sending %r after FIN??!" % (p,))
# print 'putting it on the queue'
self.retransmissionQueue.append(p)
# print 'and sending it later'
self._retransmitLater()
if not self.sendWindowRemaining: # len(self.retransmissionQueue) > 5:
# print 'oh no my queue is too big'
# This is a random number (5) because I ought to be summing the
# packet lengths or something.
self._writeBufferFull()
else:
# print 'my queue is still small enough', len(self.retransmissionQueue), self, self.sendWindowRemaining
pass
self.ptcp.sendPacket(p)
return p | def function[originate, parameter[self, data, syn, ack, fin, rst]]:
constant[
Create a packet, enqueue it to be sent, and return it.
]
if compare[name[self]._ackTimer is_not constant[None]] begin[:]
call[name[self]._ackTimer.cancel, parameter[]]
name[self]._ackTimer assign[=] constant[None]
if name[syn] begin[:]
assert[compare[name[self].nextSendSeqNum equal[==] constant[0]]]
assert[compare[name[self].hostSendISN equal[==] constant[0]]]
variable[p] assign[=] call[name[PTCPPacket].create, parameter[name[self].hostPseudoPort, name[self].peerPseudoPort]]
variable[sl] assign[=] call[name[p].segmentLength, parameter[]]
<ast.AugAssign object at 0x7da2054a7910>
if call[name[p].mustRetransmit, parameter[]] begin[:]
if name[self].retransmissionQueue begin[:]
if call[name[self].retransmissionQueue][<ast.UnaryOp object at 0x7da2054a4d90>].fin begin[:]
<ast.Raise object at 0x7da20c7c9ae0>
call[name[self].retransmissionQueue.append, parameter[name[p]]]
call[name[self]._retransmitLater, parameter[]]
if <ast.UnaryOp object at 0x7da20c7cacb0> begin[:]
call[name[self]._writeBufferFull, parameter[]]
call[name[self].ptcp.sendPacket, parameter[name[p]]]
return[name[p]] | keyword[def] identifier[originate] ( identifier[self] , identifier[data] = literal[string] , identifier[syn] = keyword[False] , identifier[ack] = keyword[False] , identifier[fin] = keyword[False] , identifier[rst] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[_ackTimer] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_ackTimer] . identifier[cancel] ()
identifier[self] . identifier[_ackTimer] = keyword[None]
keyword[if] identifier[syn] :
keyword[assert] identifier[self] . identifier[nextSendSeqNum] == literal[int] ,(
literal[string] + identifier[repr] ( identifier[self] . identifier[nextSendSeqNum] ))
keyword[assert] identifier[self] . identifier[hostSendISN] == literal[int]
identifier[p] = identifier[PTCPPacket] . identifier[create] ( identifier[self] . identifier[hostPseudoPort] ,
identifier[self] . identifier[peerPseudoPort] ,
identifier[seqNum] =( identifier[self] . identifier[nextSendSeqNum] +
identifier[self] . identifier[hostSendISN] )%( literal[int] ** literal[int] ),
identifier[ackNum] = identifier[self] . identifier[currentAckNum] (),
identifier[data] = identifier[data] ,
identifier[window] = identifier[self] . identifier[recvWindow] ,
identifier[syn] = identifier[syn] , identifier[ack] = identifier[ack] , identifier[fin] = identifier[fin] , identifier[rst] = identifier[rst] ,
identifier[destination] = identifier[self] . identifier[peerAddressTuple] )
identifier[sl] = identifier[p] . identifier[segmentLength] ()
identifier[self] . identifier[nextSendSeqNum] += identifier[sl]
keyword[if] identifier[p] . identifier[mustRetransmit] ():
keyword[if] identifier[self] . identifier[retransmissionQueue] :
keyword[if] identifier[self] . identifier[retransmissionQueue] [- literal[int] ]. identifier[fin] :
keyword[raise] identifier[AssertionError] ( literal[string] %( identifier[p] ,))
identifier[self] . identifier[retransmissionQueue] . identifier[append] ( identifier[p] )
identifier[self] . identifier[_retransmitLater] ()
keyword[if] keyword[not] identifier[self] . identifier[sendWindowRemaining] :
identifier[self] . identifier[_writeBufferFull] ()
keyword[else] :
keyword[pass]
identifier[self] . identifier[ptcp] . identifier[sendPacket] ( identifier[p] )
keyword[return] identifier[p] | def originate(self, data='', syn=False, ack=False, fin=False, rst=False):
"""
Create a packet, enqueue it to be sent, and return it.
"""
if self._ackTimer is not None:
self._ackTimer.cancel()
self._ackTimer = None # depends on [control=['if'], data=[]]
if syn:
# We really should be randomizing the ISN but until we finish the
# implementations of the various bits of wraparound logic that were
# started with relativeSequence
assert self.nextSendSeqNum == 0, 'NSSN = ' + repr(self.nextSendSeqNum)
assert self.hostSendISN == 0 # depends on [control=['if'], data=[]]
p = PTCPPacket.create(self.hostPseudoPort, self.peerPseudoPort, seqNum=(self.nextSendSeqNum + self.hostSendISN) % 2 ** 32, ackNum=self.currentAckNum(), data=data, window=self.recvWindow, syn=syn, ack=ack, fin=fin, rst=rst, destination=self.peerAddressTuple)
# do we want to enqueue this packet for retransmission?
sl = p.segmentLength()
self.nextSendSeqNum += sl
if p.mustRetransmit():
# print self, 'originating retransmittable packet', len(self.retransmissionQueue)
if self.retransmissionQueue:
if self.retransmissionQueue[-1].fin:
raise AssertionError('Sending %r after FIN??!' % (p,)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# print 'putting it on the queue'
self.retransmissionQueue.append(p)
# print 'and sending it later'
self._retransmitLater()
if not self.sendWindowRemaining: # len(self.retransmissionQueue) > 5:
# print 'oh no my queue is too big'
# This is a random number (5) because I ought to be summing the
# packet lengths or something.
self._writeBufferFull() # depends on [control=['if'], data=[]]
else:
# print 'my queue is still small enough', len(self.retransmissionQueue), self, self.sendWindowRemaining
pass # depends on [control=['if'], data=[]]
self.ptcp.sendPacket(p)
return p |
def add_file(self, file, **kwargs):
"""Append a file to file repository.
For file monitoring, monitor instance needs file.
Please put the name of file to `file` argument.
:param file: the name of file you want monitor.
"""
if os.access(file, os.F_OK):
if file in self.f_repository:
raise DuplicationError("file already added.")
self.f_repository.append(file)
else:
raise IOError("file not found.") | def function[add_file, parameter[self, file]]:
constant[Append a file to file repository.
For file monitoring, monitor instance needs file.
Please put the name of file to `file` argument.
:param file: the name of file you want monitor.
]
if call[name[os].access, parameter[name[file], name[os].F_OK]] begin[:]
if compare[name[file] in name[self].f_repository] begin[:]
<ast.Raise object at 0x7da1afe8a800>
call[name[self].f_repository.append, parameter[name[file]]] | keyword[def] identifier[add_file] ( identifier[self] , identifier[file] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[os] . identifier[access] ( identifier[file] , identifier[os] . identifier[F_OK] ):
keyword[if] identifier[file] keyword[in] identifier[self] . identifier[f_repository] :
keyword[raise] identifier[DuplicationError] ( literal[string] )
identifier[self] . identifier[f_repository] . identifier[append] ( identifier[file] )
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] ) | def add_file(self, file, **kwargs):
"""Append a file to file repository.
For file monitoring, monitor instance needs file.
Please put the name of file to `file` argument.
:param file: the name of file you want monitor.
"""
if os.access(file, os.F_OK):
if file in self.f_repository:
raise DuplicationError('file already added.') # depends on [control=['if'], data=[]]
self.f_repository.append(file) # depends on [control=['if'], data=[]]
else:
raise IOError('file not found.') |
def vhost_add(cls, resource, params):
""" Add a vhost into a webaccelerator """
try:
oper = cls.call(
'hosting.rproxy.vhost.create', cls.usable_id(resource), params)
cls.echo('Adding your virtual host (%s) into %s' %
(params['vhost'], resource))
cls.display_progress(oper)
cls.echo('Your virtual host habe been added')
return oper
except Exception as err:
if err.code == 580142:
dc = cls.info(resource)
dns_entry = cls.call('hosting.rproxy.vhost.get_dns_entries',
{'datacenter': dc['datacenter']['id'],
'vhost': params['vhost']})
txt_record = "%s 3600 IN TXT \"%s=%s\"" % (dns_entry['key'],
dns_entry['key'],
dns_entry['txt'])
cname_record = "%s 3600 IN CNAME %s" % (dns_entry['key'],
dns_entry['cname'])
cls.echo('The domain don\'t use Gandi DNS or you have not'
' sufficient right to alter the zone file. '
'Edit your zone file adding this TXT and CNAME '
'record and try again :')
cls.echo(txt_record)
cls.echo(cname_record)
cls.echo('\nOr add a file containing %s at :\n'
'http://%s/%s.txt\n' % (dns_entry['txt'],
dns_entry['domain'],
dns_entry['txt']))
else:
cls.echo(err) | def function[vhost_add, parameter[cls, resource, params]]:
constant[ Add a vhost into a webaccelerator ]
<ast.Try object at 0x7da1b2347910> | keyword[def] identifier[vhost_add] ( identifier[cls] , identifier[resource] , identifier[params] ):
literal[string]
keyword[try] :
identifier[oper] = identifier[cls] . identifier[call] (
literal[string] , identifier[cls] . identifier[usable_id] ( identifier[resource] ), identifier[params] )
identifier[cls] . identifier[echo] ( literal[string] %
( identifier[params] [ literal[string] ], identifier[resource] ))
identifier[cls] . identifier[display_progress] ( identifier[oper] )
identifier[cls] . identifier[echo] ( literal[string] )
keyword[return] identifier[oper]
keyword[except] identifier[Exception] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[code] == literal[int] :
identifier[dc] = identifier[cls] . identifier[info] ( identifier[resource] )
identifier[dns_entry] = identifier[cls] . identifier[call] ( literal[string] ,
{ literal[string] : identifier[dc] [ literal[string] ][ literal[string] ],
literal[string] : identifier[params] [ literal[string] ]})
identifier[txt_record] = literal[string] %( identifier[dns_entry] [ literal[string] ],
identifier[dns_entry] [ literal[string] ],
identifier[dns_entry] [ literal[string] ])
identifier[cname_record] = literal[string] %( identifier[dns_entry] [ literal[string] ],
identifier[dns_entry] [ literal[string] ])
identifier[cls] . identifier[echo] ( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[cls] . identifier[echo] ( identifier[txt_record] )
identifier[cls] . identifier[echo] ( identifier[cname_record] )
identifier[cls] . identifier[echo] ( literal[string]
literal[string] %( identifier[dns_entry] [ literal[string] ],
identifier[dns_entry] [ literal[string] ],
identifier[dns_entry] [ literal[string] ]))
keyword[else] :
identifier[cls] . identifier[echo] ( identifier[err] ) | def vhost_add(cls, resource, params):
""" Add a vhost into a webaccelerator """
try:
oper = cls.call('hosting.rproxy.vhost.create', cls.usable_id(resource), params)
cls.echo('Adding your virtual host (%s) into %s' % (params['vhost'], resource))
cls.display_progress(oper)
cls.echo('Your virtual host habe been added')
return oper # depends on [control=['try'], data=[]]
except Exception as err:
if err.code == 580142:
dc = cls.info(resource)
dns_entry = cls.call('hosting.rproxy.vhost.get_dns_entries', {'datacenter': dc['datacenter']['id'], 'vhost': params['vhost']})
txt_record = '%s 3600 IN TXT "%s=%s"' % (dns_entry['key'], dns_entry['key'], dns_entry['txt'])
cname_record = '%s 3600 IN CNAME %s' % (dns_entry['key'], dns_entry['cname'])
cls.echo("The domain don't use Gandi DNS or you have not sufficient right to alter the zone file. Edit your zone file adding this TXT and CNAME record and try again :")
cls.echo(txt_record)
cls.echo(cname_record)
cls.echo('\nOr add a file containing %s at :\nhttp://%s/%s.txt\n' % (dns_entry['txt'], dns_entry['domain'], dns_entry['txt'])) # depends on [control=['if'], data=[]]
else:
cls.echo(err) # depends on [control=['except'], data=['err']] |
def _get_value(self, var):
"""Return value of variable in solution."""
return self._problem._cp.solution.get_values(
self._problem._variables[var]) | def function[_get_value, parameter[self, var]]:
constant[Return value of variable in solution.]
return[call[name[self]._problem._cp.solution.get_values, parameter[call[name[self]._problem._variables][name[var]]]]] | keyword[def] identifier[_get_value] ( identifier[self] , identifier[var] ):
literal[string]
keyword[return] identifier[self] . identifier[_problem] . identifier[_cp] . identifier[solution] . identifier[get_values] (
identifier[self] . identifier[_problem] . identifier[_variables] [ identifier[var] ]) | def _get_value(self, var):
"""Return value of variable in solution."""
return self._problem._cp.solution.get_values(self._problem._variables[var]) |
def error_response(self, kwargs_lens, kwargs_ps):
    """
    returns the 1d array of the error estimate corresponding to the data response

    :param kwargs_lens: lens model keyword argument list (shared across bands;
        per-band subsets are selected via ``self._idex_lens_list``)
    :param kwargs_ps: point source keyword argument list, passed through to
        each band's image model
    :return: 1d numpy array of response, list of additional errors
        (e.g. point source uncertainties), one entry per evaluated band
    """
    responses = []  # per-band 1d error responses, concatenated once at the end
    model_error = []
    for i in range(self._num_bands):
        if self._compute_bool[i] is not True:
            continue  # band excluded from this computation
        # select the lens model components relevant for this band
        kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]]
        C_D_response_i, model_error_i = self._imageModel_list[i].error_response(
            kwargs_lens_i, kwargs_ps)
        model_error.append(model_error_i)
        responses.append(C_D_response_i)
    if not responses:
        # no band computed: keep the original empty-list return value
        return [], model_error
    # Single concatenation replaces the original accumulator that compared
    # `C_D_response == []` (elementwise/FutureWarning once it becomes an
    # ndarray) and grew via repeated np.append (O(n^2) copying).
    C_D_response = responses[0] if len(responses) == 1 else np.concatenate(responses)
    return C_D_response, model_error
constant[
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties)
]
<ast.Tuple object at 0x7da18bcc9630> assign[=] tuple[[<ast.List object at 0x7da18bccb9a0>, <ast.List object at 0x7da18bcc8d00>]]
for taget[name[i]] in starred[call[name[range], parameter[name[self]._num_bands]]] begin[:]
if compare[call[name[self]._compute_bool][name[i]] is constant[True]] begin[:]
variable[kwargs_lens_i] assign[=] <ast.ListComp object at 0x7da18bcc9b10>
<ast.Tuple object at 0x7da18bcca020> assign[=] call[call[name[self]._imageModel_list][name[i]].error_response, parameter[name[kwargs_lens_i], name[kwargs_ps]]]
call[name[model_error].append, parameter[name[model_error_i]]]
if compare[name[C_D_response] equal[==] list[[]]] begin[:]
variable[C_D_response] assign[=] name[C_D_response_i]
return[tuple[[<ast.Name object at 0x7da18dc99090>, <ast.Name object at 0x7da18dc9a6b0>]]] | keyword[def] identifier[error_response] ( identifier[self] , identifier[kwargs_lens] , identifier[kwargs_ps] ):
literal[string]
identifier[C_D_response] , identifier[model_error] =[],[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[_num_bands] ):
keyword[if] identifier[self] . identifier[_compute_bool] [ identifier[i] ] keyword[is] keyword[True] :
identifier[kwargs_lens_i] =[ identifier[kwargs_lens] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_idex_lens_list] [ identifier[i] ]]
identifier[C_D_response_i] , identifier[model_error_i] = identifier[self] . identifier[_imageModel_list] [ identifier[i] ]. identifier[error_response] ( identifier[kwargs_lens_i] , identifier[kwargs_ps] )
identifier[model_error] . identifier[append] ( identifier[model_error_i] )
keyword[if] identifier[C_D_response] ==[]:
identifier[C_D_response] = identifier[C_D_response_i]
keyword[else] :
identifier[C_D_response] = identifier[np] . identifier[append] ( identifier[C_D_response] , identifier[C_D_response_i] )
keyword[return] identifier[C_D_response] , identifier[model_error] | def error_response(self, kwargs_lens, kwargs_ps):
"""
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties)
"""
(C_D_response, model_error) = ([], [])
for i in range(self._num_bands):
if self._compute_bool[i] is True:
kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]]
(C_D_response_i, model_error_i) = self._imageModel_list[i].error_response(kwargs_lens_i, kwargs_ps)
model_error.append(model_error_i)
if C_D_response == []:
C_D_response = C_D_response_i # depends on [control=['if'], data=['C_D_response']]
else:
C_D_response = np.append(C_D_response, C_D_response_i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return (C_D_response, model_error) |
def FromDBInstance(db_token):
    """
    Get a NEP5Token instance from a database token.

    Args:
        db_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):

    Returns:
        NEP5Token: self.
    """
    # The contract hash is stored hex-encoded; decode it and reverse the
    # byte order to obtain the UInt160 script hash representation.
    raw_hash = bytearray(binascii.unhexlify(db_token.ContractHash))
    raw_hash.reverse()
    script_hash = UInt160(data=raw_hash)

    token = NEP5Token(script=None)
    token.SetScriptHash(script_hash)
    # copy the token metadata straight from the database row
    token.name = db_token.Name
    token.symbol = db_token.Symbol
    token.decimals = db_token.Decimals
    return token
return token | def function[FromDBInstance, parameter[db_token]]:
constant[
Get a NEP5Token instance from a database token.
Args:
db_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):
Returns:
NEP5Token: self.
]
variable[hash_ar] assign[=] call[name[bytearray], parameter[call[name[binascii].unhexlify, parameter[name[db_token].ContractHash]]]]
call[name[hash_ar].reverse, parameter[]]
variable[hash] assign[=] call[name[UInt160], parameter[]]
variable[token] assign[=] call[name[NEP5Token], parameter[]]
call[name[token].SetScriptHash, parameter[name[hash]]]
name[token].name assign[=] name[db_token].Name
name[token].symbol assign[=] name[db_token].Symbol
name[token].decimals assign[=] name[db_token].Decimals
return[name[token]] | keyword[def] identifier[FromDBInstance] ( identifier[db_token] ):
literal[string]
identifier[hash_ar] = identifier[bytearray] ( identifier[binascii] . identifier[unhexlify] ( identifier[db_token] . identifier[ContractHash] ))
identifier[hash_ar] . identifier[reverse] ()
identifier[hash] = identifier[UInt160] ( identifier[data] = identifier[hash_ar] )
identifier[token] = identifier[NEP5Token] ( identifier[script] = keyword[None] )
identifier[token] . identifier[SetScriptHash] ( identifier[hash] )
identifier[token] . identifier[name] = identifier[db_token] . identifier[Name]
identifier[token] . identifier[symbol] = identifier[db_token] . identifier[Symbol]
identifier[token] . identifier[decimals] = identifier[db_token] . identifier[Decimals]
keyword[return] identifier[token] | def FromDBInstance(db_token):
"""
Get a NEP5Token instance from a database token.
Args:
db_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):
Returns:
NEP5Token: self.
"""
hash_ar = bytearray(binascii.unhexlify(db_token.ContractHash))
hash_ar.reverse()
hash = UInt160(data=hash_ar)
token = NEP5Token(script=None)
token.SetScriptHash(hash)
token.name = db_token.Name
token.symbol = db_token.Symbol
token.decimals = db_token.Decimals
return token |
def get_all_accounts(self):
    """iterates through trie to and yields non-blank leafs as accounts."""
    for addr_hash, node_data in self.secure_trie.trie.iter_branch():
        if node_data == trie.BLANK_NODE:
            continue  # skip empty (blank) leaf nodes
        yield rlp.decode(node_data, Account, db=self.db, addr=addr_hash)
yield rlp.decode(rlpdata, Account, db=self.db, addr=address_hash) | def function[get_all_accounts, parameter[self]]:
constant[iterates through trie to and yields non-blank leafs as accounts.]
for taget[tuple[[<ast.Name object at 0x7da1b1d34790>, <ast.Name object at 0x7da1b1d34400>]]] in starred[call[name[self].secure_trie.trie.iter_branch, parameter[]]] begin[:]
if compare[name[rlpdata] not_equal[!=] name[trie].BLANK_NODE] begin[:]
<ast.Yield object at 0x7da1b1d35a50> | keyword[def] identifier[get_all_accounts] ( identifier[self] ):
literal[string]
keyword[for] identifier[address_hash] , identifier[rlpdata] keyword[in] identifier[self] . identifier[secure_trie] . identifier[trie] . identifier[iter_branch] ():
keyword[if] identifier[rlpdata] != identifier[trie] . identifier[BLANK_NODE] :
keyword[yield] identifier[rlp] . identifier[decode] ( identifier[rlpdata] , identifier[Account] , identifier[db] = identifier[self] . identifier[db] , identifier[addr] = identifier[address_hash] ) | def get_all_accounts(self):
"""iterates through trie to and yields non-blank leafs as accounts."""
for (address_hash, rlpdata) in self.secure_trie.trie.iter_branch():
if rlpdata != trie.BLANK_NODE:
yield rlp.decode(rlpdata, Account, db=self.db, addr=address_hash) # depends on [control=['if'], data=['rlpdata']] # depends on [control=['for'], data=[]] |
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws | def function[_build_master, parameter[cls]]:
constant[
Prepare the master working set.
]
variable[ws] assign[=] call[name[cls], parameter[]]
<ast.Try object at 0x7da18ede6290>
<ast.Try object at 0x7da18ede4130>
return[name[ws]] | keyword[def] identifier[_build_master] ( identifier[cls] ):
literal[string]
identifier[ws] = identifier[cls] ()
keyword[try] :
keyword[from] identifier[__main__] keyword[import] identifier[__requires__]
keyword[except] identifier[ImportError] :
keyword[return] identifier[ws]
keyword[try] :
identifier[ws] . identifier[require] ( identifier[__requires__] )
keyword[except] identifier[VersionConflict] :
keyword[return] identifier[cls] . identifier[_build_from_requirements] ( identifier[__requires__] )
keyword[return] identifier[ws] | def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__ # depends on [control=['try'], data=[]]
except ImportError:
# The main program does not list any requirements
return ws # depends on [control=['except'], data=[]]
# ensure the requirements are met
try:
ws.require(__requires__) # depends on [control=['try'], data=[]]
except VersionConflict:
return cls._build_from_requirements(__requires__) # depends on [control=['except'], data=[]]
return ws |
def open_tablebase(directory: PathLike, *,
                   load_wdl: bool = True,
                   load_dtz: bool = True,
                   max_fds: Optional[int] = 128,
                   VariantBoard: Type[chess.Board] = chess.Board) -> Tablebase:
    """
    Opens a collection of tables for probing. See
    :class:`~chess.syzygy.Tablebase`.

    .. note::
        Generally probing requires tablebase files for the specific
        material composition, **as well as** tablebase files with less pieces.
        This is important because 6-piece and 5-piece files are often
        distributed seperately, but are both required for 6-piece positions.

        Use :func:`~chess.syzygy.Tablebase.add_directory()` to load
        tables from additional directories.
    """
    tablebase = Tablebase(max_fds=max_fds, VariantBoard=VariantBoard)
    tablebase.add_directory(directory, load_wdl=load_wdl, load_dtz=load_dtz)
    return tablebase
constant[
Opens a collection of tables for probing. See
:class:`~chess.syzygy.Tablebase`.
.. note::
Generally probing requires tablebase files for the specific
material composition, **as well as** tablebase files with less pieces.
This is important because 6-piece and 5-piece files are often
distributed seperately, but are both required for 6-piece positions.
Use :func:`~chess.syzygy.Tablebase.add_directory()` to load
tables from additional directories.
]
variable[tables] assign[=] call[name[Tablebase], parameter[]]
call[name[tables].add_directory, parameter[name[directory]]]
return[name[tables]] | keyword[def] identifier[open_tablebase] ( identifier[directory] : identifier[PathLike] ,*, identifier[load_wdl] : identifier[bool] = keyword[True] , identifier[load_dtz] : identifier[bool] = keyword[True] , identifier[max_fds] : identifier[Optional] [ identifier[int] ]= literal[int] , identifier[VariantBoard] : identifier[Type] [ identifier[chess] . identifier[Board] ]= identifier[chess] . identifier[Board] )-> identifier[Tablebase] :
literal[string]
identifier[tables] = identifier[Tablebase] ( identifier[max_fds] = identifier[max_fds] , identifier[VariantBoard] = identifier[VariantBoard] )
identifier[tables] . identifier[add_directory] ( identifier[directory] , identifier[load_wdl] = identifier[load_wdl] , identifier[load_dtz] = identifier[load_dtz] )
keyword[return] identifier[tables] | def open_tablebase(directory: PathLike, *, load_wdl: bool=True, load_dtz: bool=True, max_fds: Optional[int]=128, VariantBoard: Type[chess.Board]=chess.Board) -> Tablebase:
"""
Opens a collection of tables for probing. See
:class:`~chess.syzygy.Tablebase`.
.. note::
Generally probing requires tablebase files for the specific
material composition, **as well as** tablebase files with less pieces.
This is important because 6-piece and 5-piece files are often
distributed seperately, but are both required for 6-piece positions.
Use :func:`~chess.syzygy.Tablebase.add_directory()` to load
tables from additional directories.
"""
tables = Tablebase(max_fds=max_fds, VariantBoard=VariantBoard)
tables.add_directory(directory, load_wdl=load_wdl, load_dtz=load_dtz)
return tables |
def _get_stockprices(symbols, chart_range='1y'):
    '''Get stock data (key stats and previous) from IEX.

    Just deal with IEX's 100 stocks limit per request.
    '''
    def fetch(batch):
        # one IEX chart request per batch of symbols
        raw = _ensure_dict(
            iexfinance.stocks.Stock(batch).get_chart(range=chart_range),
            batch,
        )
        frames = {}
        for sym, rows in raw.items():
            frame = pd.DataFrame(
                rows,
                columns=('date', 'open', 'high', 'low', 'close', 'volume'),
            ).set_index('date')
            # normalize the date index to timezone-aware (UTC) timestamps
            frame.index = pd.to_datetime(frame.index, utc=True)
            frames[sym] = frame
        return frames

    # split requests into batches of 99 symbols to stay under IEX's limit
    return parallelize(fetch, splitlen=99)(symbols)
return parallelize(fetch, splitlen=99)(symbols) | def function[_get_stockprices, parameter[symbols, chart_range]]:
constant[Get stock data (key stats and previous) from IEX.
Just deal with IEX's 100 stocks limit per request.
]
def function[fetch, parameter[symbols]]:
variable[charts] assign[=] call[name[_ensure_dict], parameter[call[call[name[iexfinance].stocks.Stock, parameter[name[symbols]]].get_chart, parameter[]], name[symbols]]]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18dc98820>, <ast.Name object at 0x7da18dc9ba60>]]] in starred[call[name[charts].items, parameter[]]] begin[:]
variable[df] assign[=] call[call[name[pd].DataFrame, parameter[name[obj]]].set_index, parameter[constant[date]]]
name[df].index assign[=] call[name[pd].to_datetime, parameter[name[df].index]]
call[name[result]][name[symbol]] assign[=] name[df]
return[name[result]]
return[call[call[name[parallelize], parameter[name[fetch]]], parameter[name[symbols]]]] | keyword[def] identifier[_get_stockprices] ( identifier[symbols] , identifier[chart_range] = literal[string] ):
literal[string]
keyword[def] identifier[fetch] ( identifier[symbols] ):
identifier[charts] = identifier[_ensure_dict] (
identifier[iexfinance] . identifier[stocks] . identifier[Stock] ( identifier[symbols] ). identifier[get_chart] ( identifier[range] = identifier[chart_range] ),
identifier[symbols]
)
identifier[result] ={}
keyword[for] identifier[symbol] , identifier[obj] keyword[in] identifier[charts] . identifier[items] ():
identifier[df] = identifier[pd] . identifier[DataFrame] (
identifier[obj] ,
identifier[columns] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ),
). identifier[set_index] ( literal[string] )
identifier[df] . identifier[index] = identifier[pd] . identifier[to_datetime] ( identifier[df] . identifier[index] , identifier[utc] = keyword[True] )
identifier[result] [ identifier[symbol] ]= identifier[df]
keyword[return] identifier[result]
keyword[return] identifier[parallelize] ( identifier[fetch] , identifier[splitlen] = literal[int] )( identifier[symbols] ) | def _get_stockprices(symbols, chart_range='1y'):
"""Get stock data (key stats and previous) from IEX.
Just deal with IEX's 100 stocks limit per request.
"""
def fetch(symbols):
charts = _ensure_dict(iexfinance.stocks.Stock(symbols).get_chart(range=chart_range), symbols)
result = {}
for (symbol, obj) in charts.items():
df = pd.DataFrame(obj, columns=('date', 'open', 'high', 'low', 'close', 'volume')).set_index('date')
df.index = pd.to_datetime(df.index, utc=True)
result[symbol] = df # depends on [control=['for'], data=[]]
return result
return parallelize(fetch, splitlen=99)(symbols) |
def connect(creds):
    """
    Construct a connection value from a container
    """
    # every field below is forwarded verbatim from the credentials container
    option_fields = (
        "domain_id", "domain_name", "tenant_id", "user_id",
        "user_domain_id", "user_domain_name", "project_id",
        "project_name", "project_domain_id", "project_domain_name",
    )
    os_options = {field: getattr(creds, field) for field in option_fields}
    os_options["region_name"] = creds.region
    os_options["endpoint_type"] = creds.endpoint_type
    return swiftclient.Connection(
        authurl=creds.authurl,
        user=creds.user,
        key=creds.password,
        auth_version=creds.auth_version,
        tenant_name=creds.tenant_name,
        os_options=os_options,
    )
constant[
Construct a connection value from a container
]
return[call[name[swiftclient].Connection, parameter[]]] | keyword[def] identifier[connect] ( identifier[creds] ):
literal[string]
keyword[return] identifier[swiftclient] . identifier[Connection] (
identifier[authurl] = identifier[creds] . identifier[authurl] ,
identifier[user] = identifier[creds] . identifier[user] ,
identifier[key] = identifier[creds] . identifier[password] ,
identifier[auth_version] = identifier[creds] . identifier[auth_version] ,
identifier[tenant_name] = identifier[creds] . identifier[tenant_name] ,
identifier[os_options] ={
literal[string] : identifier[creds] . identifier[region] ,
literal[string] : identifier[creds] . identifier[endpoint_type] ,
literal[string] : identifier[creds] . identifier[domain_id] ,
literal[string] : identifier[creds] . identifier[domain_name] ,
literal[string] : identifier[creds] . identifier[tenant_id] ,
literal[string] : identifier[creds] . identifier[user_id] ,
literal[string] : identifier[creds] . identifier[user_domain_id] ,
literal[string] : identifier[creds] . identifier[user_domain_name] ,
literal[string] : identifier[creds] . identifier[project_id] ,
literal[string] : identifier[creds] . identifier[project_name] ,
literal[string] : identifier[creds] . identifier[project_domain_id] ,
literal[string] : identifier[creds] . identifier[project_domain_name] ,
}
) | def connect(creds):
"""
Construct a connection value from a container
"""
return swiftclient.Connection(authurl=creds.authurl, user=creds.user, key=creds.password, auth_version=creds.auth_version, tenant_name=creds.tenant_name, os_options={'region_name': creds.region, 'endpoint_type': creds.endpoint_type, 'domain_id': creds.domain_id, 'domain_name': creds.domain_name, 'tenant_id': creds.tenant_id, 'user_id': creds.user_id, 'user_domain_id': creds.user_domain_id, 'user_domain_name': creds.user_domain_name, 'project_id': creds.project_id, 'project_name': creds.project_name, 'project_domain_id': creds.project_domain_id, 'project_domain_name': creds.project_domain_name}) |
def diff_prof(step):
    """Diffusion.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
    Returns:
        tuple of :class:`numpy.array`: the diffusion and the radial position
            at which it is evaluated.
    """
    rbot, rtop = misc.get_rbounds(step)
    rad = step.rprof['r'].values + rbot
    tprof = step.rprof['Tmean'].values
    # finite-difference temperature gradient between consecutive cell centres
    # (-np.diff(t)/np.diff(r) == (t[:-1]-t[1:])/(r[1:]-r[:-1]), bit-for-bit)
    interior = -np.diff(tprof) / np.diff(rad)
    # boundary gradients assume tbot = 1 and ttop = 0
    bottom = (1 - tprof[0]) / (rad[0] - rbot)
    top = tprof[-1] / (rtop - rad[-1])
    diff = np.concatenate(([bottom], interior, [top]))
    # values are actually evaluated at the cell edges
    return diff, np.append(rad, rtop)
constant[Diffusion.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
]
<ast.Tuple object at 0x7da1b1846d40> assign[=] call[name[misc].get_rbounds, parameter[name[step]]]
variable[rad] assign[=] binary_operation[call[name[step].rprof][constant[r]].values + name[rbot]]
variable[tprof] assign[=] call[name[step].rprof][constant[Tmean]].values
variable[diff] assign[=] binary_operation[binary_operation[call[name[tprof]][<ast.Slice object at 0x7da1b18454e0>] - call[name[tprof]][<ast.Slice object at 0x7da1b1845b70>]] / binary_operation[call[name[rad]][<ast.Slice object at 0x7da1b1845d80>] - call[name[rad]][<ast.Slice object at 0x7da1b1845930>]]]
variable[diff] assign[=] call[name[np].insert, parameter[name[diff], constant[0], binary_operation[binary_operation[constant[1] - call[name[tprof]][constant[0]]] / binary_operation[call[name[rad]][constant[0]] - name[rbot]]]]]
variable[diff] assign[=] call[name[np].append, parameter[name[diff], binary_operation[call[name[tprof]][<ast.UnaryOp object at 0x7da1b1845e40>] / binary_operation[name[rtop] - call[name[rad]][<ast.UnaryOp object at 0x7da1b1846c20>]]]]]
return[tuple[[<ast.Name object at 0x7da1b1844370>, <ast.Call object at 0x7da1b18446a0>]]] | keyword[def] identifier[diff_prof] ( identifier[step] ):
literal[string]
identifier[rbot] , identifier[rtop] = identifier[misc] . identifier[get_rbounds] ( identifier[step] )
identifier[rad] = identifier[step] . identifier[rprof] [ literal[string] ]. identifier[values] + identifier[rbot]
identifier[tprof] = identifier[step] . identifier[rprof] [ literal[string] ]. identifier[values]
identifier[diff] =( identifier[tprof] [:- literal[int] ]- identifier[tprof] [ literal[int] :])/( identifier[rad] [ literal[int] :]- identifier[rad] [:- literal[int] ])
identifier[diff] = identifier[np] . identifier[insert] ( identifier[diff] , literal[int] ,( literal[int] - identifier[tprof] [ literal[int] ])/( identifier[rad] [ literal[int] ]- identifier[rbot] ))
identifier[diff] = identifier[np] . identifier[append] ( identifier[diff] , identifier[tprof] [- literal[int] ]/( identifier[rtop] - identifier[rad] [- literal[int] ]))
keyword[return] identifier[diff] , identifier[np] . identifier[append] ( identifier[rad] , identifier[rtop] ) | def diff_prof(step):
"""Diffusion.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
"""
(rbot, rtop) = misc.get_rbounds(step)
rad = step.rprof['r'].values + rbot
tprof = step.rprof['Tmean'].values
diff = (tprof[:-1] - tprof[1:]) / (rad[1:] - rad[:-1])
# assume tbot = 1
diff = np.insert(diff, 0, (1 - tprof[0]) / (rad[0] - rbot))
# assume ttop = 0
diff = np.append(diff, tprof[-1] / (rtop - rad[-1]))
# actually computed at r_edges...
return (diff, np.append(rad, rtop)) |
def build_downstream(self, process_descriptions, task, all_tasks,
                     task_pipeline,
                     count_forks, total_tasks, forks):
    """Builds the downstream pipeline of the current process.

    Checks for the downstream processes to the current process and
    adds them to the current pipeline fragment. A forked output
    (``"a|b"``) spawns one new pipeline fragment per fork target via
    ``self.define_pipeline_string``; a linear output is followed
    recursively until a process with no output is reached.

    Parameters
    ----------
    process_descriptions : dict
        Information of processes input, output and if is forkable;
        index ``[2]`` holds the output specification (``None`` for a
        terminal process).
    task : str
        Current process
    all_tasks : list
        A list of all provided processes
    task_pipeline : list
        Current pipeline fragment (mutated in place)
    count_forks : int
        Current number of forks
    total_tasks : str
        All space separated processes
    forks : list
        Current forks

    Returns
    -------
    list : resulting pipeline fragment.
        NOTE(review): when ``task`` is a known process whose output is
        ``None``, execution falls through and implicitly returns
        ``None`` -- callers appear to rely on the in-place mutation of
        ``task_pipeline``; confirm before depending on the return value.
    """
    if task in process_descriptions:
        if process_descriptions[task][2] is not None:
            # output with '|' separators means this process forks
            if len(process_descriptions[task][2].split("|")) > 1:
                local_forks = process_descriptions[task][2].split("|")
                # Adds the process to the pipeline fragment downstream
                # and defines a new pipeline fragment for each fork.
                # Those will only look for downstream processes
                for local_fork in local_forks:
                    if local_fork in total_tasks:
                        count_forks += 1
                        task_pipeline.append(process_descriptions[task][2])
                        self.define_pipeline_string(
                            process_descriptions,
                            local_fork,
                            False,
                            True,
                            count_forks,
                            total_tasks,
                            forks
                        )
                return task_pipeline
            else:
                # linear output: append the single downstream process
                if process_descriptions[task][2] in total_tasks:
                    task_pipeline.append(process_descriptions[task][2].split("|")[0])
                    # Proceeds building downstream until the output for a
                    # process is None
                    self.build_downstream(
                        process_descriptions,
                        process_descriptions[task][2].split("|")[0],
                        all_tasks,
                        task_pipeline,
                        count_forks,
                        total_tasks,
                        forks
                    )
                return task_pipeline
    else:
        # unknown task: nothing to add, return the fragment unchanged
        return task_pipeline
constant[Builds the downstream pipeline of the current process
Checks for the downstream processes to the current process and
adds them to the current pipeline fragment.
Parameters
----------
process_descriptions : dict
Information of processes input, output and if is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
]
if compare[name[task] in name[process_descriptions]] begin[:]
if compare[call[call[name[process_descriptions]][name[task]]][constant[2]] is_not constant[None]] begin[:]
if compare[call[name[len], parameter[call[call[call[name[process_descriptions]][name[task]]][constant[2]].split, parameter[constant[|]]]]] greater[>] constant[1]] begin[:]
variable[local_forks] assign[=] call[call[call[name[process_descriptions]][name[task]]][constant[2]].split, parameter[constant[|]]]
for taget[name[local_fork]] in starred[name[local_forks]] begin[:]
if compare[name[local_fork] in name[total_tasks]] begin[:]
<ast.AugAssign object at 0x7da1b03f92d0>
call[name[task_pipeline].append, parameter[call[call[name[process_descriptions]][name[task]]][constant[2]]]]
call[name[self].define_pipeline_string, parameter[name[process_descriptions], name[local_fork], constant[False], constant[True], name[count_forks], name[total_tasks], name[forks]]]
return[name[task_pipeline]] | keyword[def] identifier[build_downstream] ( identifier[self] , identifier[process_descriptions] , identifier[task] , identifier[all_tasks] ,
identifier[task_pipeline] ,
identifier[count_forks] , identifier[total_tasks] , identifier[forks] ):
literal[string]
keyword[if] identifier[task] keyword[in] identifier[process_descriptions] :
keyword[if] identifier[process_descriptions] [ identifier[task] ][ literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[process_descriptions] [ identifier[task] ][ literal[int] ]. identifier[split] ( literal[string] ))> literal[int] :
identifier[local_forks] = identifier[process_descriptions] [ identifier[task] ][ literal[int] ]. identifier[split] ( literal[string] )
keyword[for] identifier[local_fork] keyword[in] identifier[local_forks] :
keyword[if] identifier[local_fork] keyword[in] identifier[total_tasks] :
identifier[count_forks] += literal[int]
identifier[task_pipeline] . identifier[append] ( identifier[process_descriptions] [ identifier[task] ][ literal[int] ])
identifier[self] . identifier[define_pipeline_string] (
identifier[process_descriptions] ,
identifier[local_fork] ,
keyword[False] ,
keyword[True] ,
identifier[count_forks] ,
identifier[total_tasks] ,
identifier[forks]
)
keyword[return] identifier[task_pipeline]
keyword[else] :
keyword[if] identifier[process_descriptions] [ identifier[task] ][ literal[int] ] keyword[in] identifier[total_tasks] :
identifier[task_pipeline] . identifier[append] ( identifier[process_descriptions] [ identifier[task] ][ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ])
identifier[self] . identifier[build_downstream] (
identifier[process_descriptions] ,
identifier[process_descriptions] [ identifier[task] ][ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ],
identifier[all_tasks] ,
identifier[task_pipeline] ,
identifier[count_forks] ,
identifier[total_tasks] ,
identifier[forks]
)
keyword[return] identifier[task_pipeline]
keyword[else] :
keyword[return] identifier[task_pipeline] | def build_downstream(self, process_descriptions, task, all_tasks, task_pipeline, count_forks, total_tasks, forks):
"""Builds the downstream pipeline of the current process
Checks for the downstream processes to the current process and
adds them to the current pipeline fragment.
Parameters
----------
process_descriptions : dict
Information of processes input, output and if is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][2] is not None:
if len(process_descriptions[task][2].split('|')) > 1:
local_forks = process_descriptions[task][2].split('|')
# Adds the process to the pipeline fragment downstream
# and defines a new pipeline fragment for each fork.
# Those will only look for downstream processes
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.append(process_descriptions[task][2])
self.define_pipeline_string(process_descriptions, local_fork, False, True, count_forks, total_tasks, forks) # depends on [control=['if'], data=['local_fork', 'total_tasks']] # depends on [control=['for'], data=['local_fork']]
return task_pipeline # depends on [control=['if'], data=[]]
else:
if process_descriptions[task][2] in total_tasks:
task_pipeline.append(process_descriptions[task][2].split('|')[0])
# Proceeds building downstream until the output for a
# process is None
self.build_downstream(process_descriptions, process_descriptions[task][2].split('|')[0], all_tasks, task_pipeline, count_forks, total_tasks, forks) # depends on [control=['if'], data=['total_tasks']]
return task_pipeline # depends on [control=['if'], data=[]]
else:
return task_pipeline # depends on [control=['if'], data=['task', 'process_descriptions']] |
def label_field(self, f):
    """
    Select one field as the label field.

    Note that this field will be exclude from feature fields.

    :param f: Selected label field
    :type f: str

    :rtype: DataFrame
    """
    if f is None:
        raise ValueError("Label field name cannot be None.")
    self._assert_ml_fields_valid(f)
    # mark the chosen field as the label and drop it from the features
    role_map = {_get_field_name(f): FieldRole.LABEL}
    return _change_singleton_roles(self, role_map, clear_feature=True)
constant[
Select one field as the label field.
Note that this field will be exclude from feature fields.
:param f: Selected label field
:type f: str
:rtype: DataFrame
]
if compare[name[f] is constant[None]] begin[:]
<ast.Raise object at 0x7da204346d40>
call[name[self]._assert_ml_fields_valid, parameter[name[f]]]
return[call[name[_change_singleton_roles], parameter[name[self], dictionary[[<ast.Call object at 0x7da204345960>], [<ast.Attribute object at 0x7da204347340>]]]]] | keyword[def] identifier[label_field] ( identifier[self] , identifier[f] ):
literal[string]
keyword[if] identifier[f] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_assert_ml_fields_valid] ( identifier[f] )
keyword[return] identifier[_change_singleton_roles] ( identifier[self] ,{ identifier[_get_field_name] ( identifier[f] ): identifier[FieldRole] . identifier[LABEL] }, identifier[clear_feature] = keyword[True] ) | def label_field(self, f):
"""
Select one field as the label field.
Note that this field will be exclude from feature fields.
:param f: Selected label field
:type f: str
:rtype: DataFrame
"""
if f is None:
raise ValueError('Label field name cannot be None.') # depends on [control=['if'], data=[]]
self._assert_ml_fields_valid(f)
return _change_singleton_roles(self, {_get_field_name(f): FieldRole.LABEL}, clear_feature=True) |
def transform(self, y):
        """
        Apply the configured element-wise binary math operation.

        The input is expected to carry two columns: column 0 is the left
        operand and column 1 the right operand.

        :param y: two-column input, either a pandas DataFrame or a 2-D
            NumPy array
        :return: single-column DataFrame holding the element-wise result
        :raises ValueError: if ``self.transform_type`` is not one of
            'add', 'sub', 'mul', 'div', 'rem', 'pow'
        """
        if isinstance(y, pd.DataFrame):
            # .ix was deprecated and removed in pandas 1.0; .iloc is the
            # positional-indexing equivalent.
            x = y.iloc[:, 0]
            y = y.iloc[:, 1]
        else:
            x = y[:, 0]
            y = y[:, 1]
        # Dispatch table instead of an if/elif chain; np.power matches
        # the original x ** y element-wise semantics.
        operations = {
            'add': np.add,
            'sub': np.subtract,
            'mul': np.multiply,
            'div': np.divide,
            'rem': np.remainder,
            'pow': np.power,
        }
        try:
            operation = operations[self.transform_type]
        except KeyError:
            # Previously an unknown type silently returned None; fail
            # loudly instead so misconfiguration is caught early.
            raise ValueError(
                "Unsupported transform_type: %r" % self.transform_type)
        return pd.DataFrame(operation(x, y))
constant[
Transform features per specified math function.
:param y:
:return:
]
if call[name[isinstance], parameter[name[y], name[pd].DataFrame]] begin[:]
variable[x] assign[=] call[name[y].ix][tuple[[<ast.Slice object at 0x7da1b1ce9720>, <ast.Constant object at 0x7da1b1ce9840>]]]
variable[y] assign[=] call[name[y].ix][tuple[[<ast.Slice object at 0x7da1b1ce8b80>, <ast.Constant object at 0x7da1b1ce88b0>]]]
if compare[name[self].transform_type equal[==] constant[add]] begin[:]
return[call[name[pd].DataFrame, parameter[call[name[np].add, parameter[name[x], name[y]]]]]] | keyword[def] identifier[transform] ( identifier[self] , identifier[y] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[y] , identifier[pd] . identifier[DataFrame] ):
identifier[x] = identifier[y] . identifier[ix] [:, literal[int] ]
identifier[y] = identifier[y] . identifier[ix] [:, literal[int] ]
keyword[else] :
identifier[x] = identifier[y] [:, literal[int] ]
identifier[y] = identifier[y] [:, literal[int] ]
keyword[if] identifier[self] . identifier[transform_type] == literal[string] :
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[add] ( identifier[x] , identifier[y] ))
keyword[elif] identifier[self] . identifier[transform_type] == literal[string] :
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[subtract] ( identifier[x] , identifier[y] ))
keyword[elif] identifier[self] . identifier[transform_type] == literal[string] :
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[multiply] ( identifier[x] , identifier[y] ))
keyword[elif] identifier[self] . identifier[transform_type] == literal[string] :
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[divide] ( identifier[x] , identifier[y] ))
keyword[elif] identifier[self] . identifier[transform_type] == literal[string] :
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[remainder] ( identifier[x] , identifier[y] ))
keyword[elif] identifier[self] . identifier[transform_type] == literal[string] :
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[x] ** identifier[y] ) | def transform(self, y):
"""
Transform features per specified math function.
:param y:
:return:
"""
if isinstance(y, pd.DataFrame):
x = y.ix[:, 0]
y = y.ix[:, 1] # depends on [control=['if'], data=[]]
else:
x = y[:, 0]
y = y[:, 1]
if self.transform_type == 'add':
return pd.DataFrame(np.add(x, y)) # depends on [control=['if'], data=[]]
elif self.transform_type == 'sub':
return pd.DataFrame(np.subtract(x, y)) # depends on [control=['if'], data=[]]
elif self.transform_type == 'mul':
return pd.DataFrame(np.multiply(x, y)) # depends on [control=['if'], data=[]]
elif self.transform_type == 'div':
return pd.DataFrame(np.divide(x, y)) # depends on [control=['if'], data=[]]
elif self.transform_type == 'rem':
return pd.DataFrame(np.remainder(x, y)) # depends on [control=['if'], data=[]]
elif self.transform_type == 'pow':
return pd.DataFrame(x ** y) # depends on [control=['if'], data=[]] |
def visit_member(self, attribute_key, attribute, member_node, member_data,
                 is_link_node, parent_data, index=None):
        """
        Hook invoked for each member node while traversing a resource
        data tree. Concrete traverser subclasses must override this.
        :param tuple attribute_key: tuple of attribute tokens identifying
            the member node's position in the resource data tree.
        :param attribute: mapped attribute carrying the member node's name
            (in the parent), type, and related metadata.
        :type attribute:
            :class:`everest.representers.attributes.MappedAttribute`
        :param member_node: node holding resource data - a resource
            instance (for a :class:`ResourceTreeTraverser` over resources)
            or a data element instance (for a
            :class:`DataElementTreeTraverser` over a data element tree).
        :param dict member_data: all member data extracted during
            traversal, keyed by mapped attribute.
        :param bool is_link_node: whether the given member node is a link.
        :param dict parent_data: all parent data extracted during
            traversal, keyed by mapped attribute.
        :param int index: the member node's index when its parent is a
            collection node; `None` when the parent is a member node.
        """
        raise NotImplementedError('Abstract method.')
constant[
Visits a member node in a resource data tree.
:param tuple attribute_key: tuple containing the attribute tokens
identifying the member node's position in the resource data tree.
:param attribute: mapped attribute holding information about the
member node's name (in the parent) and type etc.
:type attribute:
:class:`everest.representers.attributes.MappedAttribute`
:param member_node: the node holding resource data. This is either a
resource instance (when using a :class:`ResourceTreeTraverser` on
a tree of resources) or a data element instance (when using a
:class:`DataElementTreeTraverser` on a data element tree.
:param dict member_data: dictionary holding all member data
extracted during traversal (with mapped attributes as keys).
:param bool is_link_node: indicates if the given member node is a link.
:param dict parent_data: dictionary holding all parent data extracted
during traversal (with mapped attributes as keys).
:param int index: this indicates a member node's index in a collection
parent node. If the parent node is a member node, it will be `None`.
]
<ast.Raise object at 0x7da20c9924d0> | keyword[def] identifier[visit_member] ( identifier[self] , identifier[attribute_key] , identifier[attribute] , identifier[member_node] , identifier[member_data] ,
identifier[is_link_node] , identifier[parent_data] , identifier[index] = keyword[None] ):
literal[string]
keyword[raise] identifier[NotImplementedError] ( literal[string] ) | def visit_member(self, attribute_key, attribute, member_node, member_data, is_link_node, parent_data, index=None):
"""
Visits a member node in a resource data tree.
:param tuple attribute_key: tuple containing the attribute tokens
identifying the member node's position in the resource data tree.
:param attribute: mapped attribute holding information about the
member node's name (in the parent) and type etc.
:type attribute:
:class:`everest.representers.attributes.MappedAttribute`
:param member_node: the node holding resource data. This is either a
resource instance (when using a :class:`ResourceTreeTraverser` on
a tree of resources) or a data element instance (when using a
:class:`DataElementTreeTraverser` on a data element tree.
:param dict member_data: dictionary holding all member data
extracted during traversal (with mapped attributes as keys).
:param bool is_link_node: indicates if the given member node is a link.
:param dict parent_data: dictionary holding all parent data extracted
during traversal (with mapped attributes as keys).
:param int index: this indicates a member node's index in a collection
parent node. If the parent node is a member node, it will be `None`.
"""
raise NotImplementedError('Abstract method.') |
def add(self, output, target):
        """
        Buffer one batch of model scores and ground-truth targets.

        Both inputs are appended to the internal ``self.scores`` and
        ``self.targets`` tensors; the backing storage is grown
        geometrically so repeated calls stay amortized O(1) per row.

        Args:
            output (Tensor): NxK tensor that for each of the N examples
                indicates the probability of the example belonging to each of
                the K classes, according to the model. The probabilities should
                sum to one over all classes
            target (Tensor): binary NxK tensor that encodes which of the K
                classes are associated with the N-th input
                (eg: a row [0, 1, 0, 1] indicates that the example is
                associated with classes 2 and 4)
            weight (optional, Tensor): Nx1 tensor representing the weight for
                each example (each weight > 0)
        """
        # Accept numpy arrays by converting them to torch tensors.
        if not torch.is_tensor(output):
            output = torch.from_numpy(output)
        if not torch.is_tensor(target):
            target = torch.from_numpy(target)
        # Normalize 1-D inputs to a single-column 2-D shape.
        if output.dim() == 1:
            output = output.view(-1, 1)
        else:
            assert output.dim() == 2, \
                'wrong output size (should be 1D or 2D with one column \
                per class)'
        if target.dim() == 1:
            target = target.view(-1, 1)
        else:
            assert target.dim() == 2, \
                'wrong target size (should be 1D or 2D with one column \
                per class)'
        if self.scores.numel() > 0:
            # K (the number of classes) must stay constant across batches.
            assert target.size(1) == self.targets.size(1), \
                'dimensions for output should match previously added examples.'
        # make sure storage is of sufficient size
        if self.scores.storage().size() < self.scores.numel() + output.numel():
            # Grow by ~1.5x plus the incoming batch to amortize reallocations.
            new_size = math.ceil(self.scores.storage().size() * 1.5)
            self.scores.storage().resize_(int(new_size + output.numel()))
            self.targets.storage().resize_(int(new_size + output.numel()))
        # store scores and targets
        # Append the new rows after the `offset` rows already stored.
        offset = self.scores.size(0) if self.scores.dim() > 0 else 0
        self.scores.resize_(offset + output.size(0), output.size(1))
        self.targets.resize_(offset + target.size(0), target.size(1))
        self.scores.narrow(0, offset, output.size(0)).copy_(output)
        self.targets.narrow(0, offset, target.size(0)).copy_(target)
constant[
Args:
output (Tensor): NxK tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model. The probabilities should
sum to one over all classes
target (Tensor): binary NxK tensort that encodes which of the K
classes are associated with the N-th input
(eg: a row [0, 1, 0, 1] indicates that the example is
associated with classes 2 and 4)
weight (optional, Tensor): Nx1 tensor representing the weight for
each example (each weight > 0)
]
if <ast.UnaryOp object at 0x7da1b2043250> begin[:]
variable[output] assign[=] call[name[torch].from_numpy, parameter[name[output]]]
if <ast.UnaryOp object at 0x7da1b20411e0> begin[:]
variable[target] assign[=] call[name[torch].from_numpy, parameter[name[target]]]
if compare[call[name[output].dim, parameter[]] equal[==] constant[1]] begin[:]
variable[output] assign[=] call[name[output].view, parameter[<ast.UnaryOp object at 0x7da1b2042b90>, constant[1]]]
if compare[call[name[target].dim, parameter[]] equal[==] constant[1]] begin[:]
variable[target] assign[=] call[name[target].view, parameter[<ast.UnaryOp object at 0x7da1b2040c70>, constant[1]]]
if compare[call[name[self].scores.numel, parameter[]] greater[>] constant[0]] begin[:]
assert[compare[call[name[target].size, parameter[constant[1]]] equal[==] call[name[self].targets.size, parameter[constant[1]]]]]
if compare[call[call[name[self].scores.storage, parameter[]].size, parameter[]] less[<] binary_operation[call[name[self].scores.numel, parameter[]] + call[name[output].numel, parameter[]]]] begin[:]
variable[new_size] assign[=] call[name[math].ceil, parameter[binary_operation[call[call[name[self].scores.storage, parameter[]].size, parameter[]] * constant[1.5]]]]
call[call[name[self].scores.storage, parameter[]].resize_, parameter[call[name[int], parameter[binary_operation[name[new_size] + call[name[output].numel, parameter[]]]]]]]
call[call[name[self].targets.storage, parameter[]].resize_, parameter[call[name[int], parameter[binary_operation[name[new_size] + call[name[output].numel, parameter[]]]]]]]
variable[offset] assign[=] <ast.IfExp object at 0x7da1b2043b50>
call[name[self].scores.resize_, parameter[binary_operation[name[offset] + call[name[output].size, parameter[constant[0]]]], call[name[output].size, parameter[constant[1]]]]]
call[name[self].targets.resize_, parameter[binary_operation[name[offset] + call[name[target].size, parameter[constant[0]]]], call[name[target].size, parameter[constant[1]]]]]
call[call[name[self].scores.narrow, parameter[constant[0], name[offset], call[name[output].size, parameter[constant[0]]]]].copy_, parameter[name[output]]]
call[call[name[self].targets.narrow, parameter[constant[0], name[offset], call[name[target].size, parameter[constant[0]]]]].copy_, parameter[name[target]]] | keyword[def] identifier[add] ( identifier[self] , identifier[output] , identifier[target] ):
literal[string]
keyword[if] keyword[not] identifier[torch] . identifier[is_tensor] ( identifier[output] ):
identifier[output] = identifier[torch] . identifier[from_numpy] ( identifier[output] )
keyword[if] keyword[not] identifier[torch] . identifier[is_tensor] ( identifier[target] ):
identifier[target] = identifier[torch] . identifier[from_numpy] ( identifier[target] )
keyword[if] identifier[output] . identifier[dim] ()== literal[int] :
identifier[output] = identifier[output] . identifier[view] (- literal[int] , literal[int] )
keyword[else] :
keyword[assert] identifier[output] . identifier[dim] ()== literal[int] , literal[string]
keyword[if] identifier[target] . identifier[dim] ()== literal[int] :
identifier[target] = identifier[target] . identifier[view] (- literal[int] , literal[int] )
keyword[else] :
keyword[assert] identifier[target] . identifier[dim] ()== literal[int] , literal[string]
keyword[if] identifier[self] . identifier[scores] . identifier[numel] ()> literal[int] :
keyword[assert] identifier[target] . identifier[size] ( literal[int] )== identifier[self] . identifier[targets] . identifier[size] ( literal[int] ), literal[string]
keyword[if] identifier[self] . identifier[scores] . identifier[storage] (). identifier[size] ()< identifier[self] . identifier[scores] . identifier[numel] ()+ identifier[output] . identifier[numel] ():
identifier[new_size] = identifier[math] . identifier[ceil] ( identifier[self] . identifier[scores] . identifier[storage] (). identifier[size] ()* literal[int] )
identifier[self] . identifier[scores] . identifier[storage] (). identifier[resize_] ( identifier[int] ( identifier[new_size] + identifier[output] . identifier[numel] ()))
identifier[self] . identifier[targets] . identifier[storage] (). identifier[resize_] ( identifier[int] ( identifier[new_size] + identifier[output] . identifier[numel] ()))
identifier[offset] = identifier[self] . identifier[scores] . identifier[size] ( literal[int] ) keyword[if] identifier[self] . identifier[scores] . identifier[dim] ()> literal[int] keyword[else] literal[int]
identifier[self] . identifier[scores] . identifier[resize_] ( identifier[offset] + identifier[output] . identifier[size] ( literal[int] ), identifier[output] . identifier[size] ( literal[int] ))
identifier[self] . identifier[targets] . identifier[resize_] ( identifier[offset] + identifier[target] . identifier[size] ( literal[int] ), identifier[target] . identifier[size] ( literal[int] ))
identifier[self] . identifier[scores] . identifier[narrow] ( literal[int] , identifier[offset] , identifier[output] . identifier[size] ( literal[int] )). identifier[copy_] ( identifier[output] )
identifier[self] . identifier[targets] . identifier[narrow] ( literal[int] , identifier[offset] , identifier[target] . identifier[size] ( literal[int] )). identifier[copy_] ( identifier[target] ) | def add(self, output, target):
"""
Args:
output (Tensor): NxK tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model. The probabilities should
sum to one over all classes
target (Tensor): binary NxK tensort that encodes which of the K
classes are associated with the N-th input
(eg: a row [0, 1, 0, 1] indicates that the example is
associated with classes 2 and 4)
weight (optional, Tensor): Nx1 tensor representing the weight for
each example (each weight > 0)
"""
if not torch.is_tensor(output):
output = torch.from_numpy(output) # depends on [control=['if'], data=[]]
if not torch.is_tensor(target):
target = torch.from_numpy(target) # depends on [control=['if'], data=[]]
if output.dim() == 1:
output = output.view(-1, 1) # depends on [control=['if'], data=[]]
else:
assert output.dim() == 2, 'wrong output size (should be 1D or 2D with one column per class)'
if target.dim() == 1:
target = target.view(-1, 1) # depends on [control=['if'], data=[]]
else:
assert target.dim() == 2, 'wrong target size (should be 1D or 2D with one column per class)'
if self.scores.numel() > 0:
assert target.size(1) == self.targets.size(1), 'dimensions for output should match previously added examples.' # depends on [control=['if'], data=[]]
# make sure storage is of sufficient size
if self.scores.storage().size() < self.scores.numel() + output.numel():
new_size = math.ceil(self.scores.storage().size() * 1.5)
self.scores.storage().resize_(int(new_size + output.numel()))
self.targets.storage().resize_(int(new_size + output.numel())) # depends on [control=['if'], data=[]]
# store scores and targets
offset = self.scores.size(0) if self.scores.dim() > 0 else 0
self.scores.resize_(offset + output.size(0), output.size(1))
self.targets.resize_(offset + target.size(0), target.size(1))
self.scores.narrow(0, offset, output.size(0)).copy_(output)
self.targets.narrow(0, offset, target.size(0)).copy_(target) |
def agp(args):
    """
    %prog agp main_results/ contigs.fasta
    Generate AGP file based on LACHESIS output.
    """
    parser = OptionParser(agp.__doc__)
    parser.set_outfile()
    opts, args = parser.parse_args(args)
    if len(args) != 2:
        sys.exit(not parser.print_help())
    odir, contigsfasta = args
    agp_out = must_open(opts.outfile, 'w')
    ordering_files = natsorted(iglob(odir, "*.ordering"))
    sizes = Sizes(contigsfasta).mapping
    all_contigs = set(sizes.keys())
    anchored = set()
    # Each *.ordering file describes one scaffold; write its AGP records
    # and remember which contigs it consumed.
    for ordering_file in ordering_files:
        ordering = ContigOrdering(ordering_file)
        anchored.update(entry.contig_name for entry in ordering)
        scaffold_name = op.basename(ordering_file).split('.')[0]
        ordering.write_agp(scaffold_name, sizes, agp_out)
    # Contigs absent from every ordering file are emitted as singletons
    # with unknown ("?") orientation.
    singletons = all_contigs - anchored
    logging.debug('Anchored: {}, Singletons: {}'.format(
        len(anchored), len(singletons)))
    for contig in natsorted(singletons):
        order_to_agp(contig, [(contig, "?")], sizes, agp_out)
constant[
%prog agp main_results/ contigs.fasta
Generate AGP file based on LACHESIS output.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[agp].__doc__]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da20c6a9f30> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08adba0>]]
<ast.Tuple object at 0x7da1b08ad000> assign[=] name[args]
variable[fwagp] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]]
variable[orderingfiles] assign[=] call[name[natsorted], parameter[call[name[iglob], parameter[name[odir], constant[*.ordering]]]]]
variable[sizes] assign[=] call[name[Sizes], parameter[name[contigsfasta]]].mapping
variable[contigs] assign[=] call[name[set], parameter[call[name[sizes].keys, parameter[]]]]
variable[anchored] assign[=] call[name[set], parameter[]]
for taget[name[ofile]] in starred[name[orderingfiles]] begin[:]
variable[co] assign[=] call[name[ContigOrdering], parameter[name[ofile]]]
<ast.AugAssign object at 0x7da1b0981360>
variable[obj] assign[=] call[call[call[name[op].basename, parameter[name[ofile]]].split, parameter[constant[.]]]][constant[0]]
call[name[co].write_agp, parameter[name[obj], name[sizes], name[fwagp]]]
variable[singletons] assign[=] binary_operation[name[contigs] - name[anchored]]
call[name[logging].debug, parameter[call[constant[Anchored: {}, Singletons: {}].format, parameter[call[name[len], parameter[name[anchored]]], call[name[len], parameter[name[singletons]]]]]]]
for taget[name[s]] in starred[call[name[natsorted], parameter[name[singletons]]]] begin[:]
call[name[order_to_agp], parameter[name[s], list[[<ast.Tuple object at 0x7da1b0981990>]], name[sizes], name[fwagp]]] | keyword[def] identifier[agp] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[agp] . identifier[__doc__] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[odir] , identifier[contigsfasta] = identifier[args]
identifier[fwagp] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] )
identifier[orderingfiles] = identifier[natsorted] ( identifier[iglob] ( identifier[odir] , literal[string] ))
identifier[sizes] = identifier[Sizes] ( identifier[contigsfasta] ). identifier[mapping]
identifier[contigs] = identifier[set] ( identifier[sizes] . identifier[keys] ())
identifier[anchored] = identifier[set] ()
keyword[for] identifier[ofile] keyword[in] identifier[orderingfiles] :
identifier[co] = identifier[ContigOrdering] ( identifier[ofile] )
identifier[anchored] |= identifier[set] ([ identifier[x] . identifier[contig_name] keyword[for] identifier[x] keyword[in] identifier[co] ])
identifier[obj] = identifier[op] . identifier[basename] ( identifier[ofile] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[co] . identifier[write_agp] ( identifier[obj] , identifier[sizes] , identifier[fwagp] )
identifier[singletons] = identifier[contigs] - identifier[anchored]
identifier[logging] . identifier[debug] ( literal[string] .
identifier[format] ( identifier[len] ( identifier[anchored] ), identifier[len] ( identifier[singletons] )))
keyword[for] identifier[s] keyword[in] identifier[natsorted] ( identifier[singletons] ):
identifier[order_to_agp] ( identifier[s] ,[( identifier[s] , literal[string] )], identifier[sizes] , identifier[fwagp] ) | def agp(args):
"""
%prog agp main_results/ contigs.fasta
Generate AGP file based on LACHESIS output.
"""
p = OptionParser(agp.__doc__)
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(odir, contigsfasta) = args
fwagp = must_open(opts.outfile, 'w')
orderingfiles = natsorted(iglob(odir, '*.ordering'))
sizes = Sizes(contigsfasta).mapping
contigs = set(sizes.keys())
anchored = set()
for ofile in orderingfiles:
co = ContigOrdering(ofile)
anchored |= set([x.contig_name for x in co])
obj = op.basename(ofile).split('.')[0]
co.write_agp(obj, sizes, fwagp) # depends on [control=['for'], data=['ofile']]
singletons = contigs - anchored
logging.debug('Anchored: {}, Singletons: {}'.format(len(anchored), len(singletons)))
for s in natsorted(singletons):
order_to_agp(s, [(s, '?')], sizes, fwagp) # depends on [control=['for'], data=['s']] |
def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length):
"""Splits a token into subtokens defined in the subtoken dict."""
ret = []
start = 0
token_len = len(token)
while start < token_len:
# Find the longest subtoken, so iterate backwards.
for end in xrange(min(token_len, start + max_subtoken_length), start, -1):
subtoken = token[start:end]
if subtoken in subtoken_dict:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
raise ValueError("Was unable to split token \"%s\" into subtokens." %
token)
return ret | def function[_split_token_to_subtokens, parameter[token, subtoken_dict, max_subtoken_length]]:
constant[Splits a token into subtokens defined in the subtoken dict.]
variable[ret] assign[=] list[[]]
variable[start] assign[=] constant[0]
variable[token_len] assign[=] call[name[len], parameter[name[token]]]
while compare[name[start] less[<] name[token_len]] begin[:]
for taget[name[end]] in starred[call[name[xrange], parameter[call[name[min], parameter[name[token_len], binary_operation[name[start] + name[max_subtoken_length]]]], name[start], <ast.UnaryOp object at 0x7da20c7ca4d0>]]] begin[:]
variable[subtoken] assign[=] call[name[token]][<ast.Slice object at 0x7da18f00d990>]
if compare[name[subtoken] in name[subtoken_dict]] begin[:]
call[name[ret].append, parameter[name[subtoken]]]
variable[start] assign[=] name[end]
break
return[name[ret]] | keyword[def] identifier[_split_token_to_subtokens] ( identifier[token] , identifier[subtoken_dict] , identifier[max_subtoken_length] ):
literal[string]
identifier[ret] =[]
identifier[start] = literal[int]
identifier[token_len] = identifier[len] ( identifier[token] )
keyword[while] identifier[start] < identifier[token_len] :
keyword[for] identifier[end] keyword[in] identifier[xrange] ( identifier[min] ( identifier[token_len] , identifier[start] + identifier[max_subtoken_length] ), identifier[start] ,- literal[int] ):
identifier[subtoken] = identifier[token] [ identifier[start] : identifier[end] ]
keyword[if] identifier[subtoken] keyword[in] identifier[subtoken_dict] :
identifier[ret] . identifier[append] ( identifier[subtoken] )
identifier[start] = identifier[end]
keyword[break]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[token] )
keyword[return] identifier[ret] | def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length):
"""Splits a token into subtokens defined in the subtoken dict."""
ret = []
start = 0
token_len = len(token)
while start < token_len:
# Find the longest subtoken, so iterate backwards.
for end in xrange(min(token_len, start + max_subtoken_length), start, -1):
subtoken = token[start:end]
if subtoken in subtoken_dict:
ret.append(subtoken)
start = end
break # depends on [control=['if'], data=['subtoken']] # depends on [control=['for'], data=['end']]
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
raise ValueError('Was unable to split token "%s" into subtokens.' % token) # depends on [control=['while'], data=['start', 'token_len']]
return ret |
def _send(self, email_message):
        """A helper method that does the actual sending.

        Messages addressed purely to admins are sent normally; everything
        else is redirected to the addresses in settings.DEV_EMAIL_LIST.

        Returns True if the message was handed to the SMTP connection,
        False otherwise.
        """
        if not email_message.recipients():
            return False
        if set(email_message.recipients()).issubset(admin_emails()):
            # Skip on admin notifications
            return super(DevEmailBackend, self)._send(email_message)
        from_email = sanitize_address(email_message.from_email, email_message.encoding)
        try:
            recipients = [sanitize_address(addr, email_message.encoding)
                          for addr in settings.DEV_EMAIL_LIST]
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            raise ImproperlyConfigured("You must set a DEV_EMAIL_LIST setting to use the Dev Email Backend.")
        message = email_message.message()
        charset = message.get_charset().get_output_charset() if message.get_charset() else "utf-8"
        try:
            self.connection.sendmail(from_email, recipients,
                                     force_bytes(message.as_string(), charset))
        # Narrowed from a bare `except:` for the same reason as above.
        except Exception:
            if not self.fail_silently:
                raise
            return False
        return True
constant[A helper method that does the actual sending.]
if <ast.UnaryOp object at 0x7da1b09bdb10> begin[:]
return[constant[False]]
if call[call[name[set], parameter[call[name[email_message].recipients, parameter[]]]].issubset, parameter[call[name[admin_emails], parameter[]]]] begin[:]
return[call[call[name[super], parameter[name[DevEmailBackend], name[self]]]._send, parameter[name[email_message]]]]
variable[from_email] assign[=] call[name[sanitize_address], parameter[name[email_message].from_email, name[email_message].encoding]]
<ast.Try object at 0x7da1b09bfa00>
variable[message] assign[=] call[name[email_message].message, parameter[]]
variable[charset] assign[=] <ast.IfExp object at 0x7da1b09bfdc0>
<ast.Try object at 0x7da1b09bdab0>
return[constant[True]] | keyword[def] identifier[_send] ( identifier[self] , identifier[email_message] ):
literal[string]
keyword[if] keyword[not] identifier[email_message] . identifier[recipients] ():
keyword[return] keyword[False]
keyword[if] identifier[set] ( identifier[email_message] . identifier[recipients] ()). identifier[issubset] ( identifier[admin_emails] ()):
keyword[return] identifier[super] ( identifier[DevEmailBackend] , identifier[self] ). identifier[_send] ( identifier[email_message] )
identifier[from_email] = identifier[sanitize_address] ( identifier[email_message] . identifier[from_email] , identifier[email_message] . identifier[encoding] )
keyword[try] :
identifier[recipients] =[ identifier[sanitize_address] ( identifier[addr] , identifier[email_message] . identifier[encoding] )
keyword[for] identifier[addr] keyword[in] identifier[settings] . identifier[DEV_EMAIL_LIST] ]
keyword[except] :
keyword[raise] identifier[ImproperlyConfigured] ( literal[string] )
identifier[message] = identifier[email_message] . identifier[message] ()
identifier[charset] = identifier[message] . identifier[get_charset] (). identifier[get_output_charset] () keyword[if] identifier[message] . identifier[get_charset] () keyword[else] literal[string]
keyword[try] :
identifier[self] . identifier[connection] . identifier[sendmail] ( identifier[from_email] , identifier[recipients] ,
identifier[force_bytes] ( identifier[message] . identifier[as_string] (), identifier[charset] ))
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[fail_silently] :
keyword[raise]
keyword[return] keyword[False]
keyword[return] keyword[True] | def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False # depends on [control=['if'], data=[]]
if set(email_message.recipients()).issubset(admin_emails()):
# Skip on admin notifications
return super(DevEmailBackend, self)._send(email_message) # depends on [control=['if'], data=[]]
from_email = sanitize_address(email_message.from_email, email_message.encoding)
try:
recipients = [sanitize_address(addr, email_message.encoding) for addr in settings.DEV_EMAIL_LIST] # depends on [control=['try'], data=[]]
except:
raise ImproperlyConfigured('You must set a DEV_EMAIL_LIST setting to use the Dev Email Backend.') # depends on [control=['except'], data=[]]
message = email_message.message()
charset = message.get_charset().get_output_charset() if message.get_charset() else 'utf-8'
try:
self.connection.sendmail(from_email, recipients, force_bytes(message.as_string(), charset)) # depends on [control=['try'], data=[]]
except:
if not self.fail_silently:
raise # depends on [control=['if'], data=[]]
return False # depends on [control=['except'], data=[]]
return True |
def add_state_segments(self, *args, **kwargs):
        """DEPRECATED: use :meth:`Plot.add_segments_bar`
        """
        # Keep the old name working for now, but warn on every call.
        message = ('add_state_segments() was renamed add_segments_bar(), '
                   'this warning will result in an error in the future')
        warnings.warn(message, DeprecationWarning)
        return self.add_segments_bar(*args, **kwargs)
constant[DEPRECATED: use :meth:`Plot.add_segments_bar`
]
call[name[warnings].warn, parameter[constant[add_state_segments() was renamed add_segments_bar(), this warning will result in an error in the future], name[DeprecationWarning]]]
return[call[name[self].add_segments_bar, parameter[<ast.Starred object at 0x7da20e9553f0>]]] | keyword[def] identifier[add_state_segments] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] ,
identifier[DeprecationWarning] )
keyword[return] identifier[self] . identifier[add_segments_bar] (* identifier[args] ,** identifier[kwargs] ) | def add_state_segments(self, *args, **kwargs):
"""DEPRECATED: use :meth:`Plot.add_segments_bar`
"""
warnings.warn('add_state_segments() was renamed add_segments_bar(), this warning will result in an error in the future', DeprecationWarning)
return self.add_segments_bar(*args, **kwargs) |
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.
    >>> def f(iterator):
    ...     for x in iterator:
    ...         print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    def run_on_partition(iterator):
        # Normalise f's result to an iterator: a non-iterable result
        # (e.g. None from a side-effect-only function) becomes empty.
        result = f(iterator)
        try:
            return iter(result)
        except TypeError:
            return iter([])
    # count() forces evaluation of every partition for its side effects.
    self.mapPartitions(run_on_partition).count()
constant[
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
]
def function[func, parameter[it]]:
variable[r] assign[=] call[name[f], parameter[name[it]]]
<ast.Try object at 0x7da18bc70af0>
call[call[name[self].mapPartitions, parameter[name[func]]].count, parameter[]] | keyword[def] identifier[foreachPartition] ( identifier[self] , identifier[f] ):
literal[string]
keyword[def] identifier[func] ( identifier[it] ):
identifier[r] = identifier[f] ( identifier[it] )
keyword[try] :
keyword[return] identifier[iter] ( identifier[r] )
keyword[except] identifier[TypeError] :
keyword[return] identifier[iter] ([])
identifier[self] . identifier[mapPartitions] ( identifier[func] ). identifier[count] () | def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r) # depends on [control=['try'], data=[]]
except TypeError:
return iter([]) # depends on [control=['except'], data=[]]
self.mapPartitions(func).count() |
def ApprovalSymlinkUrnBuilder(approval_type, subject_id, user, approval_id):
  """Build an approval symlink URN.

  The resulting URN has the shape
  ``aff4:/users/<user>/approvals/<approval_type>/<subject_id>/<approval_id>``.
  """
  urn = aff4.ROOT_URN.Add("users").Add(user).Add("approvals")
  urn = urn.Add(approval_type).Add(subject_id)
  return urn.Add(approval_id)
constant[Build an approval symlink URN.]
return[call[call[call[call[call[call[name[aff4].ROOT_URN.Add, parameter[constant[users]]].Add, parameter[name[user]]].Add, parameter[constant[approvals]]].Add, parameter[name[approval_type]]].Add, parameter[name[subject_id]]].Add, parameter[name[approval_id]]]] | keyword[def] identifier[ApprovalSymlinkUrnBuilder] ( identifier[approval_type] , identifier[subject_id] , identifier[user] , identifier[approval_id] ):
literal[string]
keyword[return] identifier[aff4] . identifier[ROOT_URN] . identifier[Add] ( literal[string] ). identifier[Add] ( identifier[user] ). identifier[Add] ( literal[string] ). identifier[Add] (
identifier[approval_type] ). identifier[Add] ( identifier[subject_id] ). identifier[Add] ( identifier[approval_id] ) | def ApprovalSymlinkUrnBuilder(approval_type, subject_id, user, approval_id):
"""Build an approval symlink URN."""
return aff4.ROOT_URN.Add('users').Add(user).Add('approvals').Add(approval_type).Add(subject_id).Add(approval_id) |
def available_partitions_for_topic(self, topic):
    """Return set of partitions with known leaders

    Arguments:
        topic (str): topic to check for partitions

    Returns:
        set: {partition (int), ...}
        None if topic not found.
    """
    if topic not in self._partitions:
        return None
    # A leader id of -1 marks a partition with no currently elected leader.
    # dict.items() behaves correctly on both Python 2 and 3, so the
    # six.iteritems() indirection is unnecessary; a set comprehension also
    # avoids building an intermediate list.
    return {partition
            for partition, metadata in self._partitions[topic].items()
            if metadata.leader != -1}
constant[Return set of partitions with known leaders
Arguments:
topic (str): topic to check for partitions
Returns:
set: {partition (int), ...}
None if topic not found.
]
if compare[name[topic] <ast.NotIn object at 0x7da2590d7190> name[self]._partitions] begin[:]
return[constant[None]]
return[call[name[set], parameter[<ast.ListComp object at 0x7da1b1c2aa10>]]] | keyword[def] identifier[available_partitions_for_topic] ( identifier[self] , identifier[topic] ):
literal[string]
keyword[if] identifier[topic] keyword[not] keyword[in] identifier[self] . identifier[_partitions] :
keyword[return] keyword[None]
keyword[return] identifier[set] ([ identifier[partition] keyword[for] identifier[partition] , identifier[metadata]
keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[_partitions] [ identifier[topic] ])
keyword[if] identifier[metadata] . identifier[leader] !=- literal[int] ]) | def available_partitions_for_topic(self, topic):
"""Return set of partitions with known leaders
Arguments:
topic (str): topic to check for partitions
Returns:
set: {partition (int), ...}
None if topic not found.
"""
if topic not in self._partitions:
return None # depends on [control=['if'], data=[]]
return set([partition for (partition, metadata) in six.iteritems(self._partitions[topic]) if metadata.leader != -1]) |
def reorder(args):
    """
    %prog reorder tabfile 1,2,4,3 > newtabfile
    Reorder columns in tab-delimited files. The above syntax will print out a
    new file with col-1,2,4,3 from the old file.
    """
    import csv
    p = OptionParser(reorder.__doc__)
    p.set_sep()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    tabfile, order = args
    sep = opts.sep
    # Convert the 1-based column spec into 0-based indices.
    columns = [int(field) - 1 for field in order.split(",")]
    writer = csv.writer(sys.stdout, delimiter=sep)
    for record in csv.reader(must_open(tabfile), delimiter=sep):
        writer.writerow([record[idx] for idx in columns])
constant[
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
]
import module[csv]
variable[p] assign[=] call[name[OptionParser], parameter[name[reorder].__doc__]]
call[name[p].set_sep, parameter[]]
<ast.Tuple object at 0x7da18ede6bc0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18ede5d20>]]
<ast.Tuple object at 0x7da18ede75e0> assign[=] name[args]
variable[sep] assign[=] name[opts].sep
variable[order] assign[=] <ast.ListComp object at 0x7da18ede6b00>
variable[reader] assign[=] call[name[csv].reader, parameter[call[name[must_open], parameter[name[tabfile]]]]]
variable[writer] assign[=] call[name[csv].writer, parameter[name[sys].stdout]]
for taget[name[row]] in starred[name[reader]] begin[:]
variable[newrow] assign[=] <ast.ListComp object at 0x7da18ede5150>
call[name[writer].writerow, parameter[name[newrow]]] | keyword[def] identifier[reorder] ( identifier[args] ):
literal[string]
keyword[import] identifier[csv]
identifier[p] = identifier[OptionParser] ( identifier[reorder] . identifier[__doc__] )
identifier[p] . identifier[set_sep] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[tabfile] , identifier[order] = identifier[args]
identifier[sep] = identifier[opts] . identifier[sep]
identifier[order] =[ identifier[int] ( identifier[x] )- literal[int] keyword[for] identifier[x] keyword[in] identifier[order] . identifier[split] ( literal[string] )]
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[must_open] ( identifier[tabfile] ), identifier[delimiter] = identifier[sep] )
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[sys] . identifier[stdout] , identifier[delimiter] = identifier[sep] )
keyword[for] identifier[row] keyword[in] identifier[reader] :
identifier[newrow] =[ identifier[row] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[order] ]
identifier[writer] . identifier[writerow] ( identifier[newrow] ) | def reorder(args):
"""
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
"""
import csv
p = OptionParser(reorder.__doc__)
p.set_sep()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(tabfile, order) = args
sep = opts.sep
order = [int(x) - 1 for x in order.split(',')]
reader = csv.reader(must_open(tabfile), delimiter=sep)
writer = csv.writer(sys.stdout, delimiter=sep)
for row in reader:
newrow = [row[x] for x in order]
writer.writerow(newrow) # depends on [control=['for'], data=['row']] |
def set_state(self, vid, value=None, default=False, disable=False):
    """ Configures the VLAN state
    EosVersion:
        4.13.7M
    Args:
        vid (str): The VLAN ID to configure
        value (str): The value to set the vlan state to
        default (bool): Configures the vlan state to its default value
        disable (bool): Negates the vlan state
    Returns:
        True if the operation was successful otherwise False
    """
    # Build the 'state' command and apply it under the VLAN context.
    commands = self.command_builder('state', value=value,
                                    default=default, disable=disable)
    return self.configure_vlan(vid, commands)
constant[ Configures the VLAN state
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to configure
value (str): The value to set the vlan state to
default (bool): Configures the vlan state to its default value
disable (bool): Negates the vlan state
Returns:
True if the operation was successful otherwise False
]
variable[cmds] assign[=] call[name[self].command_builder, parameter[constant[state]]]
return[call[name[self].configure_vlan, parameter[name[vid], name[cmds]]]] | keyword[def] identifier[set_state] ( identifier[self] , identifier[vid] , identifier[value] = keyword[None] , identifier[default] = keyword[False] , identifier[disable] = keyword[False] ):
literal[string]
identifier[cmds] = identifier[self] . identifier[command_builder] ( literal[string] , identifier[value] = identifier[value] , identifier[default] = identifier[default] ,
identifier[disable] = identifier[disable] )
keyword[return] identifier[self] . identifier[configure_vlan] ( identifier[vid] , identifier[cmds] ) | def set_state(self, vid, value=None, default=False, disable=False):
""" Configures the VLAN state
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to configure
value (str): The value to set the vlan state to
default (bool): Configures the vlan state to its default value
disable (bool): Negates the vlan state
Returns:
True if the operation was successful otherwise False
"""
cmds = self.command_builder('state', value=value, default=default, disable=disable)
return self.configure_vlan(vid, cmds) |
def p_alias(self, p):
    """alias : KEYWORD ID EQ type_ref NL
             | KEYWORD ID EQ type_ref NL INDENT annotation_ref_list docsection DEDENT"""
    # NOTE: the docstring above IS the PLY grammar production for this rule;
    # editing it changes the parser, so it must stay exactly as written.
    if p[1] == 'alias':
        # Only the long production (len(p) > 6) carries an indented block
        # with an annotation_ref_list (p[7], may be None) and a docsection
        # (p[8]).
        has_annotations = len(p) > 6 and p[7] is not None
        doc = p[8] if len(p) > 6 else None
        # p[2] is the alias name (ID) and p[4] the aliased type_ref;
        # line/lexpos of the leading keyword locate the node for errors.
        p[0] = AstAlias(
            self.path, p.lineno(1), p.lexpos(1), p[2], p[4], doc)
        if has_annotations:
            p[0].set_annotations(p[7])
    else:
        # Any other KEYWORD matching this production is a grammar misuse.
        raise ValueError('Expected alias keyword')
constant[alias : KEYWORD ID EQ type_ref NL
| KEYWORD ID EQ type_ref NL INDENT annotation_ref_list docsection DEDENT]
if compare[call[name[p]][constant[1]] equal[==] constant[alias]] begin[:]
variable[has_annotations] assign[=] <ast.BoolOp object at 0x7da20c7caa70>
variable[doc] assign[=] <ast.IfExp object at 0x7da20c7cb5b0>
call[name[p]][constant[0]] assign[=] call[name[AstAlias], parameter[name[self].path, call[name[p].lineno, parameter[constant[1]]], call[name[p].lexpos, parameter[constant[1]]], call[name[p]][constant[2]], call[name[p]][constant[4]], name[doc]]]
if name[has_annotations] begin[:]
call[call[name[p]][constant[0]].set_annotations, parameter[call[name[p]][constant[7]]]] | keyword[def] identifier[p_alias] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[p] [ literal[int] ]== literal[string] :
identifier[has_annotations] = identifier[len] ( identifier[p] )> literal[int] keyword[and] identifier[p] [ literal[int] ] keyword[is] keyword[not] keyword[None]
identifier[doc] = identifier[p] [ literal[int] ] keyword[if] identifier[len] ( identifier[p] )> literal[int] keyword[else] keyword[None]
identifier[p] [ literal[int] ]= identifier[AstAlias] (
identifier[self] . identifier[path] , identifier[p] . identifier[lineno] ( literal[int] ), identifier[p] . identifier[lexpos] ( literal[int] ), identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[doc] )
keyword[if] identifier[has_annotations] :
identifier[p] [ literal[int] ]. identifier[set_annotations] ( identifier[p] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def p_alias(self, p):
"""alias : KEYWORD ID EQ type_ref NL
| KEYWORD ID EQ type_ref NL INDENT annotation_ref_list docsection DEDENT"""
if p[1] == 'alias':
has_annotations = len(p) > 6 and p[7] is not None
doc = p[8] if len(p) > 6 else None
p[0] = AstAlias(self.path, p.lineno(1), p.lexpos(1), p[2], p[4], doc)
if has_annotations:
p[0].set_annotations(p[7]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('Expected alias keyword') |
def note_create(self, post_id, coor_x, coor_y, width, height, body):
    """Function to create a note (Requires login) (UNTESTED).
    Parameters:
        post_id (int):
        coor_x (int): The x coordinates of the note in pixels,
            with respect to the top-left corner of the image.
        coor_y (int): The y coordinates of the note in pixels,
            with respect to the top-left corner of the image.
        width (int): The width of the note in pixels.
        height (int): The height of the note in pixels.
        body (str): The body of the note.
    """
    # Wrap every field name in the note[...] form the API expects.
    fields = (('post_id', post_id), ('x', coor_x), ('y', coor_y),
              ('width', width), ('height', height), ('body', body))
    params = {'note[%s]' % name: value for name, value in fields}
    return self._get('notes.json', params, method='POST', auth=True)
constant[Function to create a note (Requires login) (UNTESTED).
Parameters:
post_id (int):
coor_x (int): The x coordinates of the note in pixels,
with respect to the top-left corner of the image.
coor_y (int): The y coordinates of the note in pixels,
with respect to the top-left corner of the image.
width (int): The width of the note in pixels.
height (int): The height of the note in pixels.
body (str): The body of the note.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2041d9210>, <ast.Constant object at 0x7da20c991600>, <ast.Constant object at 0x7da20c993fa0>, <ast.Constant object at 0x7da20c9908b0>, <ast.Constant object at 0x7da207f037c0>, <ast.Constant object at 0x7da207f00070>], [<ast.Name object at 0x7da1b0d18d30>, <ast.Name object at 0x7da1b0d19c00>, <ast.Name object at 0x7da1b0d1b220>, <ast.Name object at 0x7da1b0d1a710>, <ast.Name object at 0x7da1b0d1b4c0>, <ast.Name object at 0x7da1b0d1b490>]]
return[call[name[self]._get, parameter[constant[notes.json], name[params]]]] | keyword[def] identifier[note_create] ( identifier[self] , identifier[post_id] , identifier[coor_x] , identifier[coor_y] , identifier[width] , identifier[height] , identifier[body] ):
literal[string]
identifier[params] ={
literal[string] : identifier[post_id] ,
literal[string] : identifier[coor_x] ,
literal[string] : identifier[coor_y] ,
literal[string] : identifier[width] ,
literal[string] : identifier[height] ,
literal[string] : identifier[body]
}
keyword[return] identifier[self] . identifier[_get] ( literal[string] , identifier[params] , identifier[method] = literal[string] , identifier[auth] = keyword[True] ) | def note_create(self, post_id, coor_x, coor_y, width, height, body):
"""Function to create a note (Requires login) (UNTESTED).
Parameters:
post_id (int):
coor_x (int): The x coordinates of the note in pixels,
with respect to the top-left corner of the image.
coor_y (int): The y coordinates of the note in pixels,
with respect to the top-left corner of the image.
width (int): The width of the note in pixels.
height (int): The height of the note in pixels.
body (str): The body of the note.
"""
params = {'note[post_id]': post_id, 'note[x]': coor_x, 'note[y]': coor_y, 'note[width]': width, 'note[height]': height, 'note[body]': body}
return self._get('notes.json', params, method='POST', auth=True) |
def safe_cd(path):
    """
    Temporarily switch the working directory to *path*.
    The previous working directory is restored on exit, even when an
    error is raised inside the ``with`` block.
    Usage:
    >>> with safe_cd('some/repo'):
    ...     call('git status')
    """
    previous_directory = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Always return to where we started, success or failure.
        os.chdir(previous_directory)
constant[
Changes to a directory, yields, and changes back.
Additionally any error will also change the directory back.
Usage:
>>> with safe_cd('some/repo'):
... call('git status')
]
variable[starting_directory] assign[=] call[name[os].getcwd, parameter[]]
<ast.Try object at 0x7da18f812350> | keyword[def] identifier[safe_cd] ( identifier[path] ):
literal[string]
identifier[starting_directory] = identifier[os] . identifier[getcwd] ()
keyword[try] :
identifier[os] . identifier[chdir] ( identifier[path] )
keyword[yield]
keyword[finally] :
identifier[os] . identifier[chdir] ( identifier[starting_directory] ) | def safe_cd(path):
"""
Changes to a directory, yields, and changes back.
Additionally any error will also change the directory back.
Usage:
>>> with safe_cd('some/repo'):
... call('git status')
"""
starting_directory = os.getcwd()
try:
os.chdir(path)
yield # depends on [control=['try'], data=[]]
finally:
os.chdir(starting_directory) |
def _write_service_config(self):
"""
Will write the config out to disk.
"""
with open(self.config_path, 'w') as output:
output.write(json.dumps(self.data, sort_keys=True, indent=4)) | def function[_write_service_config, parameter[self]]:
constant[
Will write the config out to disk.
]
with call[name[open], parameter[name[self].config_path, constant[w]]] begin[:]
call[name[output].write, parameter[call[name[json].dumps, parameter[name[self].data]]]] | keyword[def] identifier[_write_service_config] ( identifier[self] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[config_path] , literal[string] ) keyword[as] identifier[output] :
identifier[output] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[self] . identifier[data] , identifier[sort_keys] = keyword[True] , identifier[indent] = literal[int] )) | def _write_service_config(self):
"""
Will write the config out to disk.
"""
with open(self.config_path, 'w') as output:
output.write(json.dumps(self.data, sort_keys=True, indent=4)) # depends on [control=['with'], data=['output']] |
def reg_score_function(X, y, mean, scale, shape, skewness):
    """ GAS Cauchy Regression Update term using gradient only - native Python function
    Parameters
    ----------
    X : float
        datapoint for the right hand side variable
    y : float
        datapoint for the time series
    mean : float
        location parameter for the Cauchy distribution
    scale : float
        scale parameter for the Cauchy distribution
    shape : float
        tail thickness parameter for the Cauchy distribution
    skewness : float
        skewness parameter for the Cauchy distribution
    Returns
    ----------
    - Score of the Cauchy family
    """
    # Score is 2 * residual * X / (scale^2 + residual^2); shape and
    # skewness are accepted for interface uniformity but unused.
    residual = y - mean
    return 2.0 * residual * X / (np.power(scale, 2) + np.power(residual, 2))
constant[ GAS Cauchy Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Score of the Cauchy family
]
return[binary_operation[binary_operation[constant[2.0] * binary_operation[binary_operation[name[y] - name[mean]] * name[X]]] / binary_operation[call[name[np].power, parameter[name[scale], constant[2]]] + call[name[np].power, parameter[binary_operation[name[y] - name[mean]], constant[2]]]]]] | keyword[def] identifier[reg_score_function] ( identifier[X] , identifier[y] , identifier[mean] , identifier[scale] , identifier[shape] , identifier[skewness] ):
literal[string]
keyword[return] literal[int] *(( identifier[y] - identifier[mean] )* identifier[X] )/( identifier[np] . identifier[power] ( identifier[scale] , literal[int] )+ identifier[np] . identifier[power] (( identifier[y] - identifier[mean] ), literal[int] )) | def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS Cauchy Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Score of the Cauchy family
"""
return 2.0 * ((y - mean) * X) / (np.power(scale, 2) + np.power(y - mean, 2)) |
def query_by_admin(cls, admin):
    """Return the query of all groups administered by *admin*."""
    admin_type = resolve_admin_type(admin)
    return cls.query.filter_by(admin_type=admin_type,
                               admin_id=admin.get_id())
constant[Get all groups for for a specific admin.]
return[call[name[cls].query.filter_by, parameter[]]] | keyword[def] identifier[query_by_admin] ( identifier[cls] , identifier[admin] ):
literal[string]
keyword[return] identifier[cls] . identifier[query] . identifier[filter_by] (
identifier[admin_type] = identifier[resolve_admin_type] ( identifier[admin] ), identifier[admin_id] = identifier[admin] . identifier[get_id] ()) | def query_by_admin(cls, admin):
"""Get all groups for for a specific admin."""
return cls.query.filter_by(admin_type=resolve_admin_type(admin), admin_id=admin.get_id()) |
def get_go2sectiontxt(self):
    """Return a dict with actual header and user GO IDs as keys and their sections as values."""
    get_sections = self.hdrobj.get_sections
    # Pre-compute the section text for every header GO.
    hdrgo2txt = {}
    for hdrgo in self.get_hdrgos():
        hdrgo2txt[hdrgo] = " ".join(get_sections(hdrgo))
    usrgo2hdrgo = self.get_usrgo2hdrgo()
    # Map every GO (header or user) to its header's section text.
    go2txt = {}
    for goid, ntgo in self.go2nt.items():
        if ntgo.is_hdrgo:
            hdrgo = ntgo.GO
        else:
            hdrgo = usrgo2hdrgo[ntgo.GO]
        go2txt[goid] = hdrgo2txt[hdrgo]
    return go2txt
constant[Return a dict with actual header and user GO IDs as keys and their sections as values.]
variable[go2txt] assign[=] dictionary[[], []]
variable[_get_secs] assign[=] name[self].hdrobj.get_sections
variable[hdrgo2sectxt] assign[=] <ast.DictComp object at 0x7da1b23452d0>
variable[usrgo2hdrgo] assign[=] call[name[self].get_usrgo2hdrgo, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b2345b10>, <ast.Name object at 0x7da1b23441c0>]]] in starred[call[name[self].go2nt.items, parameter[]]] begin[:]
variable[hdrgo] assign[=] <ast.IfExp object at 0x7da1b2344460>
call[name[go2txt]][name[goid]] assign[=] call[name[hdrgo2sectxt]][name[hdrgo]]
return[name[go2txt]] | keyword[def] identifier[get_go2sectiontxt] ( identifier[self] ):
literal[string]
identifier[go2txt] ={}
identifier[_get_secs] = identifier[self] . identifier[hdrobj] . identifier[get_sections]
identifier[hdrgo2sectxt] ={ identifier[h] : literal[string] . identifier[join] ( identifier[_get_secs] ( identifier[h] )) keyword[for] identifier[h] keyword[in] identifier[self] . identifier[get_hdrgos] ()}
identifier[usrgo2hdrgo] = identifier[self] . identifier[get_usrgo2hdrgo] ()
keyword[for] identifier[goid] , identifier[ntgo] keyword[in] identifier[self] . identifier[go2nt] . identifier[items] ():
identifier[hdrgo] = identifier[ntgo] . identifier[GO] keyword[if] identifier[ntgo] . identifier[is_hdrgo] keyword[else] identifier[usrgo2hdrgo] [ identifier[ntgo] . identifier[GO] ]
identifier[go2txt] [ identifier[goid] ]= identifier[hdrgo2sectxt] [ identifier[hdrgo] ]
keyword[return] identifier[go2txt] | def get_go2sectiontxt(self):
"""Return a dict with actual header and user GO IDs as keys and their sections as values."""
go2txt = {}
_get_secs = self.hdrobj.get_sections
hdrgo2sectxt = {h: ' '.join(_get_secs(h)) for h in self.get_hdrgos()}
usrgo2hdrgo = self.get_usrgo2hdrgo()
for (goid, ntgo) in self.go2nt.items():
hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
go2txt[goid] = hdrgo2sectxt[hdrgo] # depends on [control=['for'], data=[]]
return go2txt |
def _FormatDescription(self, event):
    """Formats the description.
    Args:
      event (EventObject): event.
    Returns:
      str: formatted description field.
    Raises:
      NoFormatterFound: when no formatter matches the event's data type.
    """
    date_time_string = timelib.Timestamp.CopyToIsoFormat(
        event.timestamp, timezone=self._output_mediator.timezone)
    timestamp_description = event.timestamp_desc or 'UNKNOWN'
    message, _ = self._output_mediator.GetFormattedMessages(event)
    if message is None:
        data_type = getattr(event, 'data_type', 'UNKNOWN')
        raise errors.NoFormatterFound(
            'Unable to find event formatter for: {0:s}.'.format(data_type))
    # The field delimiter must not appear inside the message itself.
    sanitized_message = message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' ')
    description = '{0:s}; {1:s}; {2:s}'.format(
        date_time_string, timestamp_description, sanitized_message)
    return self._SanitizeField(description)
constant[Formats the description.
Args:
event (EventObject): event.
Returns:
str: formatted description field.
]
variable[date_time_string] assign[=] call[name[timelib].Timestamp.CopyToIsoFormat, parameter[name[event].timestamp]]
variable[timestamp_description] assign[=] <ast.BoolOp object at 0x7da20c7cb970>
<ast.Tuple object at 0x7da20c7cb4c0> assign[=] call[name[self]._output_mediator.GetFormattedMessages, parameter[name[event]]]
if compare[name[message] is constant[None]] begin[:]
variable[data_type] assign[=] call[name[getattr], parameter[name[event], constant[data_type], constant[UNKNOWN]]]
<ast.Raise object at 0x7da20c7caf80>
variable[description] assign[=] call[constant[{0:s}; {1:s}; {2:s}].format, parameter[name[date_time_string], name[timestamp_description], call[name[message].replace, parameter[name[self]._DESCRIPTION_FIELD_DELIMITER, constant[ ]]]]]
return[call[name[self]._SanitizeField, parameter[name[description]]]] | keyword[def] identifier[_FormatDescription] ( identifier[self] , identifier[event] ):
literal[string]
identifier[date_time_string] = identifier[timelib] . identifier[Timestamp] . identifier[CopyToIsoFormat] (
identifier[event] . identifier[timestamp] , identifier[timezone] = identifier[self] . identifier[_output_mediator] . identifier[timezone] )
identifier[timestamp_description] = identifier[event] . identifier[timestamp_desc] keyword[or] literal[string]
identifier[message] , identifier[_] = identifier[self] . identifier[_output_mediator] . identifier[GetFormattedMessages] ( identifier[event] )
keyword[if] identifier[message] keyword[is] keyword[None] :
identifier[data_type] = identifier[getattr] ( identifier[event] , literal[string] , literal[string] )
keyword[raise] identifier[errors] . identifier[NoFormatterFound] (
literal[string] . identifier[format] ( identifier[data_type] ))
identifier[description] = literal[string] . identifier[format] (
identifier[date_time_string] , identifier[timestamp_description] ,
identifier[message] . identifier[replace] ( identifier[self] . identifier[_DESCRIPTION_FIELD_DELIMITER] , literal[string] ))
keyword[return] identifier[self] . identifier[_SanitizeField] ( identifier[description] ) | def _FormatDescription(self, event):
"""Formats the description.
Args:
event (EventObject): event.
Returns:
str: formatted description field.
"""
date_time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp, timezone=self._output_mediator.timezone)
timestamp_description = event.timestamp_desc or 'UNKNOWN'
(message, _) = self._output_mediator.GetFormattedMessages(event)
if message is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type)) # depends on [control=['if'], data=[]]
description = '{0:s}; {1:s}; {2:s}'.format(date_time_string, timestamp_description, message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))
return self._SanitizeField(description) |
def _handle_input_request(self, msg):
""" Handle requests for raw_input.
"""
self.log.debug("input: %s", msg.get('content', ''))
if self._hidden:
raise RuntimeError('Request for raw input during hidden execution.')
# Make sure that all output from the SUB channel has been processed
# before entering readline mode.
self.kernel_manager.sub_channel.flush()
def callback(line):
self.kernel_manager.stdin_channel.input(line)
if self._reading:
self.log.debug("Got second input request, assuming first was interrupted.")
self._reading = False
self._readline(msg['content']['prompt'], callback=callback) | def function[_handle_input_request, parameter[self, msg]]:
constant[ Handle requests for raw_input.
]
call[name[self].log.debug, parameter[constant[input: %s], call[name[msg].get, parameter[constant[content], constant[]]]]]
if name[self]._hidden begin[:]
<ast.Raise object at 0x7da20c6c7e20>
call[name[self].kernel_manager.sub_channel.flush, parameter[]]
def function[callback, parameter[line]]:
call[name[self].kernel_manager.stdin_channel.input, parameter[name[line]]]
if name[self]._reading begin[:]
call[name[self].log.debug, parameter[constant[Got second input request, assuming first was interrupted.]]]
name[self]._reading assign[=] constant[False]
call[name[self]._readline, parameter[call[call[name[msg]][constant[content]]][constant[prompt]]]] | keyword[def] identifier[_handle_input_request] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[msg] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] identifier[self] . identifier[_hidden] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[kernel_manager] . identifier[sub_channel] . identifier[flush] ()
keyword[def] identifier[callback] ( identifier[line] ):
identifier[self] . identifier[kernel_manager] . identifier[stdin_channel] . identifier[input] ( identifier[line] )
keyword[if] identifier[self] . identifier[_reading] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_reading] = keyword[False]
identifier[self] . identifier[_readline] ( identifier[msg] [ literal[string] ][ literal[string] ], identifier[callback] = identifier[callback] ) | def _handle_input_request(self, msg):
""" Handle requests for raw_input.
"""
self.log.debug('input: %s', msg.get('content', ''))
if self._hidden:
raise RuntimeError('Request for raw input during hidden execution.') # depends on [control=['if'], data=[]]
# Make sure that all output from the SUB channel has been processed
# before entering readline mode.
self.kernel_manager.sub_channel.flush()
def callback(line):
self.kernel_manager.stdin_channel.input(line)
if self._reading:
self.log.debug('Got second input request, assuming first was interrupted.')
self._reading = False # depends on [control=['if'], data=[]]
self._readline(msg['content']['prompt'], callback=callback) |
def _get_headers(self, environ):
    """The list of headers for this response
    Builds the final header list from ``self.headers``: drops or fixes the
    entity headers (content-type / content-length) depending on the HTTP
    method and status code, then appends any cookies to store.
    :param environ: WSGI environ dict; only ``REQUEST_METHOD`` is read.
    :return: the header ``(name, value)`` items.
    """
    headers = self.headers
    method = environ['REQUEST_METHOD']
    if has_empty_content(self.status_code, method) and method != HEAD:
        # This status/method combination must not carry a body (per
        # has_empty_content), so drop the entity headers and the content.
        headers.pop('content-type', None)
        headers.pop('content-length', None)
        self._content = ()
    else:
        if not self.is_streamed():
            # Content is fully buffered, so its total length is known now.
            cl = reduce(count_len, self._content, 0)
            headers['content-length'] = str(cl)
        ct = headers.get('content-type')
        # content type encoding available
        if self.encoding:
            ct = ct or 'text/plain'
            if ';' not in ct:
                # Append the charset only when none was set explicitly.
                ct = '%s; charset=%s' % (ct, self.encoding)
        if ct:
            headers['content-type'] = ct
        if method == HEAD:
            # HEAD keeps the computed headers but sends no body.
            self._content = ()
    # Cookies are only stored on non-error responses and when allowed.
    if (self.status_code < 400 and self._can_store_cookies and
            self._cookies):
        for c in self.cookies.values():
            headers.add('set-cookie', c.OutputString())
    return headers.items()
constant[The list of headers for this response
]
variable[headers] assign[=] name[self].headers
variable[method] assign[=] call[name[environ]][constant[REQUEST_METHOD]]
if <ast.BoolOp object at 0x7da18bc703d0> begin[:]
call[name[headers].pop, parameter[constant[content-type], constant[None]]]
call[name[headers].pop, parameter[constant[content-length], constant[None]]]
name[self]._content assign[=] tuple[[]]
if <ast.BoolOp object at 0x7da20e9b2fb0> begin[:]
for taget[name[c]] in starred[call[name[self].cookies.values, parameter[]]] begin[:]
call[name[headers].add, parameter[constant[set-cookie], call[name[c].OutputString, parameter[]]]]
return[call[name[headers].items, parameter[]]] | keyword[def] identifier[_get_headers] ( identifier[self] , identifier[environ] ):
literal[string]
identifier[headers] = identifier[self] . identifier[headers]
identifier[method] = identifier[environ] [ literal[string] ]
keyword[if] identifier[has_empty_content] ( identifier[self] . identifier[status_code] , identifier[method] ) keyword[and] identifier[method] != identifier[HEAD] :
identifier[headers] . identifier[pop] ( literal[string] , keyword[None] )
identifier[headers] . identifier[pop] ( literal[string] , keyword[None] )
identifier[self] . identifier[_content] =()
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[is_streamed] ():
identifier[cl] = identifier[reduce] ( identifier[count_len] , identifier[self] . identifier[_content] , literal[int] )
identifier[headers] [ literal[string] ]= identifier[str] ( identifier[cl] )
identifier[ct] = identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[self] . identifier[encoding] :
identifier[ct] = identifier[ct] keyword[or] literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[ct] :
identifier[ct] = literal[string] %( identifier[ct] , identifier[self] . identifier[encoding] )
keyword[if] identifier[ct] :
identifier[headers] [ literal[string] ]= identifier[ct]
keyword[if] identifier[method] == identifier[HEAD] :
identifier[self] . identifier[_content] =()
keyword[if] ( identifier[self] . identifier[status_code] < literal[int] keyword[and] identifier[self] . identifier[_can_store_cookies] keyword[and]
identifier[self] . identifier[_cookies] ):
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[cookies] . identifier[values] ():
identifier[headers] . identifier[add] ( literal[string] , identifier[c] . identifier[OutputString] ())
keyword[return] identifier[headers] . identifier[items] () | def _get_headers(self, environ):
"""The list of headers for this response
"""
headers = self.headers
method = environ['REQUEST_METHOD']
if has_empty_content(self.status_code, method) and method != HEAD:
headers.pop('content-type', None)
headers.pop('content-length', None)
self._content = () # depends on [control=['if'], data=[]]
else:
if not self.is_streamed():
cl = reduce(count_len, self._content, 0)
headers['content-length'] = str(cl) # depends on [control=['if'], data=[]]
ct = headers.get('content-type')
# content type encoding available
if self.encoding:
ct = ct or 'text/plain'
if ';' not in ct:
ct = '%s; charset=%s' % (ct, self.encoding) # depends on [control=['if'], data=['ct']] # depends on [control=['if'], data=[]]
if ct:
headers['content-type'] = ct # depends on [control=['if'], data=[]]
if method == HEAD:
self._content = () # depends on [control=['if'], data=[]]
# Cookies
if self.status_code < 400 and self._can_store_cookies and self._cookies:
for c in self.cookies.values():
headers.add('set-cookie', c.OutputString()) # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=[]]
return headers.items() |
def pttl(self, key):
    """
    Emulate pttl
    :param key: key for which pttl is requested.
    :returns: the number of milliseconds till timeout; in non-strict mode
        None is returned if the key does not exist or if the key has no
        timeout (as per the redis-py lib behavior), while in strict mode
        the redis >= 2.8 sentinels -2 / -1 are returned instead.
    """
    # NOTE: the original body carried a second, dead triple-quoted string
    # describing a nonexistent ``output_ms`` parameter; it was removed.
    key = self._encode(key)
    if key not in self.redis:
        # as of redis 2.8, -2 is returned if the key does not exist
        return long(-2) if self.strict else None
    if key not in self.timeouts:
        # as of redis 2.8, -1 is returned if the key is persistent
        # redis-py returns None; command docs say -1
        return long(-1) if self.strict else None
    time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
    # Clamp at -1 so an already-elapsed timeout is reported as persistent
    # rather than as a negative remaining time.
    return long(max(-1, time_to_live))
constant[
Emulate pttl
:param key: key for which pttl is requested.
:returns: the number of milliseconds till timeout, None if the key does not exist or if the
key has no timeout(as per the redis-py lib behavior).
]
constant[
Returns time to live in milliseconds if output_ms is True, else returns seconds.
]
variable[key] assign[=] call[name[self]._encode, parameter[name[key]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].redis] begin[:]
return[<ast.IfExp object at 0x7da20c6a8a60>]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].timeouts] begin[:]
return[<ast.IfExp object at 0x7da20c6aa710>]
variable[time_to_live] assign[=] call[name[get_total_milliseconds], parameter[binary_operation[call[name[self].timeouts][name[key]] - call[name[self].clock.now, parameter[]]]]]
return[call[name[long], parameter[call[name[max], parameter[<ast.UnaryOp object at 0x7da20c6a96f0>, name[time_to_live]]]]]] | keyword[def] identifier[pttl] ( identifier[self] , identifier[key] ):
literal[string]
literal[string]
identifier[key] = identifier[self] . identifier[_encode] ( identifier[key] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[redis] :
keyword[return] identifier[long] (- literal[int] ) keyword[if] identifier[self] . identifier[strict] keyword[else] keyword[None]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[timeouts] :
keyword[return] identifier[long] (- literal[int] ) keyword[if] identifier[self] . identifier[strict] keyword[else] keyword[None]
identifier[time_to_live] = identifier[get_total_milliseconds] ( identifier[self] . identifier[timeouts] [ identifier[key] ]- identifier[self] . identifier[clock] . identifier[now] ())
keyword[return] identifier[long] ( identifier[max] (- literal[int] , identifier[time_to_live] )) | def pttl(self, key):
"""
Emulate pttl
:param key: key for which pttl is requested.
:returns: the number of milliseconds till timeout, None if the key does not exist or if the
key has no timeout(as per the redis-py lib behavior).
"""
'\n Returns time to live in milliseconds if output_ms is True, else returns seconds.\n '
key = self._encode(key)
if key not in self.redis:
# as of redis 2.8, -2 is returned if the key does not exist
return long(-2) if self.strict else None # depends on [control=['if'], data=[]]
if key not in self.timeouts:
# as of redis 2.8, -1 is returned if the key is persistent
# redis-py returns None; command docs say -1
return long(-1) if self.strict else None # depends on [control=['if'], data=[]]
time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
return long(max(-1, time_to_live)) |
def bump_version(project, source, force_init):  # type: (str, str, bool) -> int
    """
    Entry point
    Validates that all version strings discovered by the jiggler agree,
    then bumps ("jiggles") the version in every relevant file.
    :param project: project name (passed through to JiggleVersion).
    :param source: source location (passed through to JiggleVersion).
    :param force_init: flag passed through to JiggleVersion.
    :return: number of files that were changed.
    """
    file_opener = FileOpener()
    # logger.debug("Starting version jiggler...")
    jiggler = JiggleVersion(project, source, file_opener, force_init)
    logger.debug(
        "Current, next : {0} -> {1} : {2}".format(
            jiggler.current_version, jiggler.version, jiggler.schema
        )
    )
    if not jiggler.version_finder.validate_current_versions():
        # Refuse to bump when the versions found across files disagree;
        # die() is expected to terminate, error is logged first.
        logger.debug(unicode(jiggler.version_finder.all_current_versions()))
        logger.error("Versions not in sync, won't continue")
        die(-1, "Versions not in sync, won't continue")
    changed = jiggler.jiggle_all()
    logger.debug("Changed {0} files".format(changed))
    return changed
constant[
Entry point
:return:
]
variable[file_opener] assign[=] call[name[FileOpener], parameter[]]
variable[jiggler] assign[=] call[name[JiggleVersion], parameter[name[project], name[source], name[file_opener], name[force_init]]]
call[name[logger].debug, parameter[call[constant[Current, next : {0} -> {1} : {2}].format, parameter[name[jiggler].current_version, name[jiggler].version, name[jiggler].schema]]]]
if <ast.UnaryOp object at 0x7da18dc9bac0> begin[:]
call[name[logger].debug, parameter[call[name[unicode], parameter[call[name[jiggler].version_finder.all_current_versions, parameter[]]]]]]
call[name[logger].error, parameter[constant[Versions not in sync, won't continue]]]
call[name[die], parameter[<ast.UnaryOp object at 0x7da18dc999f0>, constant[Versions not in sync, won't continue]]]
variable[changed] assign[=] call[name[jiggler].jiggle_all, parameter[]]
call[name[logger].debug, parameter[call[constant[Changed {0} files].format, parameter[name[changed]]]]]
return[name[changed]] | keyword[def] identifier[bump_version] ( identifier[project] , identifier[source] , identifier[force_init] ):
literal[string]
identifier[file_opener] = identifier[FileOpener] ()
identifier[jiggler] = identifier[JiggleVersion] ( identifier[project] , identifier[source] , identifier[file_opener] , identifier[force_init] )
identifier[logger] . identifier[debug] (
literal[string] . identifier[format] (
identifier[jiggler] . identifier[current_version] , identifier[jiggler] . identifier[version] , identifier[jiggler] . identifier[schema]
)
)
keyword[if] keyword[not] identifier[jiggler] . identifier[version_finder] . identifier[validate_current_versions] ():
identifier[logger] . identifier[debug] ( identifier[unicode] ( identifier[jiggler] . identifier[version_finder] . identifier[all_current_versions] ()))
identifier[logger] . identifier[error] ( literal[string] )
identifier[die] (- literal[int] , literal[string] )
identifier[changed] = identifier[jiggler] . identifier[jiggle_all] ()
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[changed] ))
keyword[return] identifier[changed] | def bump_version(project, source, force_init): # type: (str, str, bool, bool) ->int
'\n Entry point\n :return:\n '
file_opener = FileOpener()
# logger.debug("Starting version jiggler...")
jiggler = JiggleVersion(project, source, file_opener, force_init)
logger.debug('Current, next : {0} -> {1} : {2}'.format(jiggler.current_version, jiggler.version, jiggler.schema))
if not jiggler.version_finder.validate_current_versions():
logger.debug(unicode(jiggler.version_finder.all_current_versions()))
logger.error("Versions not in sync, won't continue")
die(-1, "Versions not in sync, won't continue") # depends on [control=['if'], data=[]]
changed = jiggler.jiggle_all()
logger.debug('Changed {0} files'.format(changed))
return changed |
def diff(self, first_row: List[Row], second_row: List[Row], column: NumberColumn) -> Number:
    """
    Takes a two rows and a number column and returns the difference between the values under
    that column in those two rows.
    """
    # Either row list being empty means there is nothing to subtract.
    if not (first_row and second_row):
        return 0.0  # type: ignore
    minuend = first_row[0].values[column.name]
    subtrahend = second_row[0].values[column.name]
    # Only float-valued cells are valid for a numeric difference.
    if not (isinstance(minuend, float) and isinstance(subtrahend, float)):
        raise ExecutionError(f"Invalid column for diff: {column.name}")
    return minuend - subtrahend  # type: ignore
constant[
Takes a two rows and a number column and returns the difference between the values under
that column in those two rows.
]
if <ast.BoolOp object at 0x7da1b1f96d10> begin[:]
return[constant[0.0]]
variable[first_value] assign[=] call[call[name[first_row]][constant[0]].values][name[column].name]
variable[second_value] assign[=] call[call[name[second_row]][constant[0]].values][name[column].name]
if <ast.BoolOp object at 0x7da1b1f94400> begin[:]
return[binary_operation[name[first_value] - name[second_value]]] | keyword[def] identifier[diff] ( identifier[self] , identifier[first_row] : identifier[List] [ identifier[Row] ], identifier[second_row] : identifier[List] [ identifier[Row] ], identifier[column] : identifier[NumberColumn] )-> identifier[Number] :
literal[string]
keyword[if] keyword[not] identifier[first_row] keyword[or] keyword[not] identifier[second_row] :
keyword[return] literal[int]
identifier[first_value] = identifier[first_row] [ literal[int] ]. identifier[values] [ identifier[column] . identifier[name] ]
identifier[second_value] = identifier[second_row] [ literal[int] ]. identifier[values] [ identifier[column] . identifier[name] ]
keyword[if] identifier[isinstance] ( identifier[first_value] , identifier[float] ) keyword[and] identifier[isinstance] ( identifier[second_value] , identifier[float] ):
keyword[return] identifier[first_value] - identifier[second_value]
keyword[else] :
keyword[raise] identifier[ExecutionError] ( literal[string] ) | def diff(self, first_row: List[Row], second_row: List[Row], column: NumberColumn) -> Number:
"""
Takes a two rows and a number column and returns the difference between the values under
that column in those two rows.
"""
if not first_row or not second_row:
return 0.0 # type: ignore # depends on [control=['if'], data=[]]
first_value = first_row[0].values[column.name]
second_value = second_row[0].values[column.name]
if isinstance(first_value, float) and isinstance(second_value, float):
return first_value - second_value # type: ignore # depends on [control=['if'], data=[]]
else:
raise ExecutionError(f'Invalid column for diff: {column.name}') |
def buy_product(self, product_pk):
    """
    Determine whether this customer has bought a given product.
    :param product_pk: primary key of the product to check.
    :return: True if the product appears in any of the customer's invoice
        sale or ticket sale order lines, False otherwise.
    """
    # Return the boolean expression directly instead of the redundant
    # ``if cond: return True / else: return False`` branching.
    return (
        self.invoice_sales.filter(
            line_invoice_sales__line_order__product__pk=product_pk).exists()
        or self.ticket_sales.filter(
            line_ticket_sales__line_order__product__pk=product_pk).exists()
    )
constant[
determina si el customer ha comprado un producto
]
if <ast.BoolOp object at 0x7da18dc04130> begin[:]
return[constant[True]] | keyword[def] identifier[buy_product] ( identifier[self] , identifier[product_pk] ):
literal[string]
keyword[if] identifier[self] . identifier[invoice_sales] . identifier[filter] ( identifier[line_invoice_sales__line_order__product__pk] = identifier[product_pk] ). identifier[exists] () keyword[or] identifier[self] . identifier[ticket_sales] . identifier[filter] ( identifier[line_ticket_sales__line_order__product__pk] = identifier[product_pk] ). identifier[exists] ():
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def buy_product(self, product_pk):
"""
determina si el customer ha comprado un producto
"""
if self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists() or self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists():
return True # depends on [control=['if'], data=[]]
else:
return False |
def shear_vel_at_depth(self, y_c):
    """
    Get the shear wave velocity at a depth.
    :param y_c: float, depth from surface
    :return: shear wave velocity from the soil at that depth
    """
    sl = self.get_soil_at_depth(y_c)
    # Soil strictly below the ground water level (gwl) is saturated;
    # replaces the verbose if/else assignment with one boolean expression.
    saturation = y_c > self.gwl
    if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
        # Stress-dependent soil model: derive the velocity from the
        # vertical effective stress at this depth.
        v_eff = self.get_v_eff_stress_at_depth(y_c)
        vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
    else:
        vs = sl.get_shear_vel(saturation)
    return vs
constant[
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
]
variable[sl] assign[=] call[name[self].get_soil_at_depth, parameter[name[y_c]]]
if compare[name[y_c] less_or_equal[<=] name[self].gwl] begin[:]
variable[saturation] assign[=] constant[False]
if call[name[hasattr], parameter[name[sl], constant[get_shear_vel_at_v_eff_stress]]] begin[:]
variable[v_eff] assign[=] call[name[self].get_v_eff_stress_at_depth, parameter[name[y_c]]]
variable[vs] assign[=] call[name[sl].get_shear_vel_at_v_eff_stress, parameter[name[v_eff], name[saturation]]]
return[name[vs]] | keyword[def] identifier[shear_vel_at_depth] ( identifier[self] , identifier[y_c] ):
literal[string]
identifier[sl] = identifier[self] . identifier[get_soil_at_depth] ( identifier[y_c] )
keyword[if] identifier[y_c] <= identifier[self] . identifier[gwl] :
identifier[saturation] = keyword[False]
keyword[else] :
identifier[saturation] = keyword[True]
keyword[if] identifier[hasattr] ( identifier[sl] , literal[string] ):
identifier[v_eff] = identifier[self] . identifier[get_v_eff_stress_at_depth] ( identifier[y_c] )
identifier[vs] = identifier[sl] . identifier[get_shear_vel_at_v_eff_stress] ( identifier[v_eff] , identifier[saturation] )
keyword[else] :
identifier[vs] = identifier[sl] . identifier[get_shear_vel] ( identifier[saturation] )
keyword[return] identifier[vs] | def shear_vel_at_depth(self, y_c):
"""
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
"""
sl = self.get_soil_at_depth(y_c)
if y_c <= self.gwl:
saturation = False # depends on [control=['if'], data=[]]
else:
saturation = True
if hasattr(sl, 'get_shear_vel_at_v_eff_stress'):
v_eff = self.get_v_eff_stress_at_depth(y_c)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation) # depends on [control=['if'], data=[]]
else:
vs = sl.get_shear_vel(saturation)
return vs |
def js(self, name=None):
    """
    Returns all needed Javascript filepaths for given config name (if
    given) or every registered config instead (if no name is given).
    Keyword Arguments:
        name (string): Specific config name to use instead of all.
    Returns:
        list: List of Javascript file paths.
    """
    filepaths = copy.copy(settings.CODEMIRROR_BASE_JS)
    configs = self.get_configs(name)
    names = sorted(configs)
    # Addons first so they are emitted before any mode files.
    # The loop variable is distinct from the ``name`` parameter to avoid
    # shadowing it (the original reused ``name`` here).
    for config_name in names:
        opts = configs[config_name]
        for item in opts.get('addons', []):
            if item not in filepaths:
                filepaths.append(item)
    # Then resolve every configured mode to its file path, de-duplicated.
    for config_name in names:
        opts = configs[config_name]
        for item in opts['modes']:
            resolved = self.resolve_mode(item)
            if resolved not in filepaths:
                filepaths.append(resolved)
    return filepaths
constant[
Returns all needed Javascript filepaths for given config name (if
given) or every registred config instead (if no name is given).
Keyword Arguments:
name (string): Specific config name to use instead of all.
Returns:
list: List of Javascript file paths.
]
variable[filepaths] assign[=] call[name[copy].copy, parameter[name[settings].CODEMIRROR_BASE_JS]]
variable[configs] assign[=] call[name[self].get_configs, parameter[name[name]]]
variable[names] assign[=] call[name[sorted], parameter[name[configs]]]
for taget[name[name]] in starred[name[names]] begin[:]
variable[opts] assign[=] call[name[configs]][name[name]]
for taget[name[item]] in starred[call[name[opts].get, parameter[constant[addons], list[[]]]]] begin[:]
if compare[name[item] <ast.NotIn object at 0x7da2590d7190> name[filepaths]] begin[:]
call[name[filepaths].append, parameter[name[item]]]
for taget[name[name]] in starred[name[names]] begin[:]
variable[opts] assign[=] call[name[configs]][name[name]]
for taget[name[item]] in starred[call[name[opts]][constant[modes]]] begin[:]
variable[resolved] assign[=] call[name[self].resolve_mode, parameter[name[item]]]
if compare[name[resolved] <ast.NotIn object at 0x7da2590d7190> name[filepaths]] begin[:]
call[name[filepaths].append, parameter[name[resolved]]]
return[name[filepaths]] | keyword[def] identifier[js] ( identifier[self] , identifier[name] = keyword[None] ):
literal[string]
identifier[filepaths] = identifier[copy] . identifier[copy] ( identifier[settings] . identifier[CODEMIRROR_BASE_JS] )
identifier[configs] = identifier[self] . identifier[get_configs] ( identifier[name] )
identifier[names] = identifier[sorted] ( identifier[configs] )
keyword[for] identifier[name] keyword[in] identifier[names] :
identifier[opts] = identifier[configs] [ identifier[name] ]
keyword[for] identifier[item] keyword[in] identifier[opts] . identifier[get] ( literal[string] ,[]):
keyword[if] identifier[item] keyword[not] keyword[in] identifier[filepaths] :
identifier[filepaths] . identifier[append] ( identifier[item] )
keyword[for] identifier[name] keyword[in] identifier[names] :
identifier[opts] = identifier[configs] [ identifier[name] ]
keyword[for] identifier[item] keyword[in] identifier[opts] [ literal[string] ]:
identifier[resolved] = identifier[self] . identifier[resolve_mode] ( identifier[item] )
keyword[if] identifier[resolved] keyword[not] keyword[in] identifier[filepaths] :
identifier[filepaths] . identifier[append] ( identifier[resolved] )
keyword[return] identifier[filepaths] | def js(self, name=None):
"""
Returns all needed Javascript filepaths for given config name (if
given) or every registred config instead (if no name is given).
Keyword Arguments:
name (string): Specific config name to use instead of all.
Returns:
list: List of Javascript file paths.
"""
filepaths = copy.copy(settings.CODEMIRROR_BASE_JS)
configs = self.get_configs(name)
names = sorted(configs)
# Addons first
for name in names:
opts = configs[name]
for item in opts.get('addons', []):
if item not in filepaths:
filepaths.append(item) # depends on [control=['if'], data=['item', 'filepaths']] # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['name']]
# Process modes
for name in names:
opts = configs[name]
for item in opts['modes']:
resolved = self.resolve_mode(item)
if resolved not in filepaths:
filepaths.append(resolved) # depends on [control=['if'], data=['resolved', 'filepaths']] # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['name']]
return filepaths |
def auto_kwargs(function):
    """Modifies the provided function to support kwargs by only passing along kwargs for parameters it accepts"""
    accepted = introspect.arguments(function)

    @wraps(function)
    def call_function(*args, **kwargs):
        # Drop any keyword argument the wrapped function does not declare.
        filtered = {key: value for key, value in kwargs.items() if key in accepted}
        return function(*args, **filtered)

    return call_function
constant[Modifies the provided function to support kwargs by only passing along kwargs for parameters it accepts]
variable[supported] assign[=] call[name[introspect].arguments, parameter[name[function]]]
def function[call_function, parameter[]]:
return[call[name[function], parameter[<ast.Starred object at 0x7da18bcca6b0>]]]
return[name[call_function]] | keyword[def] identifier[auto_kwargs] ( identifier[function] ):
literal[string]
identifier[supported] = identifier[introspect] . identifier[arguments] ( identifier[function] )
@ identifier[wraps] ( identifier[function] )
keyword[def] identifier[call_function] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[function] (* identifier[args] ,**{ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[supported] })
keyword[return] identifier[call_function] | def auto_kwargs(function):
"""Modifies the provided function to support kwargs by only passing along kwargs for parameters it accepts"""
supported = introspect.arguments(function)
@wraps(function)
def call_function(*args, **kwargs):
return function(*args, **{key: value for (key, value) in kwargs.items() if key in supported})
return call_function |
def module_config(self, settings_module):
    """
    Optional function
    Loads configuration from a Python module: every public, fully
    upper-cased attribute of ``settings_module`` is copied into the
    global ``settings`` object, and back-references to the module and
    this app are recorded on it.
    :param settings_module: an imported module object (must define
        ``__file__``); used both as the settings source and to derive
        the root path.
    """
    assert hasattr(settings_module, '__file__'), 'settings must be a module'
    # set root_path according to module file
    self.set_root_path(settings_module=settings_module)
    app_log.debug('Set root_path: %s', self.root_path)
    global settings
    # Only names that are public (no leading underscore) and entirely
    # upper-case are treated as settings keys.
    self.update_settings(dict(
        [(i, getattr(settings_module, i)) for i in dir(settings_module)
         if not i.startswith('_') and i == i.upper()]))
    settings._module = settings_module
    # keep a mapping to app on settings object
    settings._app = self
constant[
Optional function
]
assert[call[name[hasattr], parameter[name[settings_module], constant[__file__]]]]
call[name[self].set_root_path, parameter[]]
call[name[app_log].debug, parameter[constant[Set root_path: %s], name[self].root_path]]
<ast.Global object at 0x7da1b23445e0>
call[name[self].update_settings, parameter[call[name[dict], parameter[<ast.ListComp object at 0x7da1b2346d70>]]]]
name[settings]._module assign[=] name[settings_module]
name[settings]._app assign[=] name[self] | keyword[def] identifier[module_config] ( identifier[self] , identifier[settings_module] ):
literal[string]
keyword[assert] identifier[hasattr] ( identifier[settings_module] , literal[string] ), literal[string]
identifier[self] . identifier[set_root_path] ( identifier[settings_module] = identifier[settings_module] )
identifier[app_log] . identifier[debug] ( literal[string] , identifier[self] . identifier[root_path] )
keyword[global] identifier[settings]
identifier[self] . identifier[update_settings] ( identifier[dict] (
[( identifier[i] , identifier[getattr] ( identifier[settings_module] , identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[dir] ( identifier[settings_module] )
keyword[if] keyword[not] identifier[i] . identifier[startswith] ( literal[string] ) keyword[and] identifier[i] == identifier[i] . identifier[upper] ()]))
identifier[settings] . identifier[_module] = identifier[settings_module]
identifier[settings] . identifier[_app] = identifier[self] | def module_config(self, settings_module):
"""
Optional function
"""
assert hasattr(settings_module, '__file__'), 'settings must be a module'
# set root_path according to module file
self.set_root_path(settings_module=settings_module)
app_log.debug('Set root_path: %s', self.root_path)
global settings
self.update_settings(dict([(i, getattr(settings_module, i)) for i in dir(settings_module) if not i.startswith('_') and i == i.upper()]))
settings._module = settings_module
# keep a mapping to app on settings object
settings._app = self |
def wrap_deepmind(env, dim=84, framestack=True):
    """Configure environment for DeepMind-style Atari.
    Note that we assume reward clipping is done outside the wrapper.
    Args:
        env: The Atari environment to wrap.
        dim (int): Dimension to resize observations to (dim x dim).
        framestack (bool): Whether to framestack observations.
    Returns:
        The environment wrapped in the DeepMind preprocessing stack.
    """
    env = MonitorEnv(env)
    # Randomize episode starts with up to 30 no-op actions on reset.
    env = NoopResetEnv(env, noop_max=30)
    if "NoFrameskip" in env.spec.id:
        # Raw (non-frameskipped) ROMs: add frame-max + skip of 4.
        env = MaxAndSkipEnv(env, skip=4)
    env = EpisodicLifeEnv(env)
    if "FIRE" in env.unwrapped.get_action_meanings():
        # Games that need FIRE to start get it pressed on reset.
        env = FireResetEnv(env)
    env = WarpFrame(env, dim)
    # env = ScaledFloatFrame(env)  # TODO: use for dqn?
    # env = ClipRewardEnv(env)  # reward clipping is handled by policy eval
    if framestack:
        env = FrameStack(env, 4)
    return env
constant[Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
]
variable[env] assign[=] call[name[MonitorEnv], parameter[name[env]]]
variable[env] assign[=] call[name[NoopResetEnv], parameter[name[env]]]
if compare[constant[NoFrameskip] in name[env].spec.id] begin[:]
variable[env] assign[=] call[name[MaxAndSkipEnv], parameter[name[env]]]
variable[env] assign[=] call[name[EpisodicLifeEnv], parameter[name[env]]]
if compare[constant[FIRE] in call[name[env].unwrapped.get_action_meanings, parameter[]]] begin[:]
variable[env] assign[=] call[name[FireResetEnv], parameter[name[env]]]
variable[env] assign[=] call[name[WarpFrame], parameter[name[env], name[dim]]]
if name[framestack] begin[:]
variable[env] assign[=] call[name[FrameStack], parameter[name[env], constant[4]]]
return[name[env]] | keyword[def] identifier[wrap_deepmind] ( identifier[env] , identifier[dim] = literal[int] , identifier[framestack] = keyword[True] ):
literal[string]
identifier[env] = identifier[MonitorEnv] ( identifier[env] )
identifier[env] = identifier[NoopResetEnv] ( identifier[env] , identifier[noop_max] = literal[int] )
keyword[if] literal[string] keyword[in] identifier[env] . identifier[spec] . identifier[id] :
identifier[env] = identifier[MaxAndSkipEnv] ( identifier[env] , identifier[skip] = literal[int] )
identifier[env] = identifier[EpisodicLifeEnv] ( identifier[env] )
keyword[if] literal[string] keyword[in] identifier[env] . identifier[unwrapped] . identifier[get_action_meanings] ():
identifier[env] = identifier[FireResetEnv] ( identifier[env] )
identifier[env] = identifier[WarpFrame] ( identifier[env] , identifier[dim] )
keyword[if] identifier[framestack] :
identifier[env] = identifier[FrameStack] ( identifier[env] , literal[int] )
keyword[return] identifier[env] | def wrap_deepmind(env, dim=84, framestack=True):
"""Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
"""
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if 'NoFrameskip' in env.spec.id:
env = MaxAndSkipEnv(env, skip=4) # depends on [control=['if'], data=[]]
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env) # depends on [control=['if'], data=[]]
env = WarpFrame(env, dim)
# env = ScaledFloatFrame(env) # TODO: use for dqn?
# env = ClipRewardEnv(env) # reward clipping is handled by policy eval
if framestack:
env = FrameStack(env, 4) # depends on [control=['if'], data=[]]
return env |
def patch_for(self, path):
    """Returns the ``Patch`` for the target path, creating it if necessary.
    :param str path: The absolute module path to the target.
    :return: The mapped ``Patch``.
    :rtype: Patch
    """
    try:
        # Fast path: the patch already exists for this target.
        return self._patches[path]
    except KeyError:
        patch = self._patches[path] = Patch(path)
        return patch
constant[Returns the ``Patch`` for the target path, creating it if necessary.
:param str path: The absolute module path to the target.
:return: The mapped ``Patch``.
:rtype: Patch
]
if compare[name[path] <ast.NotIn object at 0x7da2590d7190> name[self]._patches] begin[:]
call[name[self]._patches][name[path]] assign[=] call[name[Patch], parameter[name[path]]]
return[call[name[self]._patches][name[path]]] | keyword[def] identifier[patch_for] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] identifier[path] keyword[not] keyword[in] identifier[self] . identifier[_patches] :
identifier[self] . identifier[_patches] [ identifier[path] ]= identifier[Patch] ( identifier[path] )
keyword[return] identifier[self] . identifier[_patches] [ identifier[path] ] | def patch_for(self, path):
"""Returns the ``Patch`` for the target path, creating it if necessary.
:param str path: The absolute module path to the target.
:return: The mapped ``Patch``.
:rtype: Patch
"""
if path not in self._patches:
self._patches[path] = Patch(path) # depends on [control=['if'], data=['path']]
return self._patches[path] |
def _construct_linebreak_token(self, d: Dict) -> List[Dict]:
    """
    Construct the token patterns for a line-break token.
    Args:
        d: Dict; ``d["length"]`` optionally holds the number of
           consecutive line breaks (defaults to 1 when empty)
    Returns: List[Dict]
    """
    tokens = []
    count = int(d["length"][0]) if d["length"] else 1
    if count:
        text = ""
        for _ in range(count):
            text += "\n"
            tokens.append({attrs.LOWER: text})
        # Also match the same run of newlines followed by a trailing space.
        text += " "
        tokens.append({attrs.LOWER: text})
    return self._add_common_constrain(tokens, d)
return result | def function[_construct_linebreak_token, parameter[self, d]]:
constant[
Construct a shape token
Args:
d: Dict
Returns: List[Dict]
]
variable[result] assign[=] list[[]]
variable[num_break] assign[=] <ast.IfExp object at 0x7da1b0bd9ae0>
if name[num_break] begin[:]
variable[s] assign[=] constant[]
for taget[name[i]] in starred[call[name[range], parameter[name[num_break]]]] begin[:]
<ast.AugAssign object at 0x7da1b0bdba90>
variable[this_token] assign[=] dictionary[[<ast.Attribute object at 0x7da1b0bd8f10>], [<ast.Name object at 0x7da1b0bd96c0>]]
call[name[result].append, parameter[name[this_token]]]
<ast.AugAssign object at 0x7da1b0bd8c40>
variable[this_token] assign[=] dictionary[[<ast.Attribute object at 0x7da1b0bc8640>], [<ast.Name object at 0x7da1b0bc9ba0>]]
call[name[result].append, parameter[name[this_token]]]
variable[result] assign[=] call[name[self]._add_common_constrain, parameter[name[result], name[d]]]
return[name[result]] | keyword[def] identifier[_construct_linebreak_token] ( identifier[self] , identifier[d] : identifier[Dict] )-> identifier[List] [ identifier[Dict] ]:
literal[string]
identifier[result] =[]
identifier[num_break] = identifier[int] ( identifier[d] [ literal[string] ][ literal[int] ]) keyword[if] identifier[d] [ literal[string] ] keyword[else] literal[int]
keyword[if] identifier[num_break] :
identifier[s] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_break] ):
identifier[s] += literal[string]
identifier[this_token] ={ identifier[attrs] . identifier[LOWER] : identifier[s] }
identifier[result] . identifier[append] ( identifier[this_token] )
identifier[s] += literal[string]
identifier[this_token] ={ identifier[attrs] . identifier[LOWER] : identifier[s] }
identifier[result] . identifier[append] ( identifier[this_token] )
identifier[result] = identifier[self] . identifier[_add_common_constrain] ( identifier[result] , identifier[d] )
keyword[return] identifier[result] | def _construct_linebreak_token(self, d: Dict) -> List[Dict]:
"""
Construct a shape token
Args:
d: Dict
Returns: List[Dict]
"""
result = []
num_break = int(d['length'][0]) if d['length'] else 1
if num_break:
s = ''
for i in range(num_break):
s += '\n' # depends on [control=['for'], data=[]]
this_token = {attrs.LOWER: s}
result.append(this_token)
s += ' '
this_token = {attrs.LOWER: s}
result.append(this_token) # depends on [control=['if'], data=[]]
result = self._add_common_constrain(result, d)
return result |
def asobject(self):
    """
    Return object Series which contains boxed values.
    .. deprecated :: 0.23.0
        Use ``astype(object)`` instead.
    *this is an internal non-public method*
    """
    msg = "'asobject' is deprecated. Use 'astype(object)' instead"
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return self.astype(object).values
constant[
Return object Series which contains boxed values.
.. deprecated :: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
]
call[name[warnings].warn, parameter[constant['asobject' is deprecated. Use 'astype(object)' instead], name[FutureWarning]]]
return[call[name[self].astype, parameter[name[object]]].values] | keyword[def] identifier[asobject] ( identifier[self] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] , identifier[FutureWarning] , identifier[stacklevel] = literal[int] )
keyword[return] identifier[self] . identifier[astype] ( identifier[object] ). identifier[values] | def asobject(self):
"""
Return object Series which contains boxed values.
.. deprecated :: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn("'asobject' is deprecated. Use 'astype(object)' instead", FutureWarning, stacklevel=2)
return self.astype(object).values |
def distort(self, x1=0,y1=0, x2=0,y2=0, x3=0,y3=0, x4=0,y4=0):
    """Distorts the layer.
    Distorts the layer by translating
    the four corners of its bounding box to the given coordinates:
    upper left (x1,y1), upper right(x2,y2),
    lower right (x3,y3) and lower left (x4,y4).
    """
    w, h = self.img.size
    # Image.QUAD takes the source quadrilateral as four corners in the
    # order: upper left, lower left, lower right, upper right.
    # Fix: the lower-right y-offset must be measured against the height
    # (h-y3); the original used w-y3, which mis-maps the lower-right
    # corner on any non-square layer (even the all-zero identity case).
    quad = (-x1,-y1, -x4,h-y4, w-x3,h-y3, w-x2,-y2)
    self.img = self.img.transform(self.img.size, Image.QUAD, quad, INTERPOLATION)
constant[Distorts the layer.
Distorts the layer by translating
the four corners of its bounding box to the given coordinates:
upper left (x1,y1), upper right(x2,y2),
lower right (x3,y3) and lower left (x4,y4).
]
<ast.Tuple object at 0x7da20c991e40> assign[=] name[self].img.size
variable[quad] assign[=] tuple[[<ast.UnaryOp object at 0x7da20c993280>, <ast.UnaryOp object at 0x7da20c992dd0>, <ast.UnaryOp object at 0x7da20c993760>, <ast.BinOp object at 0x7da20c990070>, <ast.BinOp object at 0x7da20c991d80>, <ast.BinOp object at 0x7da20c991840>, <ast.BinOp object at 0x7da20c9915a0>, <ast.UnaryOp object at 0x7da20c990df0>]]
name[self].img assign[=] call[name[self].img.transform, parameter[name[self].img.size, name[Image].QUAD, name[quad], name[INTERPOLATION]]] | keyword[def] identifier[distort] ( identifier[self] , identifier[x1] = literal[int] , identifier[y1] = literal[int] , identifier[x2] = literal[int] , identifier[y2] = literal[int] , identifier[x3] = literal[int] , identifier[y3] = literal[int] , identifier[x4] = literal[int] , identifier[y4] = literal[int] ):
literal[string]
identifier[w] , identifier[h] = identifier[self] . identifier[img] . identifier[size]
identifier[quad] =(- identifier[x1] ,- identifier[y1] ,- identifier[x4] , identifier[h] - identifier[y4] , identifier[w] - identifier[x3] , identifier[w] - identifier[y3] , identifier[w] - identifier[x2] ,- identifier[y2] )
identifier[self] . identifier[img] = identifier[self] . identifier[img] . identifier[transform] ( identifier[self] . identifier[img] . identifier[size] , identifier[Image] . identifier[QUAD] , identifier[quad] , identifier[INTERPOLATION] ) | def distort(self, x1=0, y1=0, x2=0, y2=0, x3=0, y3=0, x4=0, y4=0):
"""Distorts the layer.
Distorts the layer by translating
the four corners of its bounding box to the given coordinates:
upper left (x1,y1), upper right(x2,y2),
lower right (x3,y3) and lower left (x4,y4).
"""
(w, h) = self.img.size
quad = (-x1, -y1, -x4, h - y4, w - x3, w - y3, w - x2, -y2)
self.img = self.img.transform(self.img.size, Image.QUAD, quad, INTERPOLATION) |
def solveMDP():
    """Solve the problem as a finite horizon Markov decision process.
    The optimal policy at each stage is found using backwards induction.
    Possingham and Tuck report strategies for a 50 year time horizon, so the
    number of stages for the finite horizon algorithm is set to 50. There is no
    discount factor reported, so we set it to 0.96 rather arbitrarily.
    Returns
    -------
    mdp : mdptoolbox.mdp.FiniteHorizon
        The PyMDPtoolbox object that represents a finite horizon MDP. The
        optimal policy for each stage is accessed with mdp.policy, which is a
        numpy array with 50 columns (one for each stage).
    """
    transitions, rewards = getTransitionAndRewardArrays(0.5)
    solver = mdp.FiniteHorizon(transitions, rewards, 0.96, 50)
    solver.run()
    return solver
constant[Solve the problem as a finite horizon Markov decision process.
The optimal policy at each stage is found using backwards induction.
Possingham and Tuck report strategies for a 50 year time horizon, so the
number of stages for the finite horizon algorithm is set to 50. There is no
discount factor reported, so we set it to 0.96 rather arbitrarily.
Returns
-------
mdp : mdptoolbox.mdp.FiniteHorizon
The PyMDPtoolbox object that represents a finite horizon MDP. The
optimal policy for each stage is accessed with mdp.policy, which is a
numpy array with 50 columns (one for each stage).
]
<ast.Tuple object at 0x7da1b08893c0> assign[=] call[name[getTransitionAndRewardArrays], parameter[constant[0.5]]]
variable[sdp] assign[=] call[name[mdp].FiniteHorizon, parameter[name[P], name[R], constant[0.96], constant[50]]]
call[name[sdp].run, parameter[]]
return[name[sdp]] | keyword[def] identifier[solveMDP] ():
literal[string]
identifier[P] , identifier[R] = identifier[getTransitionAndRewardArrays] ( literal[int] )
identifier[sdp] = identifier[mdp] . identifier[FiniteHorizon] ( identifier[P] , identifier[R] , literal[int] , literal[int] )
identifier[sdp] . identifier[run] ()
keyword[return] ( identifier[sdp] ) | def solveMDP():
"""Solve the problem as a finite horizon Markov decision process.
The optimal policy at each stage is found using backwards induction.
Possingham and Tuck report strategies for a 50 year time horizon, so the
number of stages for the finite horizon algorithm is set to 50. There is no
discount factor reported, so we set it to 0.96 rather arbitrarily.
Returns
-------
mdp : mdptoolbox.mdp.FiniteHorizon
The PyMDPtoolbox object that represents a finite horizon MDP. The
optimal policy for each stage is accessed with mdp.policy, which is a
numpy array with 50 columns (one for each stage).
"""
(P, R) = getTransitionAndRewardArrays(0.5)
sdp = mdp.FiniteHorizon(P, R, 0.96, 50)
sdp.run()
return sdp |
def bcc(self, bcc):
    '''
    :param bcc: Email addresses for the 'Bcc' API field.
    :type bcc: :keyword:`list` or `str`
    '''
    # A comma-separated string is split into a list of addresses;
    # anything else is stored as given.
    self._bcc = bcc.split(',') if isinstance(bcc, basestring) else bcc
constant[
:param bcc: Email addresses for the 'Bcc' API field.
:type bcc: :keyword:`list` or `str`
]
if call[name[isinstance], parameter[name[bcc], name[basestring]]] begin[:]
variable[bcc] assign[=] call[name[bcc].split, parameter[constant[,]]]
name[self]._bcc assign[=] name[bcc] | keyword[def] identifier[bcc] ( identifier[self] , identifier[bcc] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[bcc] , identifier[basestring] ):
identifier[bcc] = identifier[bcc] . identifier[split] ( literal[string] )
identifier[self] . identifier[_bcc] = identifier[bcc] | def bcc(self, bcc):
"""
:param bcc: Email addresses for the 'Bcc' API field.
:type bcc: :keyword:`list` or `str`
"""
if isinstance(bcc, basestring):
bcc = bcc.split(',') # depends on [control=['if'], data=[]]
self._bcc = bcc |
def must_contain(tag_name, tag_content, container_tag_name):
    """
    Generate function, which checks if given element contains `tag_name` with
    string content `tag_content` and also another tag named
    `container_tag_name`.
    This function can be used as parameter for .find() method in HTMLElement.
    """
    def must_contain_closure(element):
        # First-level child <tag_name> whose content is `tag_content`,
        # plus (when requested) a first-level <container_tag_name>.
        first_level = element.match(tag_name, absolute=True)
        return bool(
            first_level
            and first_level[0].getContent() == tag_content
            and not (container_tag_name
                     and not element.match(container_tag_name, absolute=True))
        )
    return must_contain_closure
constant[
Generate function, which checks if given element contains `tag_name` with
string content `tag_content` and also another tag named
`container_tag_name`.
This function can be used as parameter for .find() method in HTMLElement.
]
def function[must_contain_closure, parameter[element]]:
variable[matching_tags] assign[=] call[name[element].match, parameter[name[tag_name]]]
if <ast.UnaryOp object at 0x7da1b13b9e10> begin[:]
return[constant[False]]
if compare[call[call[name[matching_tags]][constant[0]].getContent, parameter[]] not_equal[!=] name[tag_content]] begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b13b8820> begin[:]
return[constant[False]]
return[constant[True]]
return[name[must_contain_closure]] | keyword[def] identifier[must_contain] ( identifier[tag_name] , identifier[tag_content] , identifier[container_tag_name] ):
literal[string]
keyword[def] identifier[must_contain_closure] ( identifier[element] ):
identifier[matching_tags] = identifier[element] . identifier[match] ( identifier[tag_name] , identifier[absolute] = keyword[True] )
keyword[if] keyword[not] identifier[matching_tags] :
keyword[return] keyword[False]
keyword[if] identifier[matching_tags] [ literal[int] ]. identifier[getContent] ()!= identifier[tag_content] :
keyword[return] keyword[False]
keyword[if] identifier[container_tag_name] keyword[and] keyword[not] identifier[element] . identifier[match] ( identifier[container_tag_name] , identifier[absolute] = keyword[True] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[return] identifier[must_contain_closure] | def must_contain(tag_name, tag_content, container_tag_name):
"""
Generate function, which checks if given element contains `tag_name` with
string content `tag_content` and also another tag named
`container_tag_name`.
This function can be used as parameter for .find() method in HTMLElement.
"""
def must_contain_closure(element):
# containing in first level of childs <tag_name> tag
matching_tags = element.match(tag_name, absolute=True)
if not matching_tags:
return False # depends on [control=['if'], data=[]]
# which's content match `tag_content`
if matching_tags[0].getContent() != tag_content:
return False # depends on [control=['if'], data=[]]
# and also contains <container_tag_name> tag
if container_tag_name and (not element.match(container_tag_name, absolute=True)):
return False # depends on [control=['if'], data=[]]
return True
return must_contain_closure |
def get_experiment_summ_ids( self, coinc_event_id ):
    """
    Gets all the experiment_summ_ids that map to a given coinc_event_id.
    """
    matches = [row.experiment_summ_id for row in self
               if row.coinc_event_id == coinc_event_id]
    if not matches:
        raise ValueError("'%s' could not be found in the experiment_map table" % coinc_event_id)
    return matches
constant[
Gets all the experiment_summ_ids that map to a given coinc_event_id.
]
variable[experiment_summ_ids] assign[=] list[[]]
for taget[name[row]] in starred[name[self]] begin[:]
if compare[name[row].coinc_event_id equal[==] name[coinc_event_id]] begin[:]
call[name[experiment_summ_ids].append, parameter[name[row].experiment_summ_id]]
if compare[call[name[len], parameter[name[experiment_summ_ids]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20c7c8370>
return[name[experiment_summ_ids]] | keyword[def] identifier[get_experiment_summ_ids] ( identifier[self] , identifier[coinc_event_id] ):
literal[string]
identifier[experiment_summ_ids] =[]
keyword[for] identifier[row] keyword[in] identifier[self] :
keyword[if] identifier[row] . identifier[coinc_event_id] == identifier[coinc_event_id] :
identifier[experiment_summ_ids] . identifier[append] ( identifier[row] . identifier[experiment_summ_id] )
keyword[if] identifier[len] ( identifier[experiment_summ_ids] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[coinc_event_id] )
keyword[return] identifier[experiment_summ_ids] | def get_experiment_summ_ids(self, coinc_event_id):
"""
Gets all the experiment_summ_ids that map to a given coinc_event_id.
"""
experiment_summ_ids = []
for row in self:
if row.coinc_event_id == coinc_event_id:
experiment_summ_ids.append(row.experiment_summ_id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
if len(experiment_summ_ids) == 0:
raise ValueError("'%s' could not be found in the experiment_map table" % coinc_event_id) # depends on [control=['if'], data=[]]
return experiment_summ_ids |
def timer(module, name, delta, duration_units='milliseconds'):
    """
    Record a timing delta:
    ::
        start_time_s = time.time()
        do_some_operation()
        end_time_s = time.time()
        delta_s = end_time_s - start_time_s
        delta_ms = delta_s * 1000
        timer(__name__, 'my_timer', delta_ms)
    """
    # Local renamed so the metric object does not shadow this function.
    metric = get_metric('timers', module, name, Timer(duration_units))
    metric.update(delta)
constant[
Record a timing delta:
::
start_time_s = time.time()
do_some_operation()
end_time_s = time.time()
delta_s = end_time_s - start_time_s
delta_ms = delta_s * 1000
timer(__name__, 'my_timer', delta_ms)
]
variable[timer] assign[=] call[name[get_metric], parameter[constant[timers], name[module], name[name], call[name[Timer], parameter[name[duration_units]]]]]
call[name[timer].update, parameter[name[delta]]] | keyword[def] identifier[timer] ( identifier[module] , identifier[name] , identifier[delta] , identifier[duration_units] = literal[string] ):
literal[string]
identifier[timer] = identifier[get_metric] ( literal[string] , identifier[module] , identifier[name] , identifier[Timer] ( identifier[duration_units] ))
identifier[timer] . identifier[update] ( identifier[delta] ) | def timer(module, name, delta, duration_units='milliseconds'):
"""
Record a timing delta:
::
start_time_s = time.time()
do_some_operation()
end_time_s = time.time()
delta_s = end_time_s - start_time_s
delta_ms = delta_s * 1000
timer(__name__, 'my_timer', delta_ms)
"""
timer = get_metric('timers', module, name, Timer(duration_units))
timer.update(delta) |
def file_contains(self, *args, **kwargs):
    """
    filename text
    http://docs.fabfile.org/en/1.13/api/contrib/files.html#fabric.contrib.files.contains
    """
    # Imported lazily so fabric is only required when this helper is used.
    from fabric.contrib.files import contains as _contains
    return _contains(*args, **kwargs)
constant[
filename text
http://docs.fabfile.org/en/1.13/api/contrib/files.html#fabric.contrib.files.contains
]
from relative_module[fabric.contrib.files] import module[contains]
return[call[name[contains], parameter[<ast.Starred object at 0x7da1b003c8b0>]]] | keyword[def] identifier[file_contains] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[fabric] . identifier[contrib] . identifier[files] keyword[import] identifier[contains]
keyword[return] identifier[contains] (* identifier[args] ,** identifier[kwargs] ) | def file_contains(self, *args, **kwargs):
"""
filename text
http://docs.fabfile.org/en/1.13/api/contrib/files.html#fabric.contrib.files.contains
"""
from fabric.contrib.files import contains
return contains(*args, **kwargs) |
def get_tags_of_incoming_per_page(self, incoming_id, per_page=1000, page=1):
    """
    Get tags of incoming per page
    :param incoming_id: the incoming id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    query = {'incoming_id': incoming_id}
    return self._get_resource_per_page(resource=INCOMING_TAGS,
                                       per_page=per_page,
                                       page=page,
                                       params=query)
constant[
Get tags of incoming per page
:param incoming_id: the incoming id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
]
return[call[name[self]._get_resource_per_page, parameter[]]] | keyword[def] identifier[get_tags_of_incoming_per_page] ( identifier[self] , identifier[incoming_id] , identifier[per_page] = literal[int] , identifier[page] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[_get_resource_per_page] (
identifier[resource] = identifier[INCOMING_TAGS] ,
identifier[per_page] = identifier[per_page] ,
identifier[page] = identifier[page] ,
identifier[params] ={ literal[string] : identifier[incoming_id] },
) | def get_tags_of_incoming_per_page(self, incoming_id, per_page=1000, page=1):
"""
Get tags of incoming per page
:param incoming_id: the incoming id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(resource=INCOMING_TAGS, per_page=per_page, page=page, params={'incoming_id': incoming_id}) |
def shell(self):
    """
    Opens a Django focussed Python shell.
    Essentially the equivalent of running `manage.py shell`.
    """
    r = self.local_renderer
    host = self.genv.host_string
    # Use the concrete host string when it already carries a user part;
    # otherwise fall back to the templated form.
    r.env.shell_host_string = host if '@' in host else '{user}@{host_string}'
    r.env.shell_default_dir = self.genv.shell_default_dir_template
    r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
    r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"')
constant[
Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`.
]
variable[r] assign[=] name[self].local_renderer
if compare[constant[@] in name[self].genv.host_string] begin[:]
name[r].env.shell_host_string assign[=] name[self].genv.host_string
name[r].env.shell_default_dir assign[=] name[self].genv.shell_default_dir_template
name[r].env.shell_interactive_djshell_str assign[=] name[self].genv.interactive_shell_template
call[name[r].run_or_local, parameter[constant[ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"]]] | keyword[def] identifier[shell] ( identifier[self] ):
literal[string]
identifier[r] = identifier[self] . identifier[local_renderer]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[genv] . identifier[host_string] :
identifier[r] . identifier[env] . identifier[shell_host_string] = identifier[self] . identifier[genv] . identifier[host_string]
keyword[else] :
identifier[r] . identifier[env] . identifier[shell_host_string] = literal[string]
identifier[r] . identifier[env] . identifier[shell_default_dir] = identifier[self] . identifier[genv] . identifier[shell_default_dir_template]
identifier[r] . identifier[env] . identifier[shell_interactive_djshell_str] = identifier[self] . identifier[genv] . identifier[interactive_shell_template]
identifier[r] . identifier[run_or_local] ( literal[string] ) | def shell(self):
"""
Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`.
"""
r = self.local_renderer
if '@' in self.genv.host_string:
r.env.shell_host_string = self.genv.host_string # depends on [control=['if'], data=[]]
else:
r.env.shell_host_string = '{user}@{host_string}'
r.env.shell_default_dir = self.genv.shell_default_dir_template
r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"') |
def reset(self):
    "Close the current failed connection and prepare for a new one"
    log.info("resetting client")
    # Save the RPC client before __init__ below wipes and rebuilds all
    # connection state, which would otherwise discard it.
    rpc_client = self._rpc_client
    # Requeue the failed peer's address so a later connect can retry it.
    self._addrs.append(self._peer.addr)
    self.__init__(self._addrs)
    # Re-attach the preserved RPC client to the freshly reset state.
    self._rpc_client = rpc_client
    self._dispatcher.rpc_client = rpc_client
    # Store a weak back-reference from the RPC client to this client.
    rpc_client._client = weakref.ref(self)
constant[Close the current failed connection and prepare for a new one]
call[name[log].info, parameter[constant[resetting client]]]
variable[rpc_client] assign[=] name[self]._rpc_client
call[name[self]._addrs.append, parameter[name[self]._peer.addr]]
call[name[self].__init__, parameter[name[self]._addrs]]
name[self]._rpc_client assign[=] name[rpc_client]
name[self]._dispatcher.rpc_client assign[=] name[rpc_client]
name[rpc_client]._client assign[=] call[name[weakref].ref, parameter[name[self]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] )
identifier[rpc_client] = identifier[self] . identifier[_rpc_client]
identifier[self] . identifier[_addrs] . identifier[append] ( identifier[self] . identifier[_peer] . identifier[addr] )
identifier[self] . identifier[__init__] ( identifier[self] . identifier[_addrs] )
identifier[self] . identifier[_rpc_client] = identifier[rpc_client]
identifier[self] . identifier[_dispatcher] . identifier[rpc_client] = identifier[rpc_client]
identifier[rpc_client] . identifier[_client] = identifier[weakref] . identifier[ref] ( identifier[self] ) | def reset(self):
"""Close the current failed connection and prepare for a new one"""
log.info('resetting client')
rpc_client = self._rpc_client
self._addrs.append(self._peer.addr)
self.__init__(self._addrs)
self._rpc_client = rpc_client
self._dispatcher.rpc_client = rpc_client
rpc_client._client = weakref.ref(self) |
def process_sums(self):
    """
    A redefined version of :func:`RC2.process_sums`. The only
    modification affects the clauses whose weight after
    splitting becomes less than the weight of the current
    optimization level. Such clauses are deactivated and to be
    reactivated at a later stage.
    """
    # sums that should be deactivated (but not removed completely)
    to_deactivate = set([])
    for l in self.core_sums:
        if self.wght[l] == self.minw:
            # marking variable as being a part of the core
            # so that next time it is not used as an assump
            self.garbage.add(l)
        else:
            # do not remove this variable from assumps
            # since it has a remaining non-zero weight
            self.wght[l] -= self.minw
            # deactivate this assumption and put at a lower level
            if self.wght[l] < self.blop[self.levl]:
                # remember the literal under its remaining weight so it
                # can be reactivated when that weight level is processed
                self.wstr[self.wght[l]].append(l)
                to_deactivate.add(l)
        # increase bound for the sum
        t, b = self.update_sum(l)
        # updating bounds and weights
        if b < len(t.rhs):
            lnew = -t.rhs[b]
            # a literal previously thrown away may come back into play
            if lnew in self.garbage:
                self.garbage.remove(lnew)
                self.wght[lnew] = 0
            # fresh literal: introduce the next bound; known literal:
            # accumulate the current minimum weight onto it
            if lnew not in self.wght:
                self.set_bound(t, b)
            else:
                self.wght[lnew] += self.minw
        # put this assumption to relaxation vars
        self.rels.append(-l)
    # deactivating unnecessary sums
    self.sums = list(filter(lambda x: x not in to_deactivate, self.sums))
constant[
A redefined version of :func:`RC2.process_sums`. The only
modification affects the clauses whose weight after
splitting becomes less than the weight of the current
optimization level. Such clauses are deactivated and to be
reactivated at a later stage.
]
variable[to_deactivate] assign[=] call[name[set], parameter[list[[]]]]
for taget[name[l]] in starred[name[self].core_sums] begin[:]
if compare[call[name[self].wght][name[l]] equal[==] name[self].minw] begin[:]
call[name[self].garbage.add, parameter[name[l]]]
<ast.Tuple object at 0x7da1b128a5c0> assign[=] call[name[self].update_sum, parameter[name[l]]]
if compare[name[b] less[<] call[name[len], parameter[name[t].rhs]]] begin[:]
variable[lnew] assign[=] <ast.UnaryOp object at 0x7da1b128bfd0>
if compare[name[lnew] in name[self].garbage] begin[:]
call[name[self].garbage.remove, parameter[name[lnew]]]
call[name[self].wght][name[lnew]] assign[=] constant[0]
if compare[name[lnew] <ast.NotIn object at 0x7da2590d7190> name[self].wght] begin[:]
call[name[self].set_bound, parameter[name[t], name[b]]]
call[name[self].rels.append, parameter[<ast.UnaryOp object at 0x7da20c6e7c10>]]
name[self].sums assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b11a3070>, name[self].sums]]]] | keyword[def] identifier[process_sums] ( identifier[self] ):
literal[string]
identifier[to_deactivate] = identifier[set] ([])
keyword[for] identifier[l] keyword[in] identifier[self] . identifier[core_sums] :
keyword[if] identifier[self] . identifier[wght] [ identifier[l] ]== identifier[self] . identifier[minw] :
identifier[self] . identifier[garbage] . identifier[add] ( identifier[l] )
keyword[else] :
identifier[self] . identifier[wght] [ identifier[l] ]-= identifier[self] . identifier[minw]
keyword[if] identifier[self] . identifier[wght] [ identifier[l] ]< identifier[self] . identifier[blop] [ identifier[self] . identifier[levl] ]:
identifier[self] . identifier[wstr] [ identifier[self] . identifier[wght] [ identifier[l] ]]. identifier[append] ( identifier[l] )
identifier[to_deactivate] . identifier[add] ( identifier[l] )
identifier[t] , identifier[b] = identifier[self] . identifier[update_sum] ( identifier[l] )
keyword[if] identifier[b] < identifier[len] ( identifier[t] . identifier[rhs] ):
identifier[lnew] =- identifier[t] . identifier[rhs] [ identifier[b] ]
keyword[if] identifier[lnew] keyword[in] identifier[self] . identifier[garbage] :
identifier[self] . identifier[garbage] . identifier[remove] ( identifier[lnew] )
identifier[self] . identifier[wght] [ identifier[lnew] ]= literal[int]
keyword[if] identifier[lnew] keyword[not] keyword[in] identifier[self] . identifier[wght] :
identifier[self] . identifier[set_bound] ( identifier[t] , identifier[b] )
keyword[else] :
identifier[self] . identifier[wght] [ identifier[lnew] ]+= identifier[self] . identifier[minw]
identifier[self] . identifier[rels] . identifier[append] (- identifier[l] )
identifier[self] . identifier[sums] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[not] keyword[in] identifier[to_deactivate] , identifier[self] . identifier[sums] )) | def process_sums(self):
"""
A redefined version of :func:`RC2.process_sums`. The only
modification affects the clauses whose weight after
splitting becomes less than the weight of the current
optimization level. Such clauses are deactivated and to be
reactivated at a later stage.
"""
# sums that should be deactivated (but not removed completely)
to_deactivate = set([])
for l in self.core_sums:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l) # depends on [control=['if'], data=[]]
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# deactivate this assumption and put at a lower level
if self.wght[l] < self.blop[self.levl]:
self.wstr[self.wght[l]].append(l)
to_deactivate.add(l) # depends on [control=['if'], data=[]]
# increase bound for the sum
(t, b) = self.update_sum(l)
# updating bounds and weights
if b < len(t.rhs):
lnew = -t.rhs[b]
if lnew in self.garbage:
self.garbage.remove(lnew)
self.wght[lnew] = 0 # depends on [control=['if'], data=['lnew']]
if lnew not in self.wght:
self.set_bound(t, b) # depends on [control=['if'], data=[]]
else:
self.wght[lnew] += self.minw # depends on [control=['if'], data=['b']]
# put this assumption to relaxation vars
self.rels.append(-l) # depends on [control=['for'], data=['l']]
# deactivating unnecessary sums
self.sums = list(filter(lambda x: x not in to_deactivate, self.sums)) |
def parseJuiceHeaders(lines):
    """
    Create a JuiceBox from a list of header lines.

    Lines beginning with a space are continuations: their remainder is
    appended (CRLF-separated) to the value of the most recent key.
    Every other line must be of the form ``Key: value``.

    @param lines: a list of lines.
    @return: a 2-tuple of (length, box): the integer value of the
        LENGTH header (0 if absent, and popped from the box) and the
        JuiceBox holding the remaining headers.
    @raise MalformedJuiceBox: if a non-continuation line does not split
        into exactly a key and a value on ': '.
    """
    b = JuiceBox()
    # Removed unused local `bodylen`: the body length is computed from
    # the LENGTH header in the return statement instead.
    key = None
    for L in lines:
        if L[0] == ' ':
            # continuation of the previous header's value
            assert key is not None
            b[key] += '\r\n'+L[1:]
            continue
        parts = L.split(': ', 1)
        if len(parts) != 2:
            raise MalformedJuiceBox("Wrong number of parts: %r" % (L,))
        key, value = parts
        key = normalizeKey(key)
        b[key] = value
    return int(b.pop(LENGTH, 0)), b
constant[
Create a JuiceBox from a list of header lines.
@param lines: a list of lines.
]
variable[b] assign[=] call[name[JuiceBox], parameter[]]
variable[bodylen] assign[=] constant[0]
variable[key] assign[=] constant[None]
for taget[name[L]] in starred[name[lines]] begin[:]
if compare[call[name[L]][constant[0]] equal[==] constant[ ]] begin[:]
assert[compare[name[key] is_not constant[None]]]
<ast.AugAssign object at 0x7da18eb573d0>
continue
variable[parts] assign[=] call[name[L].split, parameter[constant[: ], constant[1]]]
if compare[call[name[len], parameter[name[parts]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da18eb56500>
<ast.Tuple object at 0x7da18eb575b0> assign[=] name[parts]
variable[key] assign[=] call[name[normalizeKey], parameter[name[key]]]
call[name[b]][name[key]] assign[=] name[value]
return[tuple[[<ast.Call object at 0x7da18eb55a50>, <ast.Name object at 0x7da20c794c10>]]] | keyword[def] identifier[parseJuiceHeaders] ( identifier[lines] ):
literal[string]
identifier[b] = identifier[JuiceBox] ()
identifier[bodylen] = literal[int]
identifier[key] = keyword[None]
keyword[for] identifier[L] keyword[in] identifier[lines] :
keyword[if] identifier[L] [ literal[int] ]== literal[string] :
keyword[assert] identifier[key] keyword[is] keyword[not] keyword[None]
identifier[b] [ identifier[key] ]+= literal[string] + identifier[L] [ literal[int] :]
keyword[continue]
identifier[parts] = identifier[L] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[len] ( identifier[parts] )!= literal[int] :
keyword[raise] identifier[MalformedJuiceBox] ( literal[string] %( identifier[L] ,))
identifier[key] , identifier[value] = identifier[parts]
identifier[key] = identifier[normalizeKey] ( identifier[key] )
identifier[b] [ identifier[key] ]= identifier[value]
keyword[return] identifier[int] ( identifier[b] . identifier[pop] ( identifier[LENGTH] , literal[int] )), identifier[b] | def parseJuiceHeaders(lines):
"""
Create a JuiceBox from a list of header lines.
@param lines: a list of lines.
"""
b = JuiceBox()
bodylen = 0
key = None
for L in lines:
if L[0] == ' ':
# continuation
assert key is not None
b[key] += '\r\n' + L[1:]
continue # depends on [control=['if'], data=[]]
parts = L.split(': ', 1)
if len(parts) != 2:
raise MalformedJuiceBox('Wrong number of parts: %r' % (L,)) # depends on [control=['if'], data=[]]
(key, value) = parts
key = normalizeKey(key)
b[key] = value # depends on [control=['for'], data=['L']]
return (int(b.pop(LENGTH, 0)), b) |
def run_cleanup(build_ext_cmd):
    """Cleanup after ``BuildFortranThenExt.run``.

    For in-place builds, moves the built shared library into the source
    directory.  No-op for out-of-place builds.
    """
    if not build_ext_cmd.inplace:
        return
    destination = os.path.join("src", "bezier")
    # Relocate both built subdirectories from the build tree into the
    # in-tree package directory.
    for built_subdir in (LIB_DIR, DLL_DIR):
        shutil.move(
            os.path.join(build_ext_cmd.build_lib, built_subdir), destination
        )
constant[Cleanup after ``BuildFortranThenExt.run``.
For in-place builds, moves the built shared library into the source
directory.
]
if <ast.UnaryOp object at 0x7da18eb567d0> begin[:]
return[None]
variable[bezier_dir] assign[=] call[name[os].path.join, parameter[constant[src], constant[bezier]]]
call[name[shutil].move, parameter[call[name[os].path.join, parameter[name[build_ext_cmd].build_lib, name[LIB_DIR]]], name[bezier_dir]]]
call[name[shutil].move, parameter[call[name[os].path.join, parameter[name[build_ext_cmd].build_lib, name[DLL_DIR]]], name[bezier_dir]]] | keyword[def] identifier[run_cleanup] ( identifier[build_ext_cmd] ):
literal[string]
keyword[if] keyword[not] identifier[build_ext_cmd] . identifier[inplace] :
keyword[return]
identifier[bezier_dir] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] )
identifier[shutil] . identifier[move] ( identifier[os] . identifier[path] . identifier[join] ( identifier[build_ext_cmd] . identifier[build_lib] , identifier[LIB_DIR] ), identifier[bezier_dir] )
identifier[shutil] . identifier[move] ( identifier[os] . identifier[path] . identifier[join] ( identifier[build_ext_cmd] . identifier[build_lib] , identifier[DLL_DIR] ), identifier[bezier_dir] ) | def run_cleanup(build_ext_cmd):
"""Cleanup after ``BuildFortranThenExt.run``.
For in-place builds, moves the built shared library into the source
directory.
"""
if not build_ext_cmd.inplace:
return # depends on [control=['if'], data=[]]
bezier_dir = os.path.join('src', 'bezier')
shutil.move(os.path.join(build_ext_cmd.build_lib, LIB_DIR), bezier_dir)
shutil.move(os.path.join(build_ext_cmd.build_lib, DLL_DIR), bezier_dir) |
def get_derived_from(self, address):
    """Get the target the specified target was derived from.

    If a Target was injected programmatically, e.g. from codegen, this
    allows us to trace its ancestry.  If a Target is not derived, default
    to returning itself.

    :API: public
    """
    # Fall back to the address itself when no derivation was recorded.
    derived_from = self._derived_from_by_derivative.get(address, address)
    return self.get_target(derived_from)
constant[Get the target the specified target was derived from.
If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
ancestry. If a Target is not derived, default to returning itself.
:API: public
]
variable[parent_address] assign[=] call[name[self]._derived_from_by_derivative.get, parameter[name[address], name[address]]]
return[call[name[self].get_target, parameter[name[parent_address]]]] | keyword[def] identifier[get_derived_from] ( identifier[self] , identifier[address] ):
literal[string]
identifier[parent_address] = identifier[self] . identifier[_derived_from_by_derivative] . identifier[get] ( identifier[address] , identifier[address] )
keyword[return] identifier[self] . identifier[get_target] ( identifier[parent_address] ) | def get_derived_from(self, address):
"""Get the target the specified target was derived from.
If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
ancestry. If a Target is not derived, default to returning itself.
:API: public
"""
parent_address = self._derived_from_by_derivative.get(address, address)
return self.get_target(parent_address) |
def remove(self, container, force=True, volumes=True):
        """
        Remove a container.

        :param container: The container to remove.
        :param force:
            Force removal even for a running container.  Defaults to True,
            which differs from the Docker default.
        :param volumes:
            Also remove volumes that were created implicitly with this
            container (e.g. due to ``VOLUME`` directives in the
            Dockerfile).  Externally created volumes are left alone.
            Defaults to True, unlike Docker's equivalent parameter ``v``,
            which defaults to False.
        """
        # Delegate to the base class, mapping our ``volumes`` keyword onto
        # Docker's ``v`` parameter.
        super().remove(container, force=force, v=volumes)
constant[
Remove a container.
:param container: The container to remove.
:param force:
Whether to force the removal of the container, even if it is
running. Note that this defaults to True, unlike the Docker
default.
:param volumes:
Whether to remove any volumes that were created implicitly with
this container, i.e. any volumes that were created due to
``VOLUME`` directives in the Dockerfile. External volumes that were
manually created will not be removed. Note that this defaults to
True, unlike the Docker default (where the equivalent parameter,
``v``, defaults to False).
]
call[call[name[super], parameter[]].remove, parameter[name[container]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[container] , identifier[force] = keyword[True] , identifier[volumes] = keyword[True] ):
literal[string]
identifier[super] (). identifier[remove] ( identifier[container] , identifier[force] = identifier[force] , identifier[v] = identifier[volumes] ) | def remove(self, container, force=True, volumes=True):
"""
Remove a container.
:param container: The container to remove.
:param force:
Whether to force the removal of the container, even if it is
running. Note that this defaults to True, unlike the Docker
default.
:param volumes:
Whether to remove any volumes that were created implicitly with
this container, i.e. any volumes that were created due to
``VOLUME`` directives in the Dockerfile. External volumes that were
manually created will not be removed. Note that this defaults to
True, unlike the Docker default (where the equivalent parameter,
``v``, defaults to False).
"""
super().remove(container, force=force, v=volumes) |
def to_dict(self, *args, **kwargs):
        """Override `falcon.HTTPError` to include error messages in responses."""
        # Start from the base-class representation, then attach any
        # collected error messages.
        result = super(HTTPError, self).to_dict(*args, **kwargs)
        if self.errors is None:
            return result
        result["errors"] = self.errors
        return result
constant[Override `falcon.HTTPError` to include error messages in responses.]
variable[ret] assign[=] call[call[name[super], parameter[name[HTTPError], name[self]]].to_dict, parameter[<ast.Starred object at 0x7da2054a5150>]]
if compare[name[self].errors is_not constant[None]] begin[:]
call[name[ret]][constant[errors]] assign[=] name[self].errors
return[name[ret]] | keyword[def] identifier[to_dict] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[ret] = identifier[super] ( identifier[HTTPError] , identifier[self] ). identifier[to_dict] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[self] . identifier[errors] keyword[is] keyword[not] keyword[None] :
identifier[ret] [ literal[string] ]= identifier[self] . identifier[errors]
keyword[return] identifier[ret] | def to_dict(self, *args, **kwargs):
"""Override `falcon.HTTPError` to include error messages in responses."""
ret = super(HTTPError, self).to_dict(*args, **kwargs)
if self.errors is not None:
ret['errors'] = self.errors # depends on [control=['if'], data=[]]
return ret |
def visit_BoolOp(self, node):
        """ Return type may come from any boolop operand. """
        # Concatenate the candidates contributed by every operand, using
        # list ``+`` to mirror the semantics of ``sum(..., [])``.
        merged = []
        for operand in node.values:
            merged = merged + self.visit(operand)
        return merged
constant[ Return type may come from any boolop operand. ]
return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da204565960>, list[[]]]]] | keyword[def] identifier[visit_BoolOp] ( identifier[self] , identifier[node] ):
literal[string]
keyword[return] identifier[sum] (( identifier[self] . identifier[visit] ( identifier[value] ) keyword[for] identifier[value] keyword[in] identifier[node] . identifier[values] ),[]) | def visit_BoolOp(self, node):
""" Return type may come from any boolop operand. """
return sum((self.visit(value) for value in node.values), []) |
def _tmp_session(self, session, close=True):
        """If provided session is None, lend a temporary session.

        Generator helper that yields exactly one session-like value
        (presumably wrapped with ``contextlib.contextmanager`` at the
        decoration site — confirm against the class definition):

        * the caller-provided ``session`` unchanged (its lifetime stays
          with the caller), or
        * a session obtained from ``self._ensure_session``, ended on scope
          exit when ``close`` is true and only on error otherwise, or
        * ``None`` when ``_ensure_session`` returned a falsy value.
        """
        if session:
            # Caller owns this session's lifetime: don't call end_session.
            yield session
            return
        s = self._ensure_session(session)
        if s and close:
            with s:
                # Call end_session when we exit this scope.
                yield s
        elif s:
            try:
                # Only call end_session on error, i.e. when an exception is
                # raised at (or thrown into) the yield point.
                yield s
            except Exception:
                s.end_session()
                raise
        else:
            yield None
constant[If provided session is None, lend a temporary session.]
if name[session] begin[:]
<ast.Yield object at 0x7da20e9600d0>
return[None]
variable[s] assign[=] call[name[self]._ensure_session, parameter[name[session]]]
if <ast.BoolOp object at 0x7da20e962920> begin[:]
with name[s] begin[:]
<ast.Yield object at 0x7da18bc71150> | keyword[def] identifier[_tmp_session] ( identifier[self] , identifier[session] , identifier[close] = keyword[True] ):
literal[string]
keyword[if] identifier[session] :
keyword[yield] identifier[session]
keyword[return]
identifier[s] = identifier[self] . identifier[_ensure_session] ( identifier[session] )
keyword[if] identifier[s] keyword[and] identifier[close] :
keyword[with] identifier[s] :
keyword[yield] identifier[s]
keyword[elif] identifier[s] :
keyword[try] :
keyword[yield] identifier[s]
keyword[except] identifier[Exception] :
identifier[s] . identifier[end_session] ()
keyword[raise]
keyword[else] :
keyword[yield] keyword[None] | def _tmp_session(self, session, close=True):
"""If provided session is None, lend a temporary session."""
if session:
# Don't call end_session.
yield session
return # depends on [control=['if'], data=[]]
s = self._ensure_session(session)
if s and close:
with s:
# Call end_session when we exit this scope.
yield s # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
elif s:
try:
# Only call end_session on error.
yield s # depends on [control=['try'], data=[]]
except Exception:
s.end_session()
raise # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
yield None |
def _get_version(addon_dir, manifest, odoo_version_override=None,
                 git_post_version=True):
    """ Get addon version information from an addon directory """
    version = manifest.get('version')
    if not version:
        # Missing version: warn and fall back to a placeholder.
        warn("No version in manifest in %s" % addon_dir)
        version = '0.0.0'
    if odoo_version_override:
        odoo_version = odoo_version_override
    else:
        # Derive the Odoo series from the first two version components.
        components = version.split('.')
        if len(components) < 5:
            raise DistutilsSetupError("Version in manifest must have at least "
                                      "5 components and start with "
                                      "the Odoo series number in %s" %
                                      addon_dir)
        odoo_version = '.'.join(components[:2])
    if odoo_version not in ODOO_VERSION_INFO:
        raise DistutilsSetupError("Unsupported odoo version '%s' in %s" %
                                  (odoo_version, addon_dir))
    odoo_version_info = ODOO_VERSION_INFO[odoo_version]
    if git_post_version:
        version = get_git_postversion(addon_dir)
    return version, odoo_version, odoo_version_info
constant[ Get addon version information from an addon directory ]
variable[version] assign[=] call[name[manifest].get, parameter[constant[version]]]
if <ast.UnaryOp object at 0x7da1b28b90f0> begin[:]
call[name[warn], parameter[binary_operation[constant[No version in manifest in %s] <ast.Mod object at 0x7da2590d6920> name[addon_dir]]]]
variable[version] assign[=] constant[0.0.0]
if <ast.UnaryOp object at 0x7da1b28b88b0> begin[:]
if compare[call[name[len], parameter[call[name[version].split, parameter[constant[.]]]]] less[<] constant[5]] begin[:]
<ast.Raise object at 0x7da1b28b9720>
variable[odoo_version] assign[=] call[constant[.].join, parameter[call[call[name[version].split, parameter[constant[.]]]][<ast.Slice object at 0x7da1b28b9930>]]]
if compare[name[odoo_version] <ast.NotIn object at 0x7da2590d7190> name[ODOO_VERSION_INFO]] begin[:]
<ast.Raise object at 0x7da1b28b9480>
variable[odoo_version_info] assign[=] call[name[ODOO_VERSION_INFO]][name[odoo_version]]
if name[git_post_version] begin[:]
variable[version] assign[=] call[name[get_git_postversion], parameter[name[addon_dir]]]
return[tuple[[<ast.Name object at 0x7da1b28b8970>, <ast.Name object at 0x7da1b28dc1f0>, <ast.Name object at 0x7da1b28df4f0>]]] | keyword[def] identifier[_get_version] ( identifier[addon_dir] , identifier[manifest] , identifier[odoo_version_override] = keyword[None] ,
identifier[git_post_version] = keyword[True] ):
literal[string]
identifier[version] = identifier[manifest] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[version] :
identifier[warn] ( literal[string] % identifier[addon_dir] )
identifier[version] = literal[string]
keyword[if] keyword[not] identifier[odoo_version_override] :
keyword[if] identifier[len] ( identifier[version] . identifier[split] ( literal[string] ))< literal[int] :
keyword[raise] identifier[DistutilsSetupError] ( literal[string]
literal[string]
literal[string] %
identifier[addon_dir] )
identifier[odoo_version] = literal[string] . identifier[join] ( identifier[version] . identifier[split] ( literal[string] )[: literal[int] ])
keyword[else] :
identifier[odoo_version] = identifier[odoo_version_override]
keyword[if] identifier[odoo_version] keyword[not] keyword[in] identifier[ODOO_VERSION_INFO] :
keyword[raise] identifier[DistutilsSetupError] ( literal[string] %
( identifier[odoo_version] , identifier[addon_dir] ))
identifier[odoo_version_info] = identifier[ODOO_VERSION_INFO] [ identifier[odoo_version] ]
keyword[if] identifier[git_post_version] :
identifier[version] = identifier[get_git_postversion] ( identifier[addon_dir] )
keyword[return] identifier[version] , identifier[odoo_version] , identifier[odoo_version_info] | def _get_version(addon_dir, manifest, odoo_version_override=None, git_post_version=True):
""" Get addon version information from an addon directory """
version = manifest.get('version')
if not version:
warn('No version in manifest in %s' % addon_dir)
version = '0.0.0' # depends on [control=['if'], data=[]]
if not odoo_version_override:
if len(version.split('.')) < 5:
raise DistutilsSetupError('Version in manifest must have at least 5 components and start with the Odoo series number in %s' % addon_dir) # depends on [control=['if'], data=[]]
odoo_version = '.'.join(version.split('.')[:2]) # depends on [control=['if'], data=[]]
else:
odoo_version = odoo_version_override
if odoo_version not in ODOO_VERSION_INFO:
raise DistutilsSetupError("Unsupported odoo version '%s' in %s" % (odoo_version, addon_dir)) # depends on [control=['if'], data=['odoo_version']]
odoo_version_info = ODOO_VERSION_INFO[odoo_version]
if git_post_version:
version = get_git_postversion(addon_dir) # depends on [control=['if'], data=[]]
return (version, odoo_version, odoo_version_info) |
def route(app_or_blueprint, context=default_context, **kwargs):
    """ attach a transmute route. """
    def decorator(fn):
        described = describe(**kwargs)(fn)
        transmute_func = TransmuteFunction(described)
        routes, handler = create_routes_and_handler(transmute_func, context)
        for url_rule in routes:
            # push swagger info.  NOTE: kept inside the loop on purpose to
            # preserve the original behavior (add_func runs once per
            # generated route).
            if not hasattr(app_or_blueprint, SWAGGER_ATTR_NAME):
                setattr(app_or_blueprint, SWAGGER_ATTR_NAME, SwaggerSpec())
            getattr(app_or_blueprint, SWAGGER_ATTR_NAME).add_func(
                transmute_func, context)
            app_or_blueprint.route(
                url_rule, methods=transmute_func.methods)(handler)
        return handler
    return decorator
constant[ attach a transmute route. ]
def function[decorator, parameter[fn]]:
variable[fn] assign[=] call[call[name[describe], parameter[]], parameter[name[fn]]]
variable[transmute_func] assign[=] call[name[TransmuteFunction], parameter[name[fn]]]
<ast.Tuple object at 0x7da1b19919c0> assign[=] call[name[create_routes_and_handler], parameter[name[transmute_func], name[context]]]
for taget[name[r]] in starred[name[routes]] begin[:]
if <ast.UnaryOp object at 0x7da1b19914e0> begin[:]
call[name[setattr], parameter[name[app_or_blueprint], name[SWAGGER_ATTR_NAME], call[name[SwaggerSpec], parameter[]]]]
variable[swagger_obj] assign[=] call[name[getattr], parameter[name[app_or_blueprint], name[SWAGGER_ATTR_NAME]]]
call[name[swagger_obj].add_func, parameter[name[transmute_func], name[context]]]
call[call[name[app_or_blueprint].route, parameter[name[r]]], parameter[name[handler]]]
return[name[handler]]
return[name[decorator]] | keyword[def] identifier[route] ( identifier[app_or_blueprint] , identifier[context] = identifier[default_context] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[fn] ):
identifier[fn] = identifier[describe] (** identifier[kwargs] )( identifier[fn] )
identifier[transmute_func] = identifier[TransmuteFunction] ( identifier[fn] )
identifier[routes] , identifier[handler] = identifier[create_routes_and_handler] ( identifier[transmute_func] , identifier[context] )
keyword[for] identifier[r] keyword[in] identifier[routes] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[app_or_blueprint] , identifier[SWAGGER_ATTR_NAME] ):
identifier[setattr] ( identifier[app_or_blueprint] , identifier[SWAGGER_ATTR_NAME] , identifier[SwaggerSpec] ())
identifier[swagger_obj] = identifier[getattr] ( identifier[app_or_blueprint] , identifier[SWAGGER_ATTR_NAME] )
identifier[swagger_obj] . identifier[add_func] ( identifier[transmute_func] , identifier[context] )
identifier[app_or_blueprint] . identifier[route] ( identifier[r] , identifier[methods] = identifier[transmute_func] . identifier[methods] )( identifier[handler] )
keyword[return] identifier[handler]
keyword[return] identifier[decorator] | def route(app_or_blueprint, context=default_context, **kwargs):
""" attach a transmute route. """
def decorator(fn):
fn = describe(**kwargs)(fn)
transmute_func = TransmuteFunction(fn)
(routes, handler) = create_routes_and_handler(transmute_func, context)
for r in routes:
# push swagger info.
if not hasattr(app_or_blueprint, SWAGGER_ATTR_NAME):
setattr(app_or_blueprint, SWAGGER_ATTR_NAME, SwaggerSpec()) # depends on [control=['if'], data=[]]
swagger_obj = getattr(app_or_blueprint, SWAGGER_ATTR_NAME)
swagger_obj.add_func(transmute_func, context)
app_or_blueprint.route(r, methods=transmute_func.methods)(handler) # depends on [control=['for'], data=['r']]
return handler
return decorator |
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and the
    change is pending a system reboot, this function will return the pending
    computer name. Otherwise, ``None`` will be returned. If there was an error
    retrieving the pending computer name, ``False`` will be returned, and an
    error message will be logged to the minion log.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if not
            pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if not pending:
        # No value could be read: signal the error case.
        return False
    # A name equal to the current one means nothing is actually pending.
    return None if pending == current else pending
constant[
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
Returns:
str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
]
variable[current] assign[=] call[name[get_computer_name], parameter[]]
variable[pending] assign[=] call[call[call[name[__utils__]][constant[reg.read_value]], parameter[constant[HKLM], constant[SYSTEM\CurrentControlSet\Services\Tcpip\Parameters], constant[NV Hostname]]]][constant[vdata]]
if name[pending] begin[:]
return[<ast.IfExp object at 0x7da2044c1e40>]
return[constant[False]] | keyword[def] identifier[get_pending_computer_name] ():
literal[string]
identifier[current] = identifier[get_computer_name] ()
identifier[pending] = identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] )[ literal[string] ]
keyword[if] identifier[pending] :
keyword[return] identifier[pending] keyword[if] identifier[pending] != identifier[current] keyword[else] keyword[None]
keyword[return] keyword[False] | def get_pending_computer_name():
"""
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
Returns:
str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
"""
current = get_computer_name()
pending = __utils__['reg.read_value']('HKLM', 'SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters', 'NV Hostname')['vdata']
if pending:
return pending if pending != current else None # depends on [control=['if'], data=[]]
return False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.