code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get(method, hmc, uri, uri_parms, logon_required):
    """Operation: List Load Activation Profiles (requires classic mode)."""
    cpc_oid = uri_parms[0]
    query_str = uri_parms[1]
    try:
        cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
    except KeyError:
        raise InvalidResourceError(method, uri)
    # Load activation profiles only exist on classic-mode CPCs.
    assert not cpc.dpm_enabled  # TODO: Verify error or empty result?
    filter_args = parse_query_parms(method, uri, query_str)
    result_profiles = []
    for profile in cpc.load_activation_profiles.list(filter_args):
        # Expose only the identifying properties in the list result.
        result_profiles.append({
            prop: profile.properties[prop]
            for prop in profile.properties
            if prop in ('element-uri', 'name')
        })
    return {'load-activation-profiles': result_profiles}
constant[Operation: List Load Activation Profiles (requires classic mode).]
variable[cpc_oid] assign[=] call[name[uri_parms]][constant[0]]
variable[query_str] assign[=] call[name[uri_parms]][constant[1]]
<ast.Try object at 0x7da20c992110>
assert[<ast.UnaryOp object at 0x7da20c990fd0>]
variable[result_profiles] assign[=] list[[]]
variable[filter_args] assign[=] call[name[parse_query_parms], parameter[name[method], name[uri], name[query_str]]]
for taget[name[profile]] in starred[call[name[cpc].load_activation_profiles.list, parameter[name[filter_args]]]] begin[:]
variable[result_profile] assign[=] dictionary[[], []]
for taget[name[prop]] in starred[name[profile].properties] begin[:]
if compare[name[prop] in tuple[[<ast.Constant object at 0x7da20c991000>, <ast.Constant object at 0x7da20c992e90>]]] begin[:]
call[name[result_profile]][name[prop]] assign[=] call[name[profile].properties][name[prop]]
call[name[result_profiles].append, parameter[name[result_profile]]]
return[dictionary[[<ast.Constant object at 0x7da18f721030>], [<ast.Name object at 0x7da18f722260>]]] | keyword[def] identifier[get] ( identifier[method] , identifier[hmc] , identifier[uri] , identifier[uri_parms] , identifier[logon_required] ):
literal[string]
identifier[cpc_oid] = identifier[uri_parms] [ literal[int] ]
identifier[query_str] = identifier[uri_parms] [ literal[int] ]
keyword[try] :
identifier[cpc] = identifier[hmc] . identifier[cpcs] . identifier[lookup_by_oid] ( identifier[cpc_oid] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidResourceError] ( identifier[method] , identifier[uri] )
keyword[assert] keyword[not] identifier[cpc] . identifier[dpm_enabled]
identifier[result_profiles] =[]
identifier[filter_args] = identifier[parse_query_parms] ( identifier[method] , identifier[uri] , identifier[query_str] )
keyword[for] identifier[profile] keyword[in] identifier[cpc] . identifier[load_activation_profiles] . identifier[list] ( identifier[filter_args] ):
identifier[result_profile] ={}
keyword[for] identifier[prop] keyword[in] identifier[profile] . identifier[properties] :
keyword[if] identifier[prop] keyword[in] ( literal[string] , literal[string] ):
identifier[result_profile] [ identifier[prop] ]= identifier[profile] . identifier[properties] [ identifier[prop] ]
identifier[result_profiles] . identifier[append] ( identifier[result_profile] )
keyword[return] { literal[string] : identifier[result_profiles] } | def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List Load Activation Profiles (requires classic mode)."""
cpc_oid = uri_parms[0]
query_str = uri_parms[1]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid) # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidResourceError(method, uri) # depends on [control=['except'], data=[]]
assert not cpc.dpm_enabled # TODO: Verify error or empty result?
result_profiles = []
filter_args = parse_query_parms(method, uri, query_str)
for profile in cpc.load_activation_profiles.list(filter_args):
result_profile = {}
for prop in profile.properties:
if prop in ('element-uri', 'name'):
result_profile[prop] = profile.properties[prop] # depends on [control=['if'], data=['prop']] # depends on [control=['for'], data=['prop']]
result_profiles.append(result_profile) # depends on [control=['for'], data=['profile']]
return {'load-activation-profiles': result_profiles} |
def __collect_fields(self):
    """Use field values from config.json and collect from request."""
    # Both credential fields are mandatory; each carries its configured
    # error message. Order matters: username is registered first.
    form = FormData()
    for field_name, field_error in ((self.__username_field, self.__username_error),
                                    (self.__password_field, self.__password_error)):
        form.add_field(field_name, required=True, error=field_error)
    form.parse()
    # Stash the parsed credentials on the instance for later use.
    self.username = form.values[self.__username_field]
    self.password = form.values[self.__password_field]
constant[ Use field values from config.json and collect from request ]
variable[form] assign[=] call[name[FormData], parameter[]]
call[name[form].add_field, parameter[name[self].__username_field]]
call[name[form].add_field, parameter[name[self].__password_field]]
call[name[form].parse, parameter[]]
name[self].username assign[=] call[name[form].values][name[self].__username_field]
name[self].password assign[=] call[name[form].values][name[self].__password_field]
return[None] | keyword[def] identifier[__collect_fields] ( identifier[self] ):
literal[string]
identifier[form] = identifier[FormData] ()
identifier[form] . identifier[add_field] ( identifier[self] . identifier[__username_field] , identifier[required] = keyword[True] ,
identifier[error] = identifier[self] . identifier[__username_error] )
identifier[form] . identifier[add_field] ( identifier[self] . identifier[__password_field] , identifier[required] = keyword[True] ,
identifier[error] = identifier[self] . identifier[__password_error] )
identifier[form] . identifier[parse] ()
identifier[self] . identifier[username] = identifier[form] . identifier[values] [ identifier[self] . identifier[__username_field] ]
identifier[self] . identifier[password] = identifier[form] . identifier[values] [ identifier[self] . identifier[__password_field] ]
keyword[return] | def __collect_fields(self):
""" Use field values from config.json and collect from request """
form = FormData()
form.add_field(self.__username_field, required=True, error=self.__username_error)
form.add_field(self.__password_field, required=True, error=self.__password_error)
form.parse()
self.username = form.values[self.__username_field]
self.password = form.values[self.__password_field]
return |
def is_archive(self):
    """Given a parsed feed, returns True if this is an archive feed."""
    ns_prefix = self.archive_namespace
    if ns_prefix:
        # An explicit declaration in the feed wins over any heuristic.
        if ns_prefix + '_archive' in self.feed.feed:
            # Declared to be an archive view.
            return True
        if ns_prefix + '_current' in self.feed.feed:
            # Declared to be the current view.
            return False
    # Either the namespace is absent or nothing was declared: fall back
    # to comparing the 'self' and 'current' link relations.
    rels = collections.defaultdict(list)
    for link in self.feed.feed.links:
        rels[link.rel].append(link.href)
    if 'current' not in rels:
        return False
    if 'self' not in rels:
        return True
    return rels['self'] != rels['current']
constant[ Given a parsed feed, returns True if this is an archive feed ]
variable[ns_prefix] assign[=] name[self].archive_namespace
if name[ns_prefix] begin[:]
if compare[binary_operation[name[ns_prefix] + constant[_archive]] in name[self].feed.feed] begin[:]
return[constant[True]]
if compare[binary_operation[name[ns_prefix] + constant[_current]] in name[self].feed.feed] begin[:]
return[constant[False]]
variable[rels] assign[=] call[name[collections].defaultdict, parameter[name[list]]]
for taget[name[link]] in starred[name[self].feed.feed.links] begin[:]
call[call[name[rels]][name[link].rel].append, parameter[name[link].href]]
return[<ast.BoolOp object at 0x7da18f00d180>] | keyword[def] identifier[is_archive] ( identifier[self] ):
literal[string]
identifier[ns_prefix] = identifier[self] . identifier[archive_namespace]
keyword[if] identifier[ns_prefix] :
keyword[if] identifier[ns_prefix] + literal[string] keyword[in] identifier[self] . identifier[feed] . identifier[feed] :
keyword[return] keyword[True]
keyword[if] identifier[ns_prefix] + literal[string] keyword[in] identifier[self] . identifier[feed] . identifier[feed] :
keyword[return] keyword[False]
identifier[rels] = identifier[collections] . identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[link] keyword[in] identifier[self] . identifier[feed] . identifier[feed] . identifier[links] :
identifier[rels] [ identifier[link] . identifier[rel] ]. identifier[append] ( identifier[link] . identifier[href] )
keyword[return] ( literal[string] keyword[in] identifier[rels] keyword[and]
( literal[string] keyword[not] keyword[in] identifier[rels] keyword[or]
identifier[rels] [ literal[string] ]!= identifier[rels] [ literal[string] ])) | def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True # depends on [control=['if'], data=[]]
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href) # depends on [control=['for'], data=['link']]
return 'current' in rels and ('self' not in rels or rels['self'] != rels['current']) |
def draw_help(self, surf):
    """Draw the help dialog."""
    if not self._help:
        return

    def blit(loc, text):
        # All help text is rendered in the large font, in black.
        surf.write_screen(self._font_large, colors.black, loc, text)

    # Dim the backdrop so the shortcut list stands out.
    surf.surf.fill(colors.white * 0.8)
    blit((1, 1), "Shortcuts:")
    widest = max(len(hotkey) for hotkey, _ in self.shortcuts)
    row = 2
    for hotkey, description in self.shortcuts:
        blit((2, row), hotkey)
        # Offset the description past the widest hotkey column.
        blit((3 + widest * 0.7, row), description)
        row += 1
constant[Draw the help dialog.]
if <ast.UnaryOp object at 0x7da2047eb3d0> begin[:]
return[None]
def function[write, parameter[loc, text]]:
call[name[surf].write_screen, parameter[name[self]._font_large, name[colors].black, name[loc], name[text]]]
call[name[surf].surf.fill, parameter[binary_operation[name[colors].white * constant[0.8]]]]
call[name[write], parameter[tuple[[<ast.Constant object at 0x7da2047e8d90>, <ast.Constant object at 0x7da2047e9840>]], constant[Shortcuts:]]]
variable[max_len] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da2047ebbb0>]]
for taget[tuple[[<ast.Name object at 0x7da2047e8e20>, <ast.Tuple object at 0x7da2047ebcd0>]]] in starred[call[name[enumerate], parameter[name[self].shortcuts]]] begin[:]
call[name[write], parameter[tuple[[<ast.Constant object at 0x7da2047e87f0>, <ast.Name object at 0x7da2047ebdf0>]], name[hotkey]]]
call[name[write], parameter[tuple[[<ast.BinOp object at 0x7da2047eb0a0>, <ast.Name object at 0x7da2047eac20>]], name[description]]] | keyword[def] identifier[draw_help] ( identifier[self] , identifier[surf] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_help] :
keyword[return]
keyword[def] identifier[write] ( identifier[loc] , identifier[text] ):
identifier[surf] . identifier[write_screen] ( identifier[self] . identifier[_font_large] , identifier[colors] . identifier[black] , identifier[loc] , identifier[text] )
identifier[surf] . identifier[surf] . identifier[fill] ( identifier[colors] . identifier[white] * literal[int] )
identifier[write] (( literal[int] , literal[int] ), literal[string] )
identifier[max_len] = identifier[max] ( identifier[len] ( identifier[s] ) keyword[for] identifier[s] , identifier[_] keyword[in] identifier[self] . identifier[shortcuts] )
keyword[for] identifier[i] ,( identifier[hotkey] , identifier[description] ) keyword[in] identifier[enumerate] ( identifier[self] . identifier[shortcuts] , identifier[start] = literal[int] ):
identifier[write] (( literal[int] , identifier[i] ), identifier[hotkey] )
identifier[write] (( literal[int] + identifier[max_len] * literal[int] , identifier[i] ), identifier[description] ) | def draw_help(self, surf):
"""Draw the help dialog."""
if not self._help:
return # depends on [control=['if'], data=[]]
def write(loc, text):
surf.write_screen(self._font_large, colors.black, loc, text)
surf.surf.fill(colors.white * 0.8)
write((1, 1), 'Shortcuts:')
max_len = max((len(s) for (s, _) in self.shortcuts))
for (i, (hotkey, description)) in enumerate(self.shortcuts, start=2):
write((2, i), hotkey)
write((3 + max_len * 0.7, i), description) # depends on [control=['for'], data=[]] |
def _set_link_error_disable(self, v, load=False):
    """
    Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link_error_disable is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link_error_disable() directly.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        converted = YANGDynClass(v, base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface the expected type and the generating expression so the
        # caller can see exactly what this leaf accepts.
        raise ValueError({
            'error-string': """link_error_disable must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""",
        })
    self.__link_error_disable = converted
    # Run the registered change hook, if any, after the assignment.
    if hasattr(self, '_set'):
        self._set()
constant[
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_error_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_error_disable() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20e9b0a30>
name[self].__link_error_disable assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_link_error_disable] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[link_error_disable] . identifier[link_error_disable] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__link_error_disable] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_link_error_disable(self, v, load=False):
"""
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_error_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_error_disable() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name='link-error-disable', rest_name='link-error-disable', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'link_error_disable must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=link_error_disable.link_error_disable, is_container=\'container\', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'port link dampening\', u\'callpoint\': u\'Pld\', u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-pld\', defining_module=\'brocade-pld\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__link_error_disable = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
    '''
    A function to connect to a bigip device and create a monitor.
    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    monitor_type
        The type of monitor to create
    name
        The name of the monitor to create
    kwargs
        [ arg=val ] ...
        Consult F5 BIGIP user guide for specific options for each monitor type.
        Typically, tmsh arg names are used.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    # In test mode, just echo back the parameters that would be applied.
    if __opts__['test']:
        params = {
            'hostname': hostname,
            'username': username,
            'password': password,
            'monitor_type': monitor_type,
            'name': name,
        }
        params.update(kwargs)
        return _test_output(ret, 'create', params)

    # Is this monitor currently configured?
    existing = __salt__['bigip.list_monitor'](hostname, username, password, monitor_type, name)

    if existing['code'] == 200:
        # Already present: nothing to do.
        ret['result'] = True
        ret['comment'] = 'A monitor by this name currently exists. No change made.'
        return ret

    if existing['code'] == 404:
        # Not present: create it now.
        response = __salt__['bigip.create_monitor'](hostname, username, password,
                                                    monitor_type, name, **kwargs)
        if response['code'] != 200:
            return _load_result(response, ret)
        ret['result'] = True
        ret['changes']['old'] = {}
        ret['changes']['new'] = response['content']
        ret['comment'] = 'Monitor was successfully created.'
        return ret

    # Something else was returned by the list call.
    return _load_result(existing, ret)
constant[
A function to connect to a bigip device and create a monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da20c794130>, <ast.Constant object at 0x7da20c795870>, <ast.Constant object at 0x7da20c796b00>, <ast.Constant object at 0x7da20c7969b0>], [<ast.Name object at 0x7da20c7952a0>, <ast.Dict object at 0x7da20c7947f0>, <ast.Constant object at 0x7da20c794220>, <ast.Constant object at 0x7da20c795570>]]
if call[name[__opts__]][constant[test]] begin[:]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2046237f0>, <ast.Constant object at 0x7da2046225f0>, <ast.Constant object at 0x7da2046219c0>, <ast.Constant object at 0x7da204622e60>, <ast.Constant object at 0x7da2046205b0>], [<ast.Name object at 0x7da204621960>, <ast.Name object at 0x7da2046239a0>, <ast.Name object at 0x7da204622f80>, <ast.Name object at 0x7da2046212a0>, <ast.Name object at 0x7da204623a90>]]
for taget[tuple[[<ast.Name object at 0x7da2046205e0>, <ast.Name object at 0x7da204622a10>]]] in starred[call[name[six].iteritems, parameter[name[kwargs]]]] begin[:]
call[name[params]][name[key]] assign[=] name[value]
return[call[name[_test_output], parameter[name[ret], constant[create], name[params]]]]
variable[existing] assign[=] call[call[name[__salt__]][constant[bigip.list_monitor]], parameter[name[hostname], name[username], name[password], name[monitor_type], name[name]]]
if compare[call[name[existing]][constant[code]] equal[==] constant[200]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] constant[A monitor by this name currently exists. No change made.]
return[name[ret]] | keyword[def] identifier[create_monitor] ( identifier[hostname] , identifier[username] , identifier[password] , identifier[monitor_type] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[False] , literal[string] : literal[string] }
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[params] ={
literal[string] : identifier[hostname] ,
literal[string] : identifier[username] ,
literal[string] : identifier[password] ,
literal[string] : identifier[monitor_type] ,
literal[string] : identifier[name]
}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[kwargs] ):
identifier[params] [ identifier[key] ]= identifier[value]
keyword[return] identifier[_test_output] ( identifier[ret] , literal[string] , identifier[params] )
identifier[existing] = identifier[__salt__] [ literal[string] ]( identifier[hostname] , identifier[username] , identifier[password] , identifier[monitor_type] , identifier[name] )
keyword[if] identifier[existing] [ literal[string] ]== literal[int] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
keyword[elif] identifier[existing] [ literal[string] ]== literal[int] :
identifier[response] = identifier[__salt__] [ literal[string] ]( identifier[hostname] , identifier[username] , identifier[password] , identifier[monitor_type] , identifier[name] ,** identifier[kwargs] )
keyword[if] identifier[response] [ literal[string] ]== literal[int] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ][ literal[string] ]={}
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[response] [ literal[string] ]
identifier[ret] [ literal[string] ]= literal[string]
keyword[else] :
identifier[ret] = identifier[_load_result] ( identifier[response] , identifier[ret] )
keyword[else] :
identifier[ret] = identifier[_load_result] ( identifier[existing] , identifier[ret] )
keyword[return] identifier[ret] | def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
"""
A function to connect to a bigip device and create a monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
params = {'hostname': hostname, 'username': username, 'password': password, 'monitor_type': monitor_type, 'name': name}
for (key, value) in six.iteritems(kwargs):
params[key] = value # depends on [control=['for'], data=[]]
return _test_output(ret, 'create', params) # depends on [control=['if'], data=[]]
#is this monitor currently configured?
existing = __salt__['bigip.list_monitor'](hostname, username, password, monitor_type, name)
# if it exists
if existing['code'] == 200:
ret['result'] = True
ret['comment'] = 'A monitor by this name currently exists. No change made.' # depends on [control=['if'], data=[]]
# if it doesn't exist
elif existing['code'] == 404:
response = __salt__['bigip.create_monitor'](hostname, username, password, monitor_type, name, **kwargs)
if response['code'] == 200:
ret['result'] = True
ret['changes']['old'] = {}
ret['changes']['new'] = response['content']
ret['comment'] = 'Monitor was successfully created.' # depends on [control=['if'], data=[]]
else:
ret = _load_result(response, ret) # depends on [control=['if'], data=[]]
else:
# else something else was returned
ret = _load_result(existing, ret)
return ret |
def p_field_expr(p):
    """
    expr : expr FIELD
    """
    # NOTE: the docstring above is the PLY grammar rule -- do not edit it.
    # Build the field-access node: a "." expression whose args are the
    # receiver expression and an ident for the field name.
    field = node.ident(name=p[2], lineno=p.lineno(2), lexpos=p.lexpos(2))
    p[0] = node.expr(op=".", args=node.expr_list([p[1], field]))
constant[
expr : expr FIELD
]
call[name[p]][constant[0]] assign[=] call[name[node].expr, parameter[]] | keyword[def] identifier[p_field_expr] ( identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[node] . identifier[expr] (
identifier[op] = literal[string] ,
identifier[args] = identifier[node] . identifier[expr_list] ([
identifier[p] [ literal[int] ], identifier[node] . identifier[ident] (
identifier[name] = identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ), identifier[lexpos] = identifier[p] . identifier[lexpos] ( literal[int] ))
])) | def p_field_expr(p):
"""
expr : expr FIELD
"""
p[0] = node.expr(op='.', args=node.expr_list([p[1], node.ident(name=p[2], lineno=p.lineno(2), lexpos=p.lexpos(2))])) |
def datasets_list(self, project_id=None, max_results=0, page_token=None):
    """Issues a request to list the datasets in the project.
    Args:
      project_id: the project id to use to fetch the results; use None for the default project.
      max_results: an optional maximum number of tables to retrieve.
      page_token: an optional token to continue the retrieval.
    Returns:
      A parsed result object.
    Raises:
      Exception if there is an error performing the operation.
    """
    if project_id is None:
        project_id = self._project_id
    # Only forward paging/limit options that were actually supplied.
    args = {}
    if max_results != 0:
        args['maxResults'] = max_results
    if page_token is not None:
        args['pageToken'] = page_token
    url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
constant[Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
]
if compare[name[project_id] is constant[None]] begin[:]
variable[project_id] assign[=] name[self]._project_id
variable[url] assign[=] binary_operation[name[Api]._ENDPOINT + binary_operation[name[Api]._DATASETS_PATH <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00c850>, <ast.Constant object at 0x7da18f00d4e0>]]]]
variable[args] assign[=] dictionary[[], []]
if compare[name[max_results] not_equal[!=] constant[0]] begin[:]
call[name[args]][constant[maxResults]] assign[=] name[max_results]
if compare[name[page_token] is_not constant[None]] begin[:]
call[name[args]][constant[pageToken]] assign[=] name[page_token]
return[call[name[datalab].utils.Http.request, parameter[name[url]]]] | keyword[def] identifier[datasets_list] ( identifier[self] , identifier[project_id] = keyword[None] , identifier[max_results] = literal[int] , identifier[page_token] = keyword[None] ):
literal[string]
keyword[if] identifier[project_id] keyword[is] keyword[None] :
identifier[project_id] = identifier[self] . identifier[_project_id]
identifier[url] = identifier[Api] . identifier[_ENDPOINT] +( identifier[Api] . identifier[_DATASETS_PATH] %( identifier[project_id] , literal[string] ))
identifier[args] ={}
keyword[if] identifier[max_results] != literal[int] :
identifier[args] [ literal[string] ]= identifier[max_results]
keyword[if] identifier[page_token] keyword[is] keyword[not] keyword[None] :
identifier[args] [ literal[string] ]= identifier[page_token]
keyword[return] identifier[datalab] . identifier[utils] . identifier[Http] . identifier[request] ( identifier[url] , identifier[args] = identifier[args] , identifier[credentials] = identifier[self] . identifier[_credentials] ) | def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id # depends on [control=['if'], data=['project_id']]
url = Api._ENDPOINT + Api._DATASETS_PATH % (project_id, '')
args = {}
if max_results != 0:
args['maxResults'] = max_results # depends on [control=['if'], data=['max_results']]
if page_token is not None:
args['pageToken'] = page_token # depends on [control=['if'], data=['page_token']]
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) |
def get_relationship_form_for_update(self, relationship_id=None):
"""Gets the relationship form for updating an existing relationship.
A new relationship form should be requested for each update
transaction.
arg: relationship_id (osid.id.Id): the ``Id`` of the
``Relationship``
return: (osid.relationship.RelationshipForm) - the relationship
form
raise: NotFound - ``relationship_id`` is not found
raise: NullArgument - ``relationship_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if relationship_id is None:
raise NullArgument()
try:
url_path = ('/handcar/services/relationship/families/' +
self._catalog_idstr + '/relationships/' + str(relationship_id))
relationship = objects.Relationship(self._get_request(url_path))
except Exception:
raise
relationship_form = objects.RelationshipForm(relationship._my_map)
self._forms[relationship_form.get_id().get_identifier()] = not UPDATED
return relationship_form | def function[get_relationship_form_for_update, parameter[self, relationship_id]]:
constant[Gets the relationship form for updating an existing relationship.
A new relationship form should be requested for each update
transaction.
arg: relationship_id (osid.id.Id): the ``Id`` of the
``Relationship``
return: (osid.relationship.RelationshipForm) - the relationship
form
raise: NotFound - ``relationship_id`` is not found
raise: NullArgument - ``relationship_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[relationship_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6e6d70>
<ast.Try object at 0x7da20c6e7df0>
variable[relationship_form] assign[=] call[name[objects].RelationshipForm, parameter[name[relationship]._my_map]]
call[name[self]._forms][call[call[name[relationship_form].get_id, parameter[]].get_identifier, parameter[]]] assign[=] <ast.UnaryOp object at 0x7da20c6e6890>
return[name[relationship_form]] | keyword[def] identifier[get_relationship_form_for_update] ( identifier[self] , identifier[relationship_id] = keyword[None] ):
literal[string]
keyword[if] identifier[relationship_id] keyword[is] keyword[None] :
keyword[raise] identifier[NullArgument] ()
keyword[try] :
identifier[url_path] =( literal[string] +
identifier[self] . identifier[_catalog_idstr] + literal[string] + identifier[str] ( identifier[relationship_id] ))
identifier[relationship] = identifier[objects] . identifier[Relationship] ( identifier[self] . identifier[_get_request] ( identifier[url_path] ))
keyword[except] identifier[Exception] :
keyword[raise]
identifier[relationship_form] = identifier[objects] . identifier[RelationshipForm] ( identifier[relationship] . identifier[_my_map] )
identifier[self] . identifier[_forms] [ identifier[relationship_form] . identifier[get_id] (). identifier[get_identifier] ()]= keyword[not] identifier[UPDATED]
keyword[return] identifier[relationship_form] | def get_relationship_form_for_update(self, relationship_id=None):
"""Gets the relationship form for updating an existing relationship.
A new relationship form should be requested for each update
transaction.
arg: relationship_id (osid.id.Id): the ``Id`` of the
``Relationship``
return: (osid.relationship.RelationshipForm) - the relationship
form
raise: NotFound - ``relationship_id`` is not found
raise: NullArgument - ``relationship_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if relationship_id is None:
raise NullArgument() # depends on [control=['if'], data=[]]
try:
url_path = '/handcar/services/relationship/families/' + self._catalog_idstr + '/relationships/' + str(relationship_id)
relationship = objects.Relationship(self._get_request(url_path)) # depends on [control=['try'], data=[]]
except Exception:
raise # depends on [control=['except'], data=[]]
relationship_form = objects.RelationshipForm(relationship._my_map)
self._forms[relationship_form.get_id().get_identifier()] = not UPDATED
return relationship_form |
def landm(self, val):
'''
Sets landmarks given any of:
- ppfile
- ldmk file
- dict of {name:inds} (i.e. mesh.landm)
- dict of {name:xyz} (i.e. mesh.landm_xyz)
- Nx1 array or list of ints (treated as landm, given sequential integers as names)
- Nx3 array or list of floats (treated as landm_xyz, given sequential integers as names)
- pkl, json, yaml file containing either of the above dicts or arrays
'''
import numpy as np
if val is None:
self._landm = None
self._raw_landmarks = None
elif isinstance(val, basestring):
self.landm = load_landmarks(val)
else:
if not hasattr(val, 'keys'):
val = {str(ii): v for ii, v in enumerate(val)}
landm = {}
landm_xyz = {}
filtered_landmarks = []
for k, v in val.iteritems():
if isinstance(v, (int, long)):
landm[k] = v
elif len(v) == 3:
if np.all(v == [0.0, 0.0, 0.0]):
filtered_landmarks.append(k)
landm_xyz[k] = v
else:
raise Exception("Can't parse landmark %s: %s" % (k, v))
if len(filtered_landmarks) > 0:
import warnings
warnings.warn("WARNING: the following landmarks are positioned at (0.0, 0.0, 0.0) and were ignored: %s" % ", ".join(filtered_landmarks))
# We preserve these and calculate everything seperately so that we can recompute_landmarks if v changes
self._raw_landmarks = {
'landm': landm,
'landm_xyz': landm_xyz
}
self.recompute_landmarks() | def function[landm, parameter[self, val]]:
constant[
Sets landmarks given any of:
- ppfile
- ldmk file
- dict of {name:inds} (i.e. mesh.landm)
- dict of {name:xyz} (i.e. mesh.landm_xyz)
- Nx1 array or list of ints (treated as landm, given sequential integers as names)
- Nx3 array or list of floats (treated as landm_xyz, given sequential integers as names)
- pkl, json, yaml file containing either of the above dicts or arrays
]
import module[numpy] as alias[np]
if compare[name[val] is constant[None]] begin[:]
name[self]._landm assign[=] constant[None]
name[self]._raw_landmarks assign[=] constant[None] | keyword[def] identifier[landm] ( identifier[self] , identifier[val] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[if] identifier[val] keyword[is] keyword[None] :
identifier[self] . identifier[_landm] = keyword[None]
identifier[self] . identifier[_raw_landmarks] = keyword[None]
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[basestring] ):
identifier[self] . identifier[landm] = identifier[load_landmarks] ( identifier[val] )
keyword[else] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[val] , literal[string] ):
identifier[val] ={ identifier[str] ( identifier[ii] ): identifier[v] keyword[for] identifier[ii] , identifier[v] keyword[in] identifier[enumerate] ( identifier[val] )}
identifier[landm] ={}
identifier[landm_xyz] ={}
identifier[filtered_landmarks] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[val] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[v] ,( identifier[int] , identifier[long] )):
identifier[landm] [ identifier[k] ]= identifier[v]
keyword[elif] identifier[len] ( identifier[v] )== literal[int] :
keyword[if] identifier[np] . identifier[all] ( identifier[v] ==[ literal[int] , literal[int] , literal[int] ]):
identifier[filtered_landmarks] . identifier[append] ( identifier[k] )
identifier[landm_xyz] [ identifier[k] ]= identifier[v]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] %( identifier[k] , identifier[v] ))
keyword[if] identifier[len] ( identifier[filtered_landmarks] )> literal[int] :
keyword[import] identifier[warnings]
identifier[warnings] . identifier[warn] ( literal[string] % literal[string] . identifier[join] ( identifier[filtered_landmarks] ))
identifier[self] . identifier[_raw_landmarks] ={
literal[string] : identifier[landm] ,
literal[string] : identifier[landm_xyz]
}
identifier[self] . identifier[recompute_landmarks] () | def landm(self, val):
"""
Sets landmarks given any of:
- ppfile
- ldmk file
- dict of {name:inds} (i.e. mesh.landm)
- dict of {name:xyz} (i.e. mesh.landm_xyz)
- Nx1 array or list of ints (treated as landm, given sequential integers as names)
- Nx3 array or list of floats (treated as landm_xyz, given sequential integers as names)
- pkl, json, yaml file containing either of the above dicts or arrays
"""
import numpy as np
if val is None:
self._landm = None
self._raw_landmarks = None # depends on [control=['if'], data=[]]
elif isinstance(val, basestring):
self.landm = load_landmarks(val) # depends on [control=['if'], data=[]]
else:
if not hasattr(val, 'keys'):
val = {str(ii): v for (ii, v) in enumerate(val)} # depends on [control=['if'], data=[]]
landm = {}
landm_xyz = {}
filtered_landmarks = []
for (k, v) in val.iteritems():
if isinstance(v, (int, long)):
landm[k] = v # depends on [control=['if'], data=[]]
elif len(v) == 3:
if np.all(v == [0.0, 0.0, 0.0]):
filtered_landmarks.append(k) # depends on [control=['if'], data=[]]
landm_xyz[k] = v # depends on [control=['if'], data=[]]
else:
raise Exception("Can't parse landmark %s: %s" % (k, v)) # depends on [control=['for'], data=[]]
if len(filtered_landmarks) > 0:
import warnings
warnings.warn('WARNING: the following landmarks are positioned at (0.0, 0.0, 0.0) and were ignored: %s' % ', '.join(filtered_landmarks)) # depends on [control=['if'], data=[]]
# We preserve these and calculate everything seperately so that we can recompute_landmarks if v changes
self._raw_landmarks = {'landm': landm, 'landm_xyz': landm_xyz}
self.recompute_landmarks() |
def search_next(self):
"""
Searchs the next search pattern in the document.
:return: Method success.
:rtype: bool
"""
pattern = self.get_selected_text() or self.__search_pattern
if not pattern:
return False
return self.search(pattern, **{"case_sensitive": True,
"whole_word": False,
"regular_expressions": False,
"backward_search": False,
"wrap_around": True}) | def function[search_next, parameter[self]]:
constant[
Searchs the next search pattern in the document.
:return: Method success.
:rtype: bool
]
variable[pattern] assign[=] <ast.BoolOp object at 0x7da1b086c160>
if <ast.UnaryOp object at 0x7da1b086f8b0> begin[:]
return[constant[False]]
return[call[name[self].search, parameter[name[pattern]]]] | keyword[def] identifier[search_next] ( identifier[self] ):
literal[string]
identifier[pattern] = identifier[self] . identifier[get_selected_text] () keyword[or] identifier[self] . identifier[__search_pattern]
keyword[if] keyword[not] identifier[pattern] :
keyword[return] keyword[False]
keyword[return] identifier[self] . identifier[search] ( identifier[pattern] ,**{ literal[string] : keyword[True] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[True] }) | def search_next(self):
"""
Searchs the next search pattern in the document.
:return: Method success.
:rtype: bool
"""
pattern = self.get_selected_text() or self.__search_pattern
if not pattern:
return False # depends on [control=['if'], data=[]]
return self.search(pattern, **{'case_sensitive': True, 'whole_word': False, 'regular_expressions': False, 'backward_search': False, 'wrap_around': True}) |
def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True
return False | def function[match, parameter[self, name]]:
constant[
Returns True if name matches one of the patterns.
]
for taget[name[pat]] in starred[name[self].pats] begin[:]
if call[name[fnmatch].fnmatch, parameter[name[name], name[pat]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[match] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[pat] keyword[in] identifier[self] . identifier[pats] :
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[name] , identifier[pat] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pat']]
return False |
def _compute_all_files(self):
"""Handles lazy evaluation of self.all_files"""
self._all_files = any(pat.all_files() for pat in self.patterns) | def function[_compute_all_files, parameter[self]]:
constant[Handles lazy evaluation of self.all_files]
name[self]._all_files assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b0a40070>]] | keyword[def] identifier[_compute_all_files] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_all_files] = identifier[any] ( identifier[pat] . identifier[all_files] () keyword[for] identifier[pat] keyword[in] identifier[self] . identifier[patterns] ) | def _compute_all_files(self):
"""Handles lazy evaluation of self.all_files"""
self._all_files = any((pat.all_files() for pat in self.patterns)) |
async def reboot(self):
"""Reboot the device."""
endpoint = '/setup/reboot'
url = API.format(ip=self._ipaddress, endpoint=endpoint)
data = {'params': 'now'}
returnvalue = False
try:
async with async_timeout.timeout(5, loop=self._loop):
result = await self._session.post(url, json=data,
headers=HEADERS)
if result.status == 200:
returnvalue = True
except (asyncio.TimeoutError,
aiohttp.ClientError, socket.gaierror) as error:
_LOGGER.error('Error connecting to GHLocalApi, %s', error)
return returnvalue | <ast.AsyncFunctionDef object at 0x7da20e955750> | keyword[async] keyword[def] identifier[reboot] ( identifier[self] ):
literal[string]
identifier[endpoint] = literal[string]
identifier[url] = identifier[API] . identifier[format] ( identifier[ip] = identifier[self] . identifier[_ipaddress] , identifier[endpoint] = identifier[endpoint] )
identifier[data] ={ literal[string] : literal[string] }
identifier[returnvalue] = keyword[False]
keyword[try] :
keyword[async] keyword[with] identifier[async_timeout] . identifier[timeout] ( literal[int] , identifier[loop] = identifier[self] . identifier[_loop] ):
identifier[result] = keyword[await] identifier[self] . identifier[_session] . identifier[post] ( identifier[url] , identifier[json] = identifier[data] ,
identifier[headers] = identifier[HEADERS] )
keyword[if] identifier[result] . identifier[status] == literal[int] :
identifier[returnvalue] = keyword[True]
keyword[except] ( identifier[asyncio] . identifier[TimeoutError] ,
identifier[aiohttp] . identifier[ClientError] , identifier[socket] . identifier[gaierror] ) keyword[as] identifier[error] :
identifier[_LOGGER] . identifier[error] ( literal[string] , identifier[error] )
keyword[return] identifier[returnvalue] | async def reboot(self):
"""Reboot the device."""
endpoint = '/setup/reboot'
url = API.format(ip=self._ipaddress, endpoint=endpoint)
data = {'params': 'now'}
returnvalue = False
try:
async with async_timeout.timeout(5, loop=self._loop):
result = await self._session.post(url, json=data, headers=HEADERS)
if result.status == 200:
returnvalue = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror) as error:
_LOGGER.error('Error connecting to GHLocalApi, %s', error) # depends on [control=['except'], data=['error']]
return returnvalue |
def parse(self, configManager, config):
"""
Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
configFile = self._getConfigFile(config)
if not configFile:
return dict()
yamlConfigs = yaml.load(configFile)
if isinstance(yamlConfigs, dict):
return yamlConfigs
raise self.subparserException("YAML config parsed did not result in a dictionary, but instead a: %s"
% type(yamlConfigs)) | def function[parse, parameter[self, configManager, config]]:
constant[
Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
]
variable[configFile] assign[=] call[name[self]._getConfigFile, parameter[name[config]]]
if <ast.UnaryOp object at 0x7da18bcc8f10> begin[:]
return[call[name[dict], parameter[]]]
variable[yamlConfigs] assign[=] call[name[yaml].load, parameter[name[configFile]]]
if call[name[isinstance], parameter[name[yamlConfigs], name[dict]]] begin[:]
return[name[yamlConfigs]]
<ast.Raise object at 0x7da18bcc8dc0> | keyword[def] identifier[parse] ( identifier[self] , identifier[configManager] , identifier[config] ):
literal[string]
identifier[configFile] = identifier[self] . identifier[_getConfigFile] ( identifier[config] )
keyword[if] keyword[not] identifier[configFile] :
keyword[return] identifier[dict] ()
identifier[yamlConfigs] = identifier[yaml] . identifier[load] ( identifier[configFile] )
keyword[if] identifier[isinstance] ( identifier[yamlConfigs] , identifier[dict] ):
keyword[return] identifier[yamlConfigs]
keyword[raise] identifier[self] . identifier[subparserException] ( literal[string]
% identifier[type] ( identifier[yamlConfigs] )) | def parse(self, configManager, config):
"""
Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
configFile = self._getConfigFile(config)
if not configFile:
return dict() # depends on [control=['if'], data=[]]
yamlConfigs = yaml.load(configFile)
if isinstance(yamlConfigs, dict):
return yamlConfigs # depends on [control=['if'], data=[]]
raise self.subparserException('YAML config parsed did not result in a dictionary, but instead a: %s' % type(yamlConfigs)) |
def lookup(self, short_url):
'''
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance)
'''
if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
raise GDMalformedURLError('The shortened URL must be a non empty string')
# Build data for porst
data = {
'format': 'json',
'shorturl': short_url
}
opener = urllib2.build_opener()
headers = { 'User-Agent' : self._user_agent }
req = urllib2.Request("{0}/forward.php".format(self.shortener_url), urllib.urlencode(data), headers)
f_desc = opener.open(req, timeout = self._timeout)
response = json.loads(f_desc.read())
if 'url' in response:
# Success!
return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url']))
else:
# Error
error_code = int(response['errorcode'])
error_description = str(response['errormessage'])
if error_code == 1:
raise GDMalformedURLError(error_description)
if error_code == 2:
raise GDShortURLError(error_description)
if error_code == 3:
raise GDRateLimitError(error_description)
if error_code == 4:
raise GDGenericError(error_description) | def function[lookup, parameter[self, short_url]]:
constant[
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance)
]
if <ast.BoolOp object at 0x7da1b2492e60> begin[:]
<ast.Raise object at 0x7da1b24937f0>
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2493430>, <ast.Constant object at 0x7da1b2492740>], [<ast.Constant object at 0x7da1b2490d60>, <ast.Name object at 0x7da1b2492ad0>]]
variable[opener] assign[=] call[name[urllib2].build_opener, parameter[]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b2492410>], [<ast.Attribute object at 0x7da1b2493640>]]
variable[req] assign[=] call[name[urllib2].Request, parameter[call[constant[{0}/forward.php].format, parameter[name[self].shortener_url]], call[name[urllib].urlencode, parameter[name[data]]], name[headers]]]
variable[f_desc] assign[=] call[name[opener].open, parameter[name[req]]]
variable[response] assign[=] call[name[json].loads, parameter[call[name[f_desc].read, parameter[]]]]
if compare[constant[url] in name[response]] begin[:]
return[call[call[name[HTMLParser].HTMLParser, parameter[]].unescape, parameter[call[name[urllib].unquote, parameter[call[name[response]][constant[url]]]]]]] | keyword[def] identifier[lookup] ( identifier[self] , identifier[short_url] ):
literal[string]
keyword[if] identifier[short_url] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[short_url] , identifier[basestring] ) keyword[or] identifier[len] ( identifier[short_url] . identifier[strip] ())== literal[int] :
keyword[raise] identifier[GDMalformedURLError] ( literal[string] )
identifier[data] ={
literal[string] : literal[string] ,
literal[string] : identifier[short_url]
}
identifier[opener] = identifier[urllib2] . identifier[build_opener] ()
identifier[headers] ={ literal[string] : identifier[self] . identifier[_user_agent] }
identifier[req] = identifier[urllib2] . identifier[Request] ( literal[string] . identifier[format] ( identifier[self] . identifier[shortener_url] ), identifier[urllib] . identifier[urlencode] ( identifier[data] ), identifier[headers] )
identifier[f_desc] = identifier[opener] . identifier[open] ( identifier[req] , identifier[timeout] = identifier[self] . identifier[_timeout] )
identifier[response] = identifier[json] . identifier[loads] ( identifier[f_desc] . identifier[read] ())
keyword[if] literal[string] keyword[in] identifier[response] :
keyword[return] identifier[HTMLParser] . identifier[HTMLParser] (). identifier[unescape] ( identifier[urllib] . identifier[unquote] ( identifier[response] [ literal[string] ]))
keyword[else] :
identifier[error_code] = identifier[int] ( identifier[response] [ literal[string] ])
identifier[error_description] = identifier[str] ( identifier[response] [ literal[string] ])
keyword[if] identifier[error_code] == literal[int] :
keyword[raise] identifier[GDMalformedURLError] ( identifier[error_description] )
keyword[if] identifier[error_code] == literal[int] :
keyword[raise] identifier[GDShortURLError] ( identifier[error_description] )
keyword[if] identifier[error_code] == literal[int] :
keyword[raise] identifier[GDRateLimitError] ( identifier[error_description] )
keyword[if] identifier[error_code] == literal[int] :
keyword[raise] identifier[GDGenericError] ( identifier[error_description] ) | def lookup(self, short_url):
"""
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance)
"""
if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
raise GDMalformedURLError('The shortened URL must be a non empty string') # depends on [control=['if'], data=[]]
# Build data for porst
data = {'format': 'json', 'shorturl': short_url}
opener = urllib2.build_opener()
headers = {'User-Agent': self._user_agent}
req = urllib2.Request('{0}/forward.php'.format(self.shortener_url), urllib.urlencode(data), headers)
f_desc = opener.open(req, timeout=self._timeout)
response = json.loads(f_desc.read())
if 'url' in response:
# Success!
return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url'])) # depends on [control=['if'], data=['response']]
else:
# Error
error_code = int(response['errorcode'])
error_description = str(response['errormessage'])
if error_code == 1:
raise GDMalformedURLError(error_description) # depends on [control=['if'], data=[]]
if error_code == 2:
raise GDShortURLError(error_description) # depends on [control=['if'], data=[]]
if error_code == 3:
raise GDRateLimitError(error_description) # depends on [control=['if'], data=[]]
if error_code == 4:
raise GDGenericError(error_description) # depends on [control=['if'], data=[]] |
def consume_service(service_agreement_id, service_endpoint, account, files,
destination_folder, index=None):
"""
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if was downloaded, bool
"""
signature = Keeper.get_instance().sign_hash(service_agreement_id, account)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(files), logger.error(
'index can not be bigger than the number of files')
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account,
None, signature, index)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name)
else:
for i, _file in enumerate(files):
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id,
account, _file,
signature, i)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) | def function[consume_service, parameter[service_agreement_id, service_endpoint, account, files, destination_folder, index]]:
constant[
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if was downloaded, bool
]
variable[signature] assign[=] call[call[name[Keeper].get_instance, parameter[]].sign_hash, parameter[name[service_agreement_id], name[account]]]
if compare[name[index] is_not constant[None]] begin[:]
assert[call[name[isinstance], parameter[name[index], name[int]]]]
assert[compare[name[index] greater_or_equal[>=] constant[0]]]
assert[compare[name[index] less[<] call[name[len], parameter[name[files]]]]]
variable[consume_url] assign[=] call[name[Brizo]._create_consume_url, parameter[name[service_endpoint], name[service_agreement_id], name[account], constant[None], name[signature], name[index]]]
call[name[logger].info, parameter[<ast.JoinedStr object at 0x7da20e9b12a0>]]
variable[response] assign[=] call[name[Brizo]._http_client.get, parameter[name[consume_url]]]
variable[file_name] assign[=] call[name[Brizo]._get_file_name, parameter[name[response]]]
call[name[Brizo].write_file, parameter[name[response], name[destination_folder], name[file_name]]] | keyword[def] identifier[consume_service] ( identifier[service_agreement_id] , identifier[service_endpoint] , identifier[account] , identifier[files] ,
identifier[destination_folder] , identifier[index] = keyword[None] ):
literal[string]
identifier[signature] = identifier[Keeper] . identifier[get_instance] (). identifier[sign_hash] ( identifier[service_agreement_id] , identifier[account] )
keyword[if] identifier[index] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[index] , identifier[int] ), identifier[logger] . identifier[error] ( literal[string] )
keyword[assert] identifier[index] >= literal[int] , identifier[logger] . identifier[error] ( literal[string] )
keyword[assert] identifier[index] < identifier[len] ( identifier[files] ), identifier[logger] . identifier[error] (
literal[string] )
identifier[consume_url] = identifier[Brizo] . identifier[_create_consume_url] ( identifier[service_endpoint] , identifier[service_agreement_id] , identifier[account] ,
keyword[None] , identifier[signature] , identifier[index] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[response] = identifier[Brizo] . identifier[_http_client] . identifier[get] ( identifier[consume_url] , identifier[stream] = keyword[True] )
identifier[file_name] = identifier[Brizo] . identifier[_get_file_name] ( identifier[response] )
identifier[Brizo] . identifier[write_file] ( identifier[response] , identifier[destination_folder] , identifier[file_name] )
keyword[else] :
keyword[for] identifier[i] , identifier[_file] keyword[in] identifier[enumerate] ( identifier[files] ):
identifier[consume_url] = identifier[Brizo] . identifier[_create_consume_url] ( identifier[service_endpoint] , identifier[service_agreement_id] ,
identifier[account] , identifier[_file] ,
identifier[signature] , identifier[i] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[response] = identifier[Brizo] . identifier[_http_client] . identifier[get] ( identifier[consume_url] , identifier[stream] = keyword[True] )
identifier[file_name] = identifier[Brizo] . identifier[_get_file_name] ( identifier[response] )
identifier[Brizo] . identifier[write_file] ( identifier[response] , identifier[destination_folder] , identifier[file_name] ) | def consume_service(service_agreement_id, service_endpoint, account, files, destination_folder, index=None):
"""
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if was downloaded, bool
"""
signature = Keeper.get_instance().sign_hash(service_agreement_id, account)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(files), logger.error('index can not be bigger than the number of files')
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account, None, signature, index)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) # depends on [control=['if'], data=['index']]
else:
for (i, _file) in enumerate(files):
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account, _file, signature, i)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) # depends on [control=['for'], data=[]] |
def running(name,
image=None,
skip_translate=None,
ignore_collisions=False,
validate_ip_addrs=True,
force=False,
watch_action='force',
start=True,
shutdown_timeout=None,
client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
networks=None,
**kwargs):
'''
Ensure that a container with a specific configuration is present and
running
name
Name of the container
image
Image to use for the container
.. note::
This state will pull the image if it is not present. However, if
the image needs to be built from a Dockerfile or loaded from a
saved image, or if you would like to use requisites to trigger a
replacement of the container when the image is updated, then the
:py:func:`docker_image.present
<salt.states.dockermod.image_present>` state should be used to
manage the image.
.. versionchanged:: 2018.3.0
If no tag is specified in the image name, and nothing matching the
specified image is pulled on the minion, the ``docker pull`` that
retrieves the image will pull *all tags* for the image. A tag of
``latest`` is no longer implicit for the pull. For this reason, it
is recommended to specify the image in ``repo:tag`` notation.
.. _docker-container-running-skip-translate:
skip_translate
This function translates Salt CLI or SLS input into the format which
docker-py_ expects. However, in the event that Salt's translation logic
fails (due to potential changes in the Docker Remote API, or to bugs in
the translation code), this argument can be used to exert granular
control over which arguments are translated and which are not.
Pass this argument as a comma-separated list (or Python list) of
arguments, and translation for each passed argument name will be
skipped. Alternatively, pass ``True`` and *all* translation will be
skipped.
Skipping translation allows for arguments to be formatted directly in
the format which docker-py_ expects. This allows for API changes and
other issues to be more easily worked around. An example of using this
option to skip translation would be:
For example, imagine that there is an issue with processing the
``port_bindings`` argument, and the following configuration no longer
works as expected:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: 7.3.1611
- port_bindings:
- 10.2.9.10:8080:80
By using ``skip_translate``, you can forego the input translation and
configure the port binding in the format docker-py_ needs:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: 7.3.1611
- skip_translate: port_bindings
- port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314}
See the following links for more information:
- `docker-py Low-level API`_
- `Docker Engine API`_
.. _docker-py: https://pypi.python.org/pypi/docker-py
.. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container
.. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate
ignore_collisions : False
Since many of docker-py_'s arguments differ in name from their CLI
counterparts (with which most Docker users are more familiar), Salt
detects usage of these and aliases them to the docker-py_ version of
that argument so that both CLI and API versions of a given argument are
supported. However, if both the alias and the docker-py_ version of the
same argument (e.g. ``env`` and ``environment``) are used, an error
will be raised. Set this argument to ``True`` to suppress these errors
and keep the docker-py_ version of the argument.
validate_ip_addrs : True
For parameters which accept IP addresses as input, IP address
validation will be performed. To disable, set this to ``False``
force : False
Set this parameter to ``True`` to force Salt to re-create the container
irrespective of whether or not it is configured as desired.
watch_action : force
Control what type of action is taken when this state :ref:`watches
<requisites-watch>` another state that has changes. The default action
is ``force``, which runs the state with ``force`` set to ``True``,
triggering a rebuild of the container.
If any other value is passed, it will be assumed to be a kill signal.
If the container matches the specified configuration, and is running,
then the action will be to send that signal to the container. Kill
signals can be either strings or numbers, and are defined in the
**Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7
signal`` on a Linux host to browse this manpage. For example:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: busybox
- watch_action: SIGHUP
- watch:
- file: some_file
.. note::
If the container differs from the specified configuration, or is
not running, then instead of sending a signal to the container, the
container will be re-created/started and no signal will be sent.
start : True
Set to ``False`` to suppress starting of the container if it exists,
matches the desired configuration, but is not running. This is useful
for data-only containers, or for non-daemonized container processes,
such as the Django ``migrate`` and ``collectstatic`` commands. In
instances such as this, the container only needs to be started the
first time.
shutdown_timeout
If the container needs to be replaced, the container will be stopped
using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a
``shutdown_timeout`` is not set, and the container was created using
``stop_timeout``, that timeout will be used. If neither of these values
were set, then a timeout of 10 seconds will be used.
.. versionchanged:: 2017.7.0
This option was renamed from ``stop_timeout`` to
``shutdown_timeout`` to accommodate the ``stop_timeout`` container
configuration setting.
client_timeout : 60
Timeout in seconds for the Docker client. This is not a timeout for
this function, but for receiving a response from the API.
.. note::
This is only used if Salt needs to pull the requested image.
.. _salt-states-docker-container-network-management:
**NETWORK MANAGEMENT**
.. versionadded:: 2018.3.0
.. versionchanged:: 2019.2.0
If the ``networks`` option is used, any networks (including the default
``bridge`` network) which are not specified will be disconnected.
The ``networks`` argument can be used to ensure that a container is
attached to one or more networks. Optionally, arguments can be passed to
the networks. In the example below, ``net1`` is being configured with
arguments, while ``net2`` and ``bridge`` are being configured *without*
arguments:
.. code-block:: yaml
foo:
docker_container.running:
- image: myuser/myimage:foo
- networks:
- net1:
- aliases:
- bar
- baz
- ipv4_address: 10.0.20.50
- net2
- bridge
- require:
- docker_network: net1
- docker_network: net2
The supported arguments are the ones from the docker-py's
`connect_container_to_network`_ function (other than ``container`` and
``net_id``).
.. important::
Unlike with the arguments described in the **CONTAINER CONFIGURATION
PARAMETERS** section below, these network configuration parameters are
not translated at all. Consult the `connect_container_to_network`_
documentation for the correct type/format of data to pass.
.. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network
To start a container with no network connectivity (only possible in
2019.2.0 and later) pass this option as an empty list. For example:
.. code-block:: yaml
foo:
docker_container.running:
- image: myuser/myimage:foo
- networks: []
**CONTAINER CONFIGURATION PARAMETERS**
auto_remove (or *rm*) : False
Enable auto-removal of the container on daemon side when the
container’s process exits (analogous to running a docker container with
``--rm`` on the CLI).
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- auto_remove: True
binds
Files/directories to bind mount. Each bind mount should be passed in
one of the following formats:
- ``<host_path>:<container_path>`` - ``host_path`` is mounted within
the container as ``container_path`` with read-write access.
- ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is
mounted within the container as ``container_path`` with read-write
access. Additionally, the specified selinux context will be set
within the container.
- ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is
mounted within the container as ``container_path``, with the
read-only or read-write setting explicitly defined.
- ``<host_path>:<container_path>:<read_only>,<selinux_context>`` -
``host_path`` is mounted within the container as ``container_path``,
with the read-only or read-write setting explicitly defined.
Additionally, the specified selinux context will be set within the
container.
``<read_only>`` can be either ``rw`` for read-write access, or ``ro``
for read-only access. When omitted, it is assumed to be read-write.
``<selinux_context>`` can be ``z`` if the volume is shared between
multiple containers, or ``Z`` if the volume should be private.
.. note::
When both ``<read_only>`` and ``<selinux_context>`` are specified,
there must be a comma before ``<selinux_context>``.
Binds can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro
- /home/myuser/conf/foo.conf:/etc/foo.conf:rw
However, in cases where both ro/rw and an selinux context are combined,
the only option is to use a YAML list, like so:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro,Z
- /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z
Since the second bind in the previous example is mounted read-write,
the ``rw`` and comma can be dropped. For example:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro,Z
- /home/myuser/conf/foo.conf:/etc/foo.conf:Z
blkio_weight
Block IO weight (relative weight), accepts a weight value between 10
and 1000.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- blkio_weight: 100
blkio_weight_device
Block IO weight (relative device weight), specified as a list of
expressions in the format ``PATH:RATE``
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- blkio_weight_device: /dev/sda:100
cap_add
List of capabilities to add within the container. Can be expressed as a
comma-separated list or a Python list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_add: SYS_ADMIN,MKNOD
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_add:
- SYS_ADMIN
- MKNOD
.. note::
This option requires Docker 1.2.0 or newer.
cap_drop
List of capabilities to drop within the container. Can be expressed as
a comma-separated list or a Python list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_drop: SYS_ADMIN,MKNOD
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_drop:
- SYS_ADMIN
- MKNOD
.. note::
This option requires Docker 1.2.0 or newer.
command (or *cmd*)
Command to run in the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- command: bash
cpuset_cpus (or *cpuset*)
CPUs on which to allow execution, specified as a string
containing a range (e.g. ``0-3``) or a comma-separated list of CPUs
(e.g. ``0,1``).
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpuset_cpus: "0,1"
cpuset_mems
Memory nodes on which to allow execution, specified as a string
containing a range (e.g. ``0-3``) or a comma-separated list of MEMs
(e.g. ``0,1``). Only effective on NUMA systems.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpuset_mems: "0,1"
cpu_group
The length of a CPU period in microseconds
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_group: 100000
cpu_period
Microseconds of CPU time that the container can get in a CPU period
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_period: 50000
cpu_shares
CPU shares (relative weight), specified as an integer between 2 and 1024.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_shares: 512
detach : False
If ``True``, run the container's command in the background (daemon
mode)
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- detach: True
devices
List of host devices to expose within the container. Can be expressed
as a comma-separated list or a YAML list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices:
- /dev/net/tun
- /dev/xvda1:/dev/xvda1
- /dev/xvdb1:/dev/xvdb1:r
device_read_bps
Limit read rate (bytes per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
integer number of bytes, or a string ending in ``kb``, ``mb``, or
``gb``. Can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_bps:
- /dev/sda:1mb
- /dev/sdb:5mb
device_read_iops
Limit read rate (I/O per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
of I/O operations. Can be expressed as a comma-separated list or a YAML
list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops: /dev/sda:1000,/dev/sdb:500
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops:
- /dev/sda:1000
- /dev/sdb:500
device_write_bps
Limit write rate (bytes per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
integer number of bytes, or a string ending in ``kb``, ``mb``, or
``gb``. Can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_bps:
- /dev/sda:1mb
- /dev/sdb:5mb
device_write_iops
Limit write rate (I/O per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
of I/O operations. Can be expressed as a comma-separated list or a
YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_iops: /dev/sda:1000,/dev/sdb:500
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_iops:
- /dev/sda:1000
- /dev/sdb:500
dns
List of DNS nameservers. Can be expressed as a comma-separated list or
a YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns: 8.8.8.8,8.8.4.4
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns:
- 8.8.8.8
- 8.8.4.4
.. note::
To skip IP address validation, use ``validate_ip_addrs=False``
dns_opt
Additional options to be added to the container’s ``resolv.conf`` file.
Can be expressed as a comma-separated list or a YAML list. The below
two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_opt: ndots:9
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_opt:
- ndots:9
dns_search
List of DNS search domains. Can be expressed as a comma-separated list
or a YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_search: foo1.domain.tld,foo2.domain.tld
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_search:
- foo1.domain.tld
- foo2.domain.tld
domainname
The domain name to use for the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- domainname: domain.tld
entrypoint
Entrypoint for the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- entrypoint: "mycmd --arg1 --arg2"
This argument can also be specified as a list:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- entrypoint:
- mycmd
- --arg1
- --arg2
environment
Either a list of variable/value mappings, or a list of strings in the
format ``VARNAME=value``. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment:
- VAR1: value
- VAR2: value
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment: 'VAR1=value,VAR2=value'
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment:
- VAR1=value
- VAR2=value
extra_hosts
Additional hosts to add to the container's /etc/hosts file. Can be
expressed as a comma-separated list or a Python list. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- extra_hosts: web1:10.9.8.7,web2:10.9.8.8
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- extra_hosts:
- web1:10.9.8.7
- web2:10.9.8.8
.. note::
To skip IP address validation, use ``validate_ip_addrs=False``
.. note::
This option requires Docker 1.3.0 or newer.
group_add
List of additional group names and/or IDs that the container process
will run as. Can be expressed as a comma-separated list or a YAML list.
The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- group_add: web,network
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- group_add:
- web
- network
hostname
Hostname of the container. If not provided, the value passed as the
container's ``name`` will be used for the hostname.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- hostname: web1
.. warning::
``hostname`` cannot be set if ``network_mode`` is set to ``host``.
The below example will result in an error:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- hostname: web1
- network_mode: host
interactive (or *stdin_open*) : False
Leave stdin open, even if not attached
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- interactive: True
ipc_mode (or *ipc*)
Set the IPC mode for the container. The default behavior is to create a
private IPC namespace for the container, but this option can be
used to change that behavior:
- ``container:<container_name_or_id>`` reuses another container shared
memory, semaphores and message queues
- ``host``: use the host's shared memory, semaphores and message queues
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ipc_mode: container:foo
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ipc_mode: host
.. warning::
Using ``host`` gives the container full access to local shared
memory and is therefore considered insecure.
isolation
Specifies the type of isolation technology used by containers
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- isolation: hyperv
.. note::
The default value on Windows server is ``process``, while the
default value on Windows client is ``hyperv``. On Linux, only
``default`` is supported.
labels
Add metadata to the container. Labels can be set both with and without
values, and labels with values can be passed either as ``key=value`` or
``key: value`` pairs. For example, while the below would be very
confusing to read, it is technically valid, and demonstrates the
different ways in which labels can be passed:
.. code-block:: yaml
mynet:
docker_network.present:
- labels:
- foo
- bar=baz
- hello: world
The labels can also simply be passed as a YAML dictionary, though this
can be error-prone due to some :ref:`idiosyncrasies
<yaml-idiosyncrasies>` with how PyYAML loads nested data structures:
.. code-block:: yaml
foo:
docker_network.present:
- labels:
foo: ''
bar: baz
hello: world
.. versionchanged:: 2018.3.0
Methods for specifying labels can now be mixed. Earlier releases
required either labels with or without values.
links
Link this container to another. Links can be specified as a list of
mappings or a comma-separated or Python list of expressions in the
format ``<container_name_or_id>:<link_alias>``. The below three
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links:
- web1: link1
- web2: link2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links: web1:link1,web2:link2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links:
- web1:link1
- web2:link2
log_driver and log_opt
Set container's logging driver and options to configure that driver.
Requires Docker 1.6 or newer.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt:
- syslog-address: tcp://192.168.0.42
- syslog-facility: daemon
The ``log_opt`` can also be expressed as a comma-separated or YAML list
of ``key=value`` pairs. The below two examples are equivalent to the
above one:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt:
- syslog-address=tcp://192.168.0.42
- syslog-facility=daemon
.. note::
The logging driver feature was improved in Docker 1.13 introducing
option name changes. Please see Docker's
`Configure logging drivers`_ documentation for more information.
.. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/
lxc_conf
Additional LXC configuration parameters to set before starting the
container. Either a list of variable/value mappings, or a list of
strings in the format ``VARNAME=value``. The below three examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf:
- lxc.utsname: docker
- lxc.arch: x86_64
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf: lxc.utsname=docker,lxc.arch=x86_64
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf:
- lxc.utsname=docker
- lxc.arch=x86_64
.. note::
These LXC configuration parameters will only have the desired
effect if the container is using the LXC execution driver, which
has been deprecated for some time.
mac_address
MAC address to use for the container. If not specified, a random MAC
address will be used.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mac_address: 01:23:45:67:89:0a
mem_limit (or *memory*) : 0
Memory limit. Can be specified in bytes or using single-letter units
(i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no
memory limit.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mem_limit: 512M
mem_swappiness
Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mem_swappiness: 60
memswap_limit (or *memory_swap*) : -1
Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A
value of ``0`` means no swap limit.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- memswap_limit: 1G
network_disabled : False
If ``True``, networking will be disabled within the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_disabled: True
network_mode : bridge
One of the following:
- ``bridge`` - Creates a new network stack for the container on the
docker bridge
- ``none`` - No networking (equivalent of the Docker CLI argument
``--net=none``). Not to be confused with Python's ``None``.
- ``container:<name_or_id>`` - Reuses another container's network stack
- ``host`` - Use the host's network stack inside the container
- Any name that identifies an existing network that might be created
with ``docker.network_present``.
.. warning::
Using ``host`` mode gives the container full access to the
hosts system's services (such as D-bus), and is therefore
considered insecure.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_mode: "none"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_mode: container:web1
oom_kill_disable
Whether to disable OOM killer
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- oom_kill_disable: False
oom_score_adj
An integer value containing the score given to the container in order
to tune OOM killer preferences
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- oom_score_adj: 500
pid_mode
Set to ``host`` to use the host container's PID namespace within the
container. Requires Docker 1.5.0 or newer.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- pid_mode: host
.. note::
This option requires Docker 1.5.0 or newer.
pids_limit
Set the container's PID limit. Set to ``-1`` for unlimited.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- pids_limit: 2000
port_bindings (or *publish*)
Bind exposed ports. Port bindings should be passed in the same way as
the ``--publish`` argument to the ``docker run`` CLI command:
- ``ip:hostPort:containerPort`` - Bind a specific IP and port on the
host to a specific port within the container.
- ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a
specific port within the container.
- ``hostPort:containerPort`` - Bind a specific port on all of the
host's interfaces to a specific port within the container.
- ``containerPort`` - Bind an ephemeral port on all of the host's
interfaces to a specific port within the container.
Multiple bindings can be separated by commas, or expressed as a YAML
list, and port ranges can be defined using dashes. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- port_bindings:
- 4505-4506:14505-14506
- 2123:2123/udp
- 8080
.. note::
When specifying a protocol, it must be passed in the
``containerPort`` value, as seen in the examples above.
ports
A list of ports to expose on the container. Can either be a
comma-separated list or a YAML list. If the protocol is omitted, the
port will be assumed to be a TCP port. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports: 1111,2222/udp
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports:
- 1111
- 2222/udp
privileged : False
If ``True``, runs the exec process with extended privileges
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- privileged: True
publish_all_ports (or *publish_all*) : False
Publish all ports to the host
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports: 8080
- publish_all_ports: True
read_only : False
If ``True``, mount the container’s root filesystem as read only
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- read_only: True
restart_policy (or *restart*)
Set a restart policy for the container. Must be passed as a string in
the format ``policy[:retry_count]`` where ``policy`` is one of
``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count``
is an optional limit to the number of retries. The retry count is ignored
when using the ``always`` or ``unless-stopped`` restart policy.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- restart_policy: on-failure:5
bar:
docker_container.running:
- image: bar/baz:latest
- restart_policy: always
security_opt (or *security_opts*):
Security configuration for MLS systems such as SELinux and AppArmor.
Can be expressed as a comma-separated list or a YAML list. The below
two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- security_opt: apparmor:unconfined
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- security_opt:
- apparmor:unconfined
.. important::
Some security options can contain commas. In these cases, this
argument *must* be passed as a Python list, as splitting by comma
will result in an invalid configuration.
.. note::
See the documentation for security_opt at
https://docs.docker.com/engine/reference/run/#security-configuration
shm_size
Size of /dev/shm
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- shm_size: 128M
stop_signal
Specify the signal docker will send to the container when stopping.
Useful when running systemd as PID 1 inside the container.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- stop_signal: SIGRTMIN+3
.. note::
This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or
newer.
.. versionadded:: 2016.11.0
stop_timeout
Timeout to stop the container, in seconds
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- stop_timeout: 5
.. note::
In releases prior to 2017.7.0, this option was not set in the
container configuration, but rather this timeout was enforced only
when shutting down an existing container to replace it. To remove
the ambiguity, and to allow for the container to have a stop
timeout set for it, the old ``stop_timeout`` argument has been
renamed to ``shutdown_timeout``, while ``stop_timeout`` now refers
to the container's configured stop timeout.
storage_opt
Storage driver options for the container. Can be either a list of
strings in the format ``option=value``, or a list of mappings between
option and value. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt:
- dm.basesize: 40G
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt: dm.basesize=40G
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt:
- dm.basesize=40G
sysctls (or *sysctl*)
Set sysctl options for the container. Can be either a list of strings
in the format ``option=value``, or a list of mappings between option
and value. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls:
- fs.nr_open: 1048576
- kernel.pid_max: 32768
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls: fs.nr_open=1048576,kernel.pid_max=32768
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls:
- fs.nr_open=1048576
- kernel.pid_max=32768
tmpfs
A map of container directories which should be replaced by tmpfs mounts
and their corresponding mount options.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- tmpfs:
- /run: rw,noexec,nosuid,size=65536k
tty : False
Attach TTYs
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- tty: True
ulimits
List of ulimits. These limits should be passed in the format
``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being
optional. Can be expressed as a comma-separated list or a YAML list.
The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ulimits: nofile=1024:1024,nproc=60
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ulimits:
- nofile=1024:1024
- nproc=60
user
User under which to run exec process
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- user: foo
userns_mode (or *user_ns_mode*)
Sets the user namsepace mode, when the user namespace remapping option
is enabled
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- userns_mode: host
volumes (or *volume*)
List of directories to expose as volumes. Can be expressed as a
comma-separated list or a YAML list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes: /mnt/vol1,/mnt/vol2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes:
- /mnt/vol1
- /mnt/vol2
volumes_from
Container names or IDs from which the container will get volumes. Can
be expressed as a comma-separated list or a YAML list. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes_from: foo
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes_from:
- foo
volume_driver
sets the container's volume driver
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volume_driver: foobar
working_dir (or *workdir*)
Working directory inside the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- working_dir: /var/log/nginx
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if image is None:
ret['result'] = False
ret['comment'] = 'The \'image\' argument is required'
return ret
elif not isinstance(image, six.string_types):
image = six.text_type(image)
try:
# Since we're rewriting the "networks" value below, save the original
# value here.
configured_networks = networks
networks = _parse_networks(networks)
if networks:
kwargs['networks'] = networks
image_id = _resolve_image(ret, image, client_timeout)
except CommandExecutionError as exc:
ret['result'] = False
if exc.info is not None:
return _format_comments(ret, exc.info)
else:
ret['comment'] = exc.__str__()
return ret
comments = []
# Pop off the send_signal argument passed by the watch requisite
send_signal = kwargs.pop('send_signal', False)
try:
current_image_id = __salt__['docker.inspect_container'](name)['Image']
except CommandExecutionError:
current_image_id = None
except KeyError:
ret['result'] = False
comments.append(
'Unable to detect current image for container \'{0}\'. '
'This might be due to a change in the Docker API.'.format(name)
)
return _format_comments(ret, comments)
# Shorthand to make the below code more understandable
exists = current_image_id is not None
pre_state = __salt__['docker.state'](name) if exists else None
# If skip_comparison is True, we're definitely going to be using the temp
# container as the new container (because we're forcing the change, or
# because the image IDs differ). If False, we'll need to perform a
# comparison between it and the new container.
skip_comparison = force or not exists or current_image_id != image_id
if skip_comparison and __opts__['test']:
ret['result'] = None
if force:
ret['changes']['forced_update'] = True
elif current_image_id != image_id:
ret['changes']['image'] = {'old': current_image_id, 'new': image_id}
comments.append(
'Container \'{0}\' would be {1}'.format(
name,
'created' if not exists else 'replaced'
)
)
return _format_comments(ret, comments)
# Create temp container (or just create the named container if the
# container does not already exist)
try:
temp_container = __salt__['docker.create'](
image,
name=name if not exists else None,
skip_translate=skip_translate,
ignore_collisions=ignore_collisions,
validate_ip_addrs=validate_ip_addrs,
client_timeout=client_timeout,
**kwargs)
temp_container_name = temp_container['Name']
except KeyError as exc:
ret['result'] = False
comments.append(
'Key \'{0}\' missing from API response, this may be due to a '
'change in the Docker Remote API. Please report this on the '
'SaltStack issue tracker if it has not already been reported.'
.format(exc)
)
return _format_comments(ret, comments)
except Exception as exc:
ret['result'] = False
msg = exc.__str__()
if isinstance(exc, CommandExecutionError) \
and isinstance(exc.info, dict) and 'invalid' in exc.info:
msg += (
'\n\nIf you feel this information is incorrect, the '
'skip_translate argument can be used to skip input '
'translation for the argument(s) identified as invalid. See '
'the documentation for details.'
)
comments.append(msg)
return _format_comments(ret, comments)
def _replace(orig, new):
rm_kwargs = {'stop': True}
if shutdown_timeout is not None:
rm_kwargs['timeout'] = shutdown_timeout
ret['changes'].setdefault('container_id', {})['removed'] = \
__salt__['docker.rm'](name, **rm_kwargs)
try:
result = __salt__['docker.rename'](new, orig)
except CommandExecutionError as exc:
result = False
comments.append('Failed to rename temp container: {0}'.format(exc))
if result:
comments.append('Replaced container \'{0}\''.format(orig))
else:
comments.append('Failed to replace container \'{0}\'')
return result
def _delete_temp_container():
log.debug('Removing temp container \'%s\'', temp_container_name)
__salt__['docker.rm'](temp_container_name)
# If we're not skipping the comparison, then the assumption is that
# temp_container will be discarded, unless the comparison reveals
# differences, in which case we'll set cleanup_temp = False to prevent it
# from being cleaned.
cleanup_temp = not skip_comparison
try:
pre_net_connect = __salt__['docker.inspect_container'](
name if exists else temp_container_name)
for net_name, net_conf in six.iteritems(networks):
try:
__salt__['docker.connect_container_to_network'](
temp_container_name,
net_name,
**net_conf)
except CommandExecutionError as exc:
# Shouldn't happen, stopped docker containers can be
# attached to networks even if the static IP lies outside
# of the network's subnet. An exception will be raised once
# you try to start the container, however.
ret['result'] = False
comments.append(exc.__str__())
return _format_comments(ret, comments)
post_net_connect = __salt__['docker.inspect_container'](
temp_container_name)
if configured_networks is not None:
# Use set arithmetic to determine the networks which are connected
# but not explicitly defined. They will be disconnected below. Note
# that we check configured_networks because it represents the
# original (unparsed) network configuration. When no networks
# argument is used, the parsed networks will be an empty list, so
# it's not sufficient to do a boolean check on the "networks"
# variable.
extra_nets = set(
post_net_connect.get('NetworkSettings', {}).get('Networks', {})
) - set(networks)
if extra_nets:
for extra_net in extra_nets:
__salt__['docker.disconnect_container_from_network'](
temp_container_name,
extra_net)
# We've made changes, so we need to inspect the container again
post_net_connect = __salt__['docker.inspect_container'](
temp_container_name)
net_changes = __salt__['docker.compare_container_networks'](
pre_net_connect, post_net_connect)
if not skip_comparison:
container_changes = __salt__['docker.compare_containers'](
name,
temp_container_name,
ignore='Hostname',
)
if container_changes:
if _check_diff(container_changes):
ret.setdefault('warnings', []).append(
'The detected changes may be due to incorrect '
'handling of arguments in earlier Salt releases. If '
'this warning persists after running the state '
'again{0}, and no changes were made to the SLS file, '
'then please report this.'.format(
' without test=True' if __opts__['test'] else ''
)
)
changes_ptr = ret['changes'].setdefault('container', {})
changes_ptr.update(container_changes)
if __opts__['test']:
ret['result'] = None
comments.append(
'Container \'{0}\' would be {1}'.format(
name,
'created' if not exists else 'replaced'
)
)
else:
# We don't want to clean the temp container, we'll be
# replacing the existing one with it.
cleanup_temp = False
# Replace the container
if not _replace(name, temp_container_name):
ret['result'] = False
return _format_comments(ret, comments)
ret['changes'].setdefault('container_id', {})['added'] = \
temp_container['Id']
else:
# No changes between existing container and temp container.
# First check if a requisite is asking to send a signal to the
# existing container.
if send_signal:
if __opts__['test']:
comments.append(
'Signal {0} would be sent to container'.format(
watch_action
)
)
else:
try:
__salt__['docker.signal'](name, signal=watch_action)
except CommandExecutionError as exc:
ret['result'] = False
comments.append(
'Failed to signal container: {0}'.format(exc)
)
return _format_comments(ret, comments)
else:
ret['changes']['signal'] = watch_action
comments.append(
'Sent signal {0} to container'.format(watch_action)
)
elif container_changes:
if not comments:
log.warning(
'docker_container.running: detected changes without '
'a specific comment for container \'%s\'', name
)
comments.append(
'Container \'{0}\'{1} updated.'.format(
name,
' would be' if __opts__['test'] else ''
)
)
else:
# Container was not replaced, no differences between the
# existing container and the temp container were detected,
# and no signal was sent to the container.
comments.append(
'Container \'{0}\' is already configured as specified'
.format(name)
)
if net_changes:
ret['changes'].setdefault('container', {})['Networks'] = net_changes
if __opts__['test']:
ret['result'] = None
comments.append('Network configuration would be updated')
elif cleanup_temp:
# We only need to make network changes if the container
# isn't being replaced, since we would already have
# attached all the networks for purposes of comparison.
network_failure = False
for net_name in sorted(net_changes):
errors = []
disconnected = connected = False
try:
if name in __salt__['docker.connected'](net_name):
__salt__['docker.disconnect_container_from_network'](
name,
net_name)
disconnected = True
except CommandExecutionError as exc:
errors.append(exc.__str__())
if net_name in networks:
try:
__salt__['docker.connect_container_to_network'](
name,
net_name,
**networks[net_name])
connected = True
except CommandExecutionError as exc:
errors.append(exc.__str__())
if disconnected:
# We succeeded in disconnecting but failed
# to reconnect. This can happen if the
# network's subnet has changed and we try
# to reconnect with the same IP address
# from the old subnet.
for item in list(net_changes[net_name]):
if net_changes[net_name][item]['old'] is None:
# Since they'd both be None, just
# delete this key from the changes
del net_changes[net_name][item]
else:
net_changes[net_name][item]['new'] = None
if errors:
comments.extend(errors)
network_failure = True
ret['changes'].setdefault(
'container', {}).setdefault(
'Networks', {})[net_name] = net_changes[net_name]
if disconnected and connected:
comments.append(
'Reconnected to network \'{0}\' with updated '
'configuration'.format(net_name)
)
elif disconnected:
comments.append(
'Disconnected from network \'{0}\''.format(
net_name
)
)
elif connected:
comments.append(
'Connected to network \'{0}\''.format(net_name)
)
if network_failure:
ret['result'] = False
return _format_comments(ret, comments)
finally:
if cleanup_temp:
_delete_temp_container()
if skip_comparison:
if not exists:
comments.append('Created container \'{0}\''.format(name))
else:
if not _replace(name, temp_container):
ret['result'] = False
return _format_comments(ret, comments)
ret['changes'].setdefault('container_id', {})['added'] = \
temp_container['Id']
# "exists" means that a container by the specified name existed prior to
# this state being run
# "not cleanup_temp" means that the temp container became permanent, either
# because the named container did not exist or changes were detected
# "cleanup_temp" means that the container already existed and no changes
# were detected, so the the temp container was discarded
if not cleanup_temp and (not exists or (exists and start)) \
or (start and cleanup_temp and pre_state != 'running'):
if __opts__['test']:
ret['result'] = None
comments.append('Container would be started')
return _format_comments(ret, comments)
else:
try:
post_state = __salt__['docker.start'](name)['state']['new']
except Exception as exc:
ret['result'] = False
comments.append(
'Failed to start container \'{0}\': \'{1}\''.format(name, exc)
)
return _format_comments(ret, comments)
else:
post_state = __salt__['docker.state'](name)
if not __opts__['test'] and post_state == 'running':
# Now that we're certain the container is running, check each modified
# network to see if the network went from static (or disconnected) to
# automatic IP configuration. If so, grab the automatically-assigned
# IPs and munge the changes dict to include them. Note that this can
# only be done after the container is started bceause automatic IPs are
# assigned at runtime.
contextkey = '.'.join((name, 'docker_container.running'))
def _get_nets():
if contextkey not in __context__:
new_container_info = \
__salt__['docker.inspect_container'](name)
__context__[contextkey] = new_container_info.get(
'NetworkSettings', {}).get('Networks', {})
return __context__[contextkey]
autoip_keys = __opts__['docker.compare_container_networks'].get('automatic', [])
for net_name, net_changes in six.iteritems(
ret['changes'].get('container', {}).get('Networks', {})):
if 'IPConfiguration' in net_changes \
and net_changes['IPConfiguration']['new'] == 'automatic':
for key in autoip_keys:
val = _get_nets().get(net_name, {}).get(key)
if val:
net_changes[key] = {'old': None, 'new': val}
try:
net_changes.pop('IPConfiguration')
except KeyError:
pass
__context__.pop(contextkey, None)
if pre_state != post_state:
ret['changes']['state'] = {'old': pre_state, 'new': post_state}
if pre_state is not None:
comments.append(
'State changed from \'{0}\' to \'{1}\''.format(
pre_state, post_state
)
)
if exists and current_image_id != image_id:
comments.append('Container has a new image')
ret['changes']['image'] = {'old': current_image_id, 'new': image_id}
if post_state != 'running' and start:
ret['result'] = False
comments.append('Container is not running')
return _format_comments(ret, comments) | def function[running, parameter[name, image, skip_translate, ignore_collisions, validate_ip_addrs, force, watch_action, start, shutdown_timeout, client_timeout, networks]]:
constant[
Ensure that a container with a specific configuration is present and
running
name
Name of the container
image
Image to use for the container
.. note::
This state will pull the image if it is not present. However, if
the image needs to be built from a Dockerfile or loaded from a
saved image, or if you would like to use requisites to trigger a
replacement of the container when the image is updated, then the
:py:func:`docker_image.present
<salt.states.dockermod.image_present>` state should be used to
manage the image.
.. versionchanged:: 2018.3.0
If no tag is specified in the image name, and nothing matching the
specified image is pulled on the minion, the ``docker pull`` that
retrieves the image will pull *all tags* for the image. A tag of
``latest`` is no longer implicit for the pull. For this reason, it
is recommended to specify the image in ``repo:tag`` notation.
.. _docker-container-running-skip-translate:
skip_translate
This function translates Salt CLI or SLS input into the format which
docker-py_ expects. However, in the event that Salt's translation logic
fails (due to potential changes in the Docker Remote API, or to bugs in
the translation code), this argument can be used to exert granular
control over which arguments are translated and which are not.
Pass this argument as a comma-separated list (or Python list) of
arguments, and translation for each passed argument name will be
skipped. Alternatively, pass ``True`` and *all* translation will be
skipped.
Skipping tranlsation allows for arguments to be formatted directly in
the format which docker-py_ expects. This allows for API changes and
other issues to be more easily worked around. An example of using this
option to skip translation would be:
For example, imagine that there is an issue with processing the
``port_bindings`` argument, and the following configuration no longer
works as expected:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: 7.3.1611
- port_bindings:
- 10.2.9.10:8080:80
By using ``skip_translate``, you can forego the input translation and
configure the port binding in the format docker-py_ needs:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: 7.3.1611
- skip_translate: port_bindings
- port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314}
See the following links for more information:
- `docker-py Low-level API`_
- `Docker Engine API`_
.. _docker-py: https://pypi.python.org/pypi/docker-py
.. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container
.. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate
ignore_collisions : False
Since many of docker-py_'s arguments differ in name from their CLI
counterparts (with which most Docker users are more familiar), Salt
detects usage of these and aliases them to the docker-py_ version of
that argument so that both CLI and API versions of a given argument are
supported. However, if both the alias and the docker-py_ version of the
same argument (e.g. ``env`` and ``environment``) are used, an error
will be raised. Set this argument to ``True`` to suppress these errors
and keep the docker-py_ version of the argument.
validate_ip_addrs : True
For parameters which accept IP addresses as input, IP address
validation will be performed. To disable, set this to ``False``
force : False
Set this parameter to ``True`` to force Salt to re-create the container
irrespective of whether or not it is configured as desired.
watch_action : force
Control what type of action is taken when this state :ref:`watches
<requisites-watch>` another state that has changes. The default action
is ``force``, which runs the state with ``force`` set to ``True``,
triggering a rebuild of the container.
If any other value is passed, it will be assumed to be a kill signal.
If the container matches the specified configuration, and is running,
then the action will be to send that signal to the container. Kill
signals can be either strings or numbers, and are defined in the
**Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7
signal`` on a Linux host to browse this manpage. For example:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: busybox
- watch_action: SIGHUP
- watch:
- file: some_file
.. note::
If the container differs from the specified configuration, or is
not running, then instead of sending a signal to the container, the
container will be re-created/started and no signal will be sent.
start : True
Set to ``False`` to suppress starting of the container if it exists,
matches the desired configuration, but is not running. This is useful
for data-only containers, or for non-daemonized container processes,
such as the Django ``migrate`` and ``collectstatic`` commands. In
instances such as this, the container only needs to be started the
first time.
shutdown_timeout
If the container needs to be replaced, the container will be stopped
using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a
``shutdown_timout`` is not set, and the container was created using
``stop_timeout``, that timeout will be used. If neither of these values
were set, then a timeout of 10 seconds will be used.
.. versionchanged:: 2017.7.0
This option was renamed from ``stop_timeout`` to
``shutdown_timeout`` to accommodate the ``stop_timeout`` container
configuration setting.
client_timeout : 60
Timeout in seconds for the Docker client. This is not a timeout for
this function, but for receiving a response from the API.
.. note::
This is only used if Salt needs to pull the requested image.
.. _salt-states-docker-container-network-management:
**NETWORK MANAGEMENT**
.. versionadded:: 2018.3.0
.. versionchanged:: 2019.2.0
If the ``networks`` option is used, any networks (including the default
``bridge`` network) which are not specified will be disconnected.
The ``networks`` argument can be used to ensure that a container is
attached to one or more networks. Optionally, arguments can be passed to
the networks. In the example below, ``net1`` is being configured with
arguments, while ``net2`` and ``bridge`` are being configured *without*
arguments:
.. code-block:: yaml
foo:
docker_container.running:
- image: myuser/myimage:foo
- networks:
- net1:
- aliases:
- bar
- baz
- ipv4_address: 10.0.20.50
- net2
- bridge
- require:
- docker_network: net1
- docker_network: net2
The supported arguments are the ones from the docker-py's
`connect_container_to_network`_ function (other than ``container`` and
``net_id``).
.. important::
Unlike with the arguments described in the **CONTAINER CONFIGURATION
PARAMETERS** section below, these network configuration parameters are
not translated at all. Consult the `connect_container_to_network`_
documentation for the correct type/format of data to pass.
.. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network
To start a container with no network connectivity (only possible in
2019.2.0 and later) pass this option as an empty list. For example:
.. code-block:: yaml
foo:
docker_container.running:
- image: myuser/myimage:foo
- networks: []
**CONTAINER CONFIGURATION PARAMETERS**
auto_remove (or *rm*) : False
Enable auto-removal of the container on daemon side when the
container’s process exits (analogous to running a docker container with
``--rm`` on the CLI).
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- auto_remove: True
binds
Files/directories to bind mount. Each bind mount should be passed in
one of the following formats:
- ``<host_path>:<container_path>`` - ``host_path`` is mounted within
the container as ``container_path`` with read-write access.
- ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is
mounted within the container as ``container_path`` with read-write
access. Additionally, the specified selinux context will be set
within the container.
- ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is
mounted within the container as ``container_path``, with the
read-only or read-write setting explicitly defined.
- ``<host_path>:<container_path>:<read_only>,<selinux_context>`` -
``host_path`` is mounted within the container as ``container_path``,
with the read-only or read-write setting explicitly defined.
Additionally, the specified selinux context will be set within the
container.
``<read_only>`` can be either ``rw`` for read-write access, or ``ro``
for read-only access. When omitted, it is assumed to be read-write.
``<selinux_context>`` can be ``z`` if the volume is shared between
multiple containers, or ``Z`` if the volume should be private.
.. note::
When both ``<read_only>`` and ``<selinux_context>`` are specified,
there must be a comma before ``<selinux_context>``.
Binds can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro
- /home/myuser/conf/foo.conf:/etc/foo.conf:rw
However, in cases where both ro/rw and an selinux context are combined,
the only option is to use a YAML list, like so:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro,Z
- /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z
Since the second bind in the previous example is mounted read-write,
the ``rw`` and comma can be dropped. For example:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro,Z
- /home/myuser/conf/foo.conf:/etc/foo.conf:Z
blkio_weight
Block IO weight (relative weight), accepts a weight value between 10
and 1000.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- blkio_weight: 100
blkio_weight_device
Block IO weight (relative device weight), specified as a list of
expressions in the format ``PATH:RATE``
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- blkio_weight_device: /dev/sda:100
cap_add
List of capabilities to add within the container. Can be expressed as a
comma-separated list or a Python list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_add: SYS_ADMIN,MKNOD
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_add:
- SYS_ADMIN
- MKNOD
.. note::
This option requires Docker 1.2.0 or newer.
cap_drop
List of capabilities to drop within the container. Can be expressed as
a comma-separated list or a Python list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_drop: SYS_ADMIN,MKNOD
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_drop:
- SYS_ADMIN
- MKNOD
.. note::
This option requires Docker 1.2.0 or newer.
command (or *cmd*)
Command to run in the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- command: bash
cpuset_cpus (or *cpuset*)
CPUs on which which to allow execution, specified as a string
containing a range (e.g. ``0-3``) or a comma-separated list of CPUs
(e.g. ``0,1``).
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpuset_cpus: "0,1"
cpuset_mems
Memory nodes on which which to allow execution, specified as a string
containing a range (e.g. ``0-3``) or a comma-separated list of MEMs
(e.g. ``0,1``). Only effective on NUMA systems.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpuset_mems: "0,1"
cpu_group
The length of a CPU period in microseconds
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_group: 100000
cpu_period
Microseconds of CPU time that the container can get in a CPU period
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_period: 50000
cpu_shares
CPU shares (relative weight), specified as an integer between 2 and 1024.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_shares: 512
detach : False
If ``True``, run the container's command in the background (daemon
mode)
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- detach: True
devices
List of host devices to expose within the container. Can be expressed
as a comma-separated list or a YAML list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices:
- /dev/net/tun
- /dev/xvda1:/dev/xvda1
- /dev/xvdb1:/dev/xvdb1:r
device_read_bps
Limit read rate (bytes per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
integer number of bytes, or a string ending in ``kb``, ``mb``, or
``gb``. Can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_bps:
- /dev/sda:1mb
- /dev/sdb:5mb
device_read_iops
Limit read rate (I/O per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
of I/O operations. Can be expressed as a comma-separated list or a YAML
list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops: /dev/sda:1000,/dev/sdb:500
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops:
- /dev/sda:1000
- /dev/sdb:500
device_write_bps
Limit write rate (bytes per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
integer number of bytes, or a string ending in ``kb``, ``mb``, or
``gb``. Can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_bps:
- /dev/sda:1mb
- /dev/sdb:5mb
device_read_iops
Limit write rate (I/O per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
of I/O operations. Can be expressed as a comma-separated list or a
YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops: /dev/sda:1000,/dev/sdb:500
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops:
- /dev/sda:1000
- /dev/sdb:500
dns
List of DNS nameservers. Can be expressed as a comma-separated list or
a YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns: 8.8.8.8,8.8.4.4
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns:
- 8.8.8.8
- 8.8.4.4
.. note::
To skip IP address validation, use ``validate_ip_addrs=False``
dns_opt
Additional options to be added to the container’s ``resolv.conf`` file.
Can be expressed as a comma-separated list or a YAML list. The below
two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_opt: ndots:9
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_opt:
- ndots:9
dns_search
List of DNS search domains. Can be expressed as a comma-separated list
or a YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_search: foo1.domain.tld,foo2.domain.tld
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_search:
- foo1.domain.tld
- foo2.domain.tld
domainname
The domain name to use for the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dommainname: domain.tld
entrypoint
Entrypoint for the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- entrypoint: "mycmd --arg1 --arg2"
This argument can also be specified as a list:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- entrypoint:
- mycmd
- --arg1
- --arg2
environment
Either a list of variable/value mappings, or a list of strings in the
format ``VARNAME=value``. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment:
- VAR1: value
- VAR2: value
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment: 'VAR1=value,VAR2=value'
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment:
- VAR1=value
- VAR2=value
extra_hosts
Additional hosts to add to the container's /etc/hosts file. Can be
expressed as a comma-separated list or a Python list. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- extra_hosts: web1:10.9.8.7,web2:10.9.8.8
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- extra_hosts:
- web1:10.9.8.7
- web2:10.9.8.8
.. note::
To skip IP address validation, use ``validate_ip_addrs=False``
.. note::
This option requires Docker 1.3.0 or newer.
group_add
List of additional group names and/or IDs that the container process
will run as. Can be expressed as a comma-separated list or a YAML list.
The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- group_add: web,network
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- group_add:
- web
- network
hostname
Hostname of the container. If not provided, the value passed as the
container's``name`` will be used for the hostname.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- hostname: web1
.. warning::
``hostname`` cannot be set if ``network_mode`` is set to ``host``.
The below example will result in an error:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- hostname: web1
- network_mode: host
interactive (or *stdin_open*) : False
Leave stdin open, even if not attached
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- interactive: True
ipc_mode (or *ipc*)
Set the IPC mode for the container. The default behavior is to create a
private IPC namespace for the container, but this option can be
used to change that behavior:
- ``container:<container_name_or_id>`` reuses another container shared
memory, semaphores and message queues
- ``host``: use the host's shared memory, semaphores and message queues
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ipc_mode: container:foo
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ipc_mode: host
.. warning::
Using ``host`` gives the container full access to local shared
memory and is therefore considered insecure.
isolation
Specifies the type of isolation technology used by containers
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- isolation: hyperv
.. note::
The default value on Windows server is ``process``, while the
default value on Windows client is ``hyperv``. On Linux, only
``default`` is supported.
labels
Add metadata to the container. Labels can be set both with and without
values, and labels with values can be passed either as ``key=value`` or
``key: value`` pairs. For example, while the below would be very
confusing to read, it is technically valid, and demonstrates the
different ways in which labels can be passed:
.. code-block:: yaml
mynet:
docker_network.present:
- labels:
- foo
- bar=baz
- hello: world
The labels can also simply be passed as a YAML dictionary, though this
can be error-prone due to some :ref:`idiosyncrasies
<yaml-idiosyncrasies>` with how PyYAML loads nested data structures:
.. code-block:: yaml
foo:
docker_network.present:
- labels:
foo: ''
bar: baz
hello: world
.. versionchanged:: 2018.3.0
Methods for specifying labels can now be mixed. Earlier releases
required either labels with or without values.
links
Link this container to another. Links can be specified as a list of
mappings or a comma-separated or Python list of expressions in the
format ``<container_name_or_id>:<link_alias>``. The below three
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links:
- web1: link1
- web2: link2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links: web1:link1,web2:link2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links:
- web1:link1
- web2:link2
log_driver and log_opt
Set container's logging driver and options to configure that driver.
Requires Docker 1.6 or newer.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt:
- syslog-address: tcp://192.168.0.42
- syslog-facility: daemon
The ``log_opt`` can also be expressed as a comma-separated or YAML list
of ``key=value`` pairs. The below two examples are equivalent to the
above one:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt:
- syslog-address=tcp://192.168.0.42
- syslog-facility=daemon
.. note::
The logging driver feature was improved in Docker 1.13 introducing
option name changes. Please see Docker's
`Configure logging drivers`_ documentation for more information.
.. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/
lxc_conf
Additional LXC configuration parameters to set before starting the
container. Either a list of variable/value mappings, or a list of
strings in the format ``VARNAME=value``. The below three examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf:
- lxc.utsname: docker
- lxc.arch: x86_64
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf: lxc.utsname=docker,lxc.arch=x86_64
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf:
- lxc.utsname=docker
- lxc.arch=x86_64
.. note::
These LXC configuration parameters will only have the desired
effect if the container is using the LXC execution driver, which
has been deprecated for some time.
mac_address
MAC address to use for the container. If not specified, a random MAC
address will be used.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mac_address: 01:23:45:67:89:0a
mem_limit (or *memory*) : 0
Memory limit. Can be specified in bytes or using single-letter units
(i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no
memory limit.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mem_limit: 512M
mem_swappiness
Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mem_swappiness: 60
memswap_limit (or *memory_swap*) : -1
Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A
value of ``0`` means no swap limit.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- memswap_limit: 1G
network_disabled : False
If ``True``, networking will be disabled within the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_disabled: True
network_mode : bridge
One of the following:
- ``bridge`` - Creates a new network stack for the container on the
docker bridge
- ``none`` - No networking (equivalent of the Docker CLI argument
``--net=none``). Not to be confused with Python's ``None``.
- ``container:<name_or_id>`` - Reuses another container's network stack
- ``host`` - Use the host's network stack inside the container
- Any name that identifies an existing network that might be created
with ``docker.network_present``.
.. warning::
Using ``host`` mode gives the container full access to the
hosts system's services (such as D-bus), and is therefore
considered insecure.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_mode: "none"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_mode: container:web1
oom_kill_disable
Whether to disable OOM killer
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- oom_kill_disable: False
oom_score_adj
An integer value containing the score given to the container in order
to tune OOM killer preferences
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- oom_score_adj: 500
pid_mode
Set to ``host`` to use the host container's PID namespace within the
container. Requires Docker 1.5.0 or newer.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- pid_mode: host
.. note::
This option requires Docker 1.5.0 or newer.
pids_limit
Set the container's PID limit. Set to ``-1`` for unlimited.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- pids_limit: 2000
port_bindings (or *publish*)
Bind exposed ports. Port bindings should be passed in the same way as
the ``--publish`` argument to the ``docker run`` CLI command:
- ``ip:hostPort:containerPort`` - Bind a specific IP and port on the
host to a specific port within the container.
- ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a
specific port within the container.
- ``hostPort:containerPort`` - Bind a specific port on all of the
host's interfaces to a specific port within the container.
- ``containerPort`` - Bind an ephemeral port on all of the host's
interfaces to a specific port within the container.
Multiple bindings can be separated by commas, or expressed as a YAML
list, and port ranges can be defined using dashes. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- port_bindings:
- 4505-4506:14505-14506
- 2123:2123/udp
- 8080
.. note::
When specifying a protocol, it must be passed in the
``containerPort`` value, as seen in the examples above.
ports
A list of ports to expose on the container. Can either be a
comma-separated list or a YAML list. If the protocol is omitted, the
port will be assumed to be a TCP port. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports: 1111,2222/udp
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports:
- 1111
- 2222/udp
privileged : False
If ``True``, runs the exec process with extended privileges
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- privileged: True
publish_all_ports (or *publish_all*) : False
Publish all ports to the host
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports: 8080
- publish_all_ports: True
read_only : False
If ``True``, mount the container’s root filesystem as read only
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- read_only: True
restart_policy (or *restart*)
Set a restart policy for the container. Must be passed as a string in
the format ``policy[:retry_count]`` where ``policy`` is one of
``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count``
is an optional limit to the number of retries. The retry count is ignored
when using the ``always`` or ``unless-stopped`` restart policy.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- restart_policy: on-failure:5
bar:
docker_container.running:
- image: bar/baz:latest
- restart_policy: always
security_opt (or *security_opts*):
Security configuration for MLS systems such as SELinux and AppArmor.
Can be expressed as a comma-separated list or a YAML list. The below
two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- security_opt: apparmor:unconfined
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- security_opt:
- apparmor:unconfined
.. important::
Some security options can contain commas. In these cases, this
argument *must* be passed as a Python list, as splitting by comma
will result in an invalid configuration.
.. note::
See the documentation for security_opt at
https://docs.docker.com/engine/reference/run/#security-configuration
shm_size
Size of /dev/shm
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- shm_size: 128M
stop_signal
Specify the signal docker will send to the container when stopping.
Useful when running systemd as PID 1 inside the container.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- stop_signal: SIGRTMIN+3
.. note::
This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or
newer.
.. versionadded:: 2016.11.0
stop_timeout
Timeout to stop the container, in seconds
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- stop_timeout: 5
.. note::
In releases prior to 2017.7.0, this option was not set in the
container configuration, but rather this timeout was enforced only
when shutting down an existing container to replace it. To remove
the ambiguity, and to allow for the container to have a stop
timeout set for it, the old ``stop_timeout`` argument has been
renamed to ``shutdown_timeout``, while ``stop_timeout`` now refer's
to the container's configured stop timeout.
storage_opt
Storage driver options for the container. Can be either a list of
strings in the format ``option=value``, or a list of mappings between
option and value. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt:
- dm.basesize: 40G
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt: dm.basesize=40G
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt:
- dm.basesize=40G
sysctls (or *sysctl*)
Set sysctl options for the container. Can be either a list of strings
in the format ``option=value``, or a list of mappings between option
and value. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls:
- fs.nr_open: 1048576
- kernel.pid_max: 32768
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls: fs.nr_open=1048576,kernel.pid_max=32768
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls:
- fs.nr_open=1048576
- kernel.pid_max=32768
tmpfs
A map of container directories which should be replaced by tmpfs mounts
and their corresponding mount options.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- tmpfs:
- /run: rw,noexec,nosuid,size=65536k
tty : False
Attach TTYs
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- tty: True
ulimits
List of ulimits. These limits should be passed in the format
``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being
optional. Can be expressed as a comma-separated list or a YAML list.
The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ulimits: nofile=1024:1024,nproc=60
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ulimits:
- nofile=1024:1024
- nproc=60
user
User under which to run exec process
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- user: foo
userns_mode (or *user_ns_mode*)
Sets the user namsepace mode, when the user namespace remapping option
is enabled
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- userns_mode: host
volumes (or *volume*)
List of directories to expose as volumes. Can be expressed as a
comma-separated list or a YAML list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes: /mnt/vol1,/mnt/vol2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes:
- /mnt/vol1
- /mnt/vol2
volumes_from
Container names or IDs from which the container will get volumes. Can
be expressed as a comma-separated list or a YAML list. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes_from: foo
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes_from:
- foo
volume_driver
sets the container's volume driver
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volume_driver: foobar
working_dir (or *workdir*)
Working directory inside the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- working_dir: /var/log/nginx
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21f7970>, <ast.Constant object at 0x7da1b21f7940>, <ast.Constant object at 0x7da1b21f7910>, <ast.Constant object at 0x7da1b21f78e0>], [<ast.Name object at 0x7da1b21f78b0>, <ast.Dict object at 0x7da1b21f7880>, <ast.Constant object at 0x7da1b21f7850>, <ast.Constant object at 0x7da1b21f7820>]]
if compare[name[image] is constant[None]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[The 'image' argument is required]
return[name[ret]]
<ast.Try object at 0x7da1b21f7250>
variable[comments] assign[=] list[[]]
variable[send_signal] assign[=] call[name[kwargs].pop, parameter[constant[send_signal], constant[False]]]
<ast.Try object at 0x7da1b21f66b0>
variable[exists] assign[=] compare[name[current_image_id] is_not constant[None]]
variable[pre_state] assign[=] <ast.IfExp object at 0x7da1b21f5e70>
variable[skip_comparison] assign[=] <ast.BoolOp object at 0x7da1b23447f0>
if <ast.BoolOp object at 0x7da1b2345a80> begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
if name[force] begin[:]
call[call[name[ret]][constant[changes]]][constant[forced_update]] assign[=] constant[True]
call[name[comments].append, parameter[call[constant[Container '{0}' would be {1}].format, parameter[name[name], <ast.IfExp object at 0x7da1b21f5720>]]]]
return[call[name[_format_comments], parameter[name[ret], name[comments]]]]
<ast.Try object at 0x7da1b21f54e0>
def function[_replace, parameter[orig, new]]:
variable[rm_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b21aba30>], [<ast.Constant object at 0x7da1b21aba00>]]
if compare[name[shutdown_timeout] is_not constant[None]] begin[:]
call[name[rm_kwargs]][constant[timeout]] assign[=] name[shutdown_timeout]
call[call[call[name[ret]][constant[changes]].setdefault, parameter[constant[container_id], dictionary[[], []]]]][constant[removed]] assign[=] call[call[name[__salt__]][constant[docker.rm]], parameter[name[name]]]
<ast.Try object at 0x7da1b21ab4f0>
if name[result] begin[:]
call[name[comments].append, parameter[call[constant[Replaced container '{0}'].format, parameter[name[orig]]]]]
return[name[result]]
def function[_delete_temp_container, parameter[]]:
call[name[log].debug, parameter[constant[Removing temp container '%s'], name[temp_container_name]]]
call[call[name[__salt__]][constant[docker.rm]], parameter[name[temp_container_name]]]
variable[cleanup_temp] assign[=] <ast.UnaryOp object at 0x7da1b21aa980>
<ast.Try object at 0x7da1b21aa920>
if name[skip_comparison] begin[:]
if <ast.UnaryOp object at 0x7da18f00edd0> begin[:]
call[name[comments].append, parameter[call[constant[Created container '{0}'].format, parameter[name[name]]]]]
call[call[call[name[ret]][constant[changes]].setdefault, parameter[constant[container_id], dictionary[[], []]]]][constant[added]] assign[=] call[name[temp_container]][constant[Id]]
if <ast.BoolOp object at 0x7da18f00ef50> begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[comments].append, parameter[constant[Container would be started]]]
return[call[name[_format_comments], parameter[name[ret], name[comments]]]]
if <ast.BoolOp object at 0x7da1b21082e0> begin[:]
variable[contextkey] assign[=] call[constant[.].join, parameter[tuple[[<ast.Name object at 0x7da1b2108a60>, <ast.Constant object at 0x7da1b21094e0>]]]]
def function[_get_nets, parameter[]]:
if compare[name[contextkey] <ast.NotIn object at 0x7da2590d7190> name[__context__]] begin[:]
variable[new_container_info] assign[=] call[call[name[__salt__]][constant[docker.inspect_container]], parameter[name[name]]]
call[name[__context__]][name[contextkey]] assign[=] call[call[name[new_container_info].get, parameter[constant[NetworkSettings], dictionary[[], []]]].get, parameter[constant[Networks], dictionary[[], []]]]
return[call[name[__context__]][name[contextkey]]]
variable[autoip_keys] assign[=] call[call[name[__opts__]][constant[docker.compare_container_networks]].get, parameter[constant[automatic], list[[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b210afe0>, <ast.Name object at 0x7da1b2109c00>]]] in starred[call[name[six].iteritems, parameter[call[call[call[name[ret]][constant[changes]].get, parameter[constant[container], dictionary[[], []]]].get, parameter[constant[Networks], dictionary[[], []]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2069c90> begin[:]
for taget[name[key]] in starred[name[autoip_keys]] begin[:]
variable[val] assign[=] call[call[call[name[_get_nets], parameter[]].get, parameter[name[net_name], dictionary[[], []]]].get, parameter[name[key]]]
if name[val] begin[:]
call[name[net_changes]][name[key]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2069a50>, <ast.Constant object at 0x7da1b2069810>], [<ast.Constant object at 0x7da1b2068b50>, <ast.Name object at 0x7da1b20689d0>]]
<ast.Try object at 0x7da1b2068880>
call[name[__context__].pop, parameter[name[contextkey], constant[None]]]
if compare[name[pre_state] not_equal[!=] name[post_state]] begin[:]
call[call[name[ret]][constant[changes]]][constant[state]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2068490>, <ast.Constant object at 0x7da1b20683d0>], [<ast.Name object at 0x7da1b20683a0>, <ast.Name object at 0x7da1b206a710>]]
if compare[name[pre_state] is_not constant[None]] begin[:]
call[name[comments].append, parameter[call[constant[State changed from '{0}' to '{1}'].format, parameter[name[pre_state], name[post_state]]]]]
if <ast.BoolOp object at 0x7da1b2069360> begin[:]
call[name[comments].append, parameter[constant[Container has a new image]]]
call[call[name[ret]][constant[changes]]][constant[image]] assign[=] dictionary[[<ast.Constant object at 0x7da1b20685e0>, <ast.Constant object at 0x7da1b20685b0>], [<ast.Name object at 0x7da1b20686d0>, <ast.Name object at 0x7da1b2069ba0>]]
if <ast.BoolOp object at 0x7da1b2069b40> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[comments].append, parameter[constant[Container is not running]]]
return[call[name[_format_comments], parameter[name[ret], name[comments]]]] | keyword[def] identifier[running] ( identifier[name] ,
identifier[image] = keyword[None] ,
identifier[skip_translate] = keyword[None] ,
identifier[ignore_collisions] = keyword[False] ,
identifier[validate_ip_addrs] = keyword[True] ,
identifier[force] = keyword[False] ,
identifier[watch_action] = literal[string] ,
identifier[start] = keyword[True] ,
identifier[shutdown_timeout] = keyword[None] ,
identifier[client_timeout] = identifier[salt] . identifier[utils] . identifier[docker] . identifier[CLIENT_TIMEOUT] ,
identifier[networks] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] }
keyword[if] identifier[image] keyword[is] keyword[None] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[image] , identifier[six] . identifier[string_types] ):
identifier[image] = identifier[six] . identifier[text_type] ( identifier[image] )
keyword[try] :
identifier[configured_networks] = identifier[networks]
identifier[networks] = identifier[_parse_networks] ( identifier[networks] )
keyword[if] identifier[networks] :
identifier[kwargs] [ literal[string] ]= identifier[networks]
identifier[image_id] = identifier[_resolve_image] ( identifier[ret] , identifier[image] , identifier[client_timeout] )
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
keyword[if] identifier[exc] . identifier[info] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[exc] . identifier[info] )
keyword[else] :
identifier[ret] [ literal[string] ]= identifier[exc] . identifier[__str__] ()
keyword[return] identifier[ret]
identifier[comments] =[]
identifier[send_signal] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
keyword[try] :
identifier[current_image_id] = identifier[__salt__] [ literal[string] ]( identifier[name] )[ literal[string] ]
keyword[except] identifier[CommandExecutionError] :
identifier[current_image_id] = keyword[None]
keyword[except] identifier[KeyError] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[comments] . identifier[append] (
literal[string]
literal[string] . identifier[format] ( identifier[name] )
)
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
identifier[exists] = identifier[current_image_id] keyword[is] keyword[not] keyword[None]
identifier[pre_state] = identifier[__salt__] [ literal[string] ]( identifier[name] ) keyword[if] identifier[exists] keyword[else] keyword[None]
identifier[skip_comparison] = identifier[force] keyword[or] keyword[not] identifier[exists] keyword[or] identifier[current_image_id] != identifier[image_id]
keyword[if] identifier[skip_comparison] keyword[and] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
keyword[if] identifier[force] :
identifier[ret] [ literal[string] ][ literal[string] ]= keyword[True]
keyword[elif] identifier[current_image_id] != identifier[image_id] :
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[current_image_id] , literal[string] : identifier[image_id] }
identifier[comments] . identifier[append] (
literal[string] . identifier[format] (
identifier[name] ,
literal[string] keyword[if] keyword[not] identifier[exists] keyword[else] literal[string]
)
)
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[try] :
identifier[temp_container] = identifier[__salt__] [ literal[string] ](
identifier[image] ,
identifier[name] = identifier[name] keyword[if] keyword[not] identifier[exists] keyword[else] keyword[None] ,
identifier[skip_translate] = identifier[skip_translate] ,
identifier[ignore_collisions] = identifier[ignore_collisions] ,
identifier[validate_ip_addrs] = identifier[validate_ip_addrs] ,
identifier[client_timeout] = identifier[client_timeout] ,
** identifier[kwargs] )
identifier[temp_container_name] = identifier[temp_container] [ literal[string] ]
keyword[except] identifier[KeyError] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[comments] . identifier[append] (
literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[exc] )
)
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[msg] = identifier[exc] . identifier[__str__] ()
keyword[if] identifier[isinstance] ( identifier[exc] , identifier[CommandExecutionError] ) keyword[and] identifier[isinstance] ( identifier[exc] . identifier[info] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[exc] . identifier[info] :
identifier[msg] +=(
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[comments] . identifier[append] ( identifier[msg] )
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[def] identifier[_replace] ( identifier[orig] , identifier[new] ):
identifier[rm_kwargs] ={ literal[string] : keyword[True] }
keyword[if] identifier[shutdown_timeout] keyword[is] keyword[not] keyword[None] :
identifier[rm_kwargs] [ literal[string] ]= identifier[shutdown_timeout]
identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})[ literal[string] ]= identifier[__salt__] [ literal[string] ]( identifier[name] ,** identifier[rm_kwargs] )
keyword[try] :
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[new] , identifier[orig] )
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[result] = keyword[False]
identifier[comments] . identifier[append] ( literal[string] . identifier[format] ( identifier[exc] ))
keyword[if] identifier[result] :
identifier[comments] . identifier[append] ( literal[string] . identifier[format] ( identifier[orig] ))
keyword[else] :
identifier[comments] . identifier[append] ( literal[string] )
keyword[return] identifier[result]
keyword[def] identifier[_delete_temp_container] ():
identifier[log] . identifier[debug] ( literal[string] , identifier[temp_container_name] )
identifier[__salt__] [ literal[string] ]( identifier[temp_container_name] )
identifier[cleanup_temp] = keyword[not] identifier[skip_comparison]
keyword[try] :
identifier[pre_net_connect] = identifier[__salt__] [ literal[string] ](
identifier[name] keyword[if] identifier[exists] keyword[else] identifier[temp_container_name] )
keyword[for] identifier[net_name] , identifier[net_conf] keyword[in] identifier[six] . identifier[iteritems] ( identifier[networks] ):
keyword[try] :
identifier[__salt__] [ literal[string] ](
identifier[temp_container_name] ,
identifier[net_name] ,
** identifier[net_conf] )
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[comments] . identifier[append] ( identifier[exc] . identifier[__str__] ())
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
identifier[post_net_connect] = identifier[__salt__] [ literal[string] ](
identifier[temp_container_name] )
keyword[if] identifier[configured_networks] keyword[is] keyword[not] keyword[None] :
identifier[extra_nets] = identifier[set] (
identifier[post_net_connect] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,{})
)- identifier[set] ( identifier[networks] )
keyword[if] identifier[extra_nets] :
keyword[for] identifier[extra_net] keyword[in] identifier[extra_nets] :
identifier[__salt__] [ literal[string] ](
identifier[temp_container_name] ,
identifier[extra_net] )
identifier[post_net_connect] = identifier[__salt__] [ literal[string] ](
identifier[temp_container_name] )
identifier[net_changes] = identifier[__salt__] [ literal[string] ](
identifier[pre_net_connect] , identifier[post_net_connect] )
keyword[if] keyword[not] identifier[skip_comparison] :
identifier[container_changes] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[temp_container_name] ,
identifier[ignore] = literal[string] ,
)
keyword[if] identifier[container_changes] :
keyword[if] identifier[_check_diff] ( identifier[container_changes] ):
identifier[ret] . identifier[setdefault] ( literal[string] ,[]). identifier[append] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] (
literal[string] keyword[if] identifier[__opts__] [ literal[string] ] keyword[else] literal[string]
)
)
identifier[changes_ptr] = identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})
identifier[changes_ptr] . identifier[update] ( identifier[container_changes] )
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[comments] . identifier[append] (
literal[string] . identifier[format] (
identifier[name] ,
literal[string] keyword[if] keyword[not] identifier[exists] keyword[else] literal[string]
)
)
keyword[else] :
identifier[cleanup_temp] = keyword[False]
keyword[if] keyword[not] identifier[_replace] ( identifier[name] , identifier[temp_container_name] ):
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})[ literal[string] ]= identifier[temp_container] [ literal[string] ]
keyword[else] :
keyword[if] identifier[send_signal] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[comments] . identifier[append] (
literal[string] . identifier[format] (
identifier[watch_action]
)
)
keyword[else] :
keyword[try] :
identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[signal] = identifier[watch_action] )
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[comments] . identifier[append] (
literal[string] . identifier[format] ( identifier[exc] )
)
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[else] :
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[watch_action]
identifier[comments] . identifier[append] (
literal[string] . identifier[format] ( identifier[watch_action] )
)
keyword[elif] identifier[container_changes] :
keyword[if] keyword[not] identifier[comments] :
identifier[log] . identifier[warning] (
literal[string]
literal[string] , identifier[name]
)
identifier[comments] . identifier[append] (
literal[string] . identifier[format] (
identifier[name] ,
literal[string] keyword[if] identifier[__opts__] [ literal[string] ] keyword[else] literal[string]
)
)
keyword[else] :
identifier[comments] . identifier[append] (
literal[string]
. identifier[format] ( identifier[name] )
)
keyword[if] identifier[net_changes] :
identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})[ literal[string] ]= identifier[net_changes]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[comments] . identifier[append] ( literal[string] )
keyword[elif] identifier[cleanup_temp] :
identifier[network_failure] = keyword[False]
keyword[for] identifier[net_name] keyword[in] identifier[sorted] ( identifier[net_changes] ):
identifier[errors] =[]
identifier[disconnected] = identifier[connected] = keyword[False]
keyword[try] :
keyword[if] identifier[name] keyword[in] identifier[__salt__] [ literal[string] ]( identifier[net_name] ):
identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[net_name] )
identifier[disconnected] = keyword[True]
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[errors] . identifier[append] ( identifier[exc] . identifier[__str__] ())
keyword[if] identifier[net_name] keyword[in] identifier[networks] :
keyword[try] :
identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[net_name] ,
** identifier[networks] [ identifier[net_name] ])
identifier[connected] = keyword[True]
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[errors] . identifier[append] ( identifier[exc] . identifier[__str__] ())
keyword[if] identifier[disconnected] :
keyword[for] identifier[item] keyword[in] identifier[list] ( identifier[net_changes] [ identifier[net_name] ]):
keyword[if] identifier[net_changes] [ identifier[net_name] ][ identifier[item] ][ literal[string] ] keyword[is] keyword[None] :
keyword[del] identifier[net_changes] [ identifier[net_name] ][ identifier[item] ]
keyword[else] :
identifier[net_changes] [ identifier[net_name] ][ identifier[item] ][ literal[string] ]= keyword[None]
keyword[if] identifier[errors] :
identifier[comments] . identifier[extend] ( identifier[errors] )
identifier[network_failure] = keyword[True]
identifier[ret] [ literal[string] ]. identifier[setdefault] (
literal[string] ,{}). identifier[setdefault] (
literal[string] ,{})[ identifier[net_name] ]= identifier[net_changes] [ identifier[net_name] ]
keyword[if] identifier[disconnected] keyword[and] identifier[connected] :
identifier[comments] . identifier[append] (
literal[string]
literal[string] . identifier[format] ( identifier[net_name] )
)
keyword[elif] identifier[disconnected] :
identifier[comments] . identifier[append] (
literal[string] . identifier[format] (
identifier[net_name]
)
)
keyword[elif] identifier[connected] :
identifier[comments] . identifier[append] (
literal[string] . identifier[format] ( identifier[net_name] )
)
keyword[if] identifier[network_failure] :
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[finally] :
keyword[if] identifier[cleanup_temp] :
identifier[_delete_temp_container] ()
keyword[if] identifier[skip_comparison] :
keyword[if] keyword[not] identifier[exists] :
identifier[comments] . identifier[append] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[else] :
keyword[if] keyword[not] identifier[_replace] ( identifier[name] , identifier[temp_container] ):
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})[ literal[string] ]= identifier[temp_container] [ literal[string] ]
keyword[if] keyword[not] identifier[cleanup_temp] keyword[and] ( keyword[not] identifier[exists] keyword[or] ( identifier[exists] keyword[and] identifier[start] )) keyword[or] ( identifier[start] keyword[and] identifier[cleanup_temp] keyword[and] identifier[pre_state] != literal[string] ):
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[comments] . identifier[append] ( literal[string] )
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[else] :
keyword[try] :
identifier[post_state] = identifier[__salt__] [ literal[string] ]( identifier[name] )[ literal[string] ][ literal[string] ]
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[comments] . identifier[append] (
literal[string] . identifier[format] ( identifier[name] , identifier[exc] )
)
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] )
keyword[else] :
identifier[post_state] = identifier[__salt__] [ literal[string] ]( identifier[name] )
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ] keyword[and] identifier[post_state] == literal[string] :
identifier[contextkey] = literal[string] . identifier[join] (( identifier[name] , literal[string] ))
keyword[def] identifier[_get_nets] ():
keyword[if] identifier[contextkey] keyword[not] keyword[in] identifier[__context__] :
identifier[new_container_info] = identifier[__salt__] [ literal[string] ]( identifier[name] )
identifier[__context__] [ identifier[contextkey] ]= identifier[new_container_info] . identifier[get] (
literal[string] ,{}). identifier[get] ( literal[string] ,{})
keyword[return] identifier[__context__] [ identifier[contextkey] ]
identifier[autoip_keys] = identifier[__opts__] [ literal[string] ]. identifier[get] ( literal[string] ,[])
keyword[for] identifier[net_name] , identifier[net_changes] keyword[in] identifier[six] . identifier[iteritems] (
identifier[ret] [ literal[string] ]. identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,{})):
keyword[if] literal[string] keyword[in] identifier[net_changes] keyword[and] identifier[net_changes] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[for] identifier[key] keyword[in] identifier[autoip_keys] :
identifier[val] = identifier[_get_nets] (). identifier[get] ( identifier[net_name] ,{}). identifier[get] ( identifier[key] )
keyword[if] identifier[val] :
identifier[net_changes] [ identifier[key] ]={ literal[string] : keyword[None] , literal[string] : identifier[val] }
keyword[try] :
identifier[net_changes] . identifier[pop] ( literal[string] )
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[__context__] . identifier[pop] ( identifier[contextkey] , keyword[None] )
keyword[if] identifier[pre_state] != identifier[post_state] :
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[pre_state] , literal[string] : identifier[post_state] }
keyword[if] identifier[pre_state] keyword[is] keyword[not] keyword[None] :
identifier[comments] . identifier[append] (
literal[string] . identifier[format] (
identifier[pre_state] , identifier[post_state]
)
)
keyword[if] identifier[exists] keyword[and] identifier[current_image_id] != identifier[image_id] :
identifier[comments] . identifier[append] ( literal[string] )
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[current_image_id] , literal[string] : identifier[image_id] }
keyword[if] identifier[post_state] != literal[string] keyword[and] identifier[start] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[comments] . identifier[append] ( literal[string] )
keyword[return] identifier[_format_comments] ( identifier[ret] , identifier[comments] ) | def running(name, image=None, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, force=False, watch_action='force', start=True, shutdown_timeout=None, client_timeout=salt.utils.docker.CLIENT_TIMEOUT, networks=None, **kwargs):
"""
Ensure that a container with a specific configuration is present and
running
name
Name of the container
image
Image to use for the container
.. note::
This state will pull the image if it is not present. However, if
the image needs to be built from a Dockerfile or loaded from a
saved image, or if you would like to use requisites to trigger a
replacement of the container when the image is updated, then the
:py:func:`docker_image.present
<salt.states.dockermod.image_present>` state should be used to
manage the image.
.. versionchanged:: 2018.3.0
If no tag is specified in the image name, and nothing matching the
specified image is pulled on the minion, the ``docker pull`` that
retrieves the image will pull *all tags* for the image. A tag of
``latest`` is no longer implicit for the pull. For this reason, it
is recommended to specify the image in ``repo:tag`` notation.
.. _docker-container-running-skip-translate:
skip_translate
This function translates Salt CLI or SLS input into the format which
docker-py_ expects. However, in the event that Salt's translation logic
fails (due to potential changes in the Docker Remote API, or to bugs in
the translation code), this argument can be used to exert granular
control over which arguments are translated and which are not.
Pass this argument as a comma-separated list (or Python list) of
arguments, and translation for each passed argument name will be
skipped. Alternatively, pass ``True`` and *all* translation will be
skipped.
Skipping translation allows for arguments to be formatted directly in
the format which docker-py_ expects. This allows for API changes and
other issues to be more easily worked around. An example of using this
option to skip translation would be:
For example, imagine that there is an issue with processing the
``port_bindings`` argument, and the following configuration no longer
works as expected:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: 7.3.1611
- port_bindings:
- 10.2.9.10:8080:80
By using ``skip_translate``, you can forego the input translation and
configure the port binding in the format docker-py_ needs:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: 7.3.1611
- skip_translate: port_bindings
- port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314}
See the following links for more information:
- `docker-py Low-level API`_
- `Docker Engine API`_
.. _docker-py: https://pypi.python.org/pypi/docker-py
.. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container
.. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate
ignore_collisions : False
Since many of docker-py_'s arguments differ in name from their CLI
counterparts (with which most Docker users are more familiar), Salt
detects usage of these and aliases them to the docker-py_ version of
that argument so that both CLI and API versions of a given argument are
supported. However, if both the alias and the docker-py_ version of the
same argument (e.g. ``env`` and ``environment``) are used, an error
will be raised. Set this argument to ``True`` to suppress these errors
and keep the docker-py_ version of the argument.
validate_ip_addrs : True
For parameters which accept IP addresses as input, IP address
validation will be performed. To disable, set this to ``False``
force : False
Set this parameter to ``True`` to force Salt to re-create the container
irrespective of whether or not it is configured as desired.
watch_action : force
Control what type of action is taken when this state :ref:`watches
<requisites-watch>` another state that has changes. The default action
is ``force``, which runs the state with ``force`` set to ``True``,
triggering a rebuild of the container.
If any other value is passed, it will be assumed to be a kill signal.
If the container matches the specified configuration, and is running,
then the action will be to send that signal to the container. Kill
signals can be either strings or numbers, and are defined in the
**Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7
signal`` on a Linux host to browse this manpage. For example:
.. code-block:: yaml
mycontainer:
docker_container.running:
- image: busybox
- watch_action: SIGHUP
- watch:
- file: some_file
.. note::
If the container differs from the specified configuration, or is
not running, then instead of sending a signal to the container, the
container will be re-created/started and no signal will be sent.
start : True
Set to ``False`` to suppress starting of the container if it exists,
matches the desired configuration, but is not running. This is useful
for data-only containers, or for non-daemonized container processes,
such as the Django ``migrate`` and ``collectstatic`` commands. In
instances such as this, the container only needs to be started the
first time.
shutdown_timeout
If the container needs to be replaced, the container will be stopped
using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a
``shutdown_timeout`` is not set, and the container was created using
``stop_timeout``, that timeout will be used. If neither of these values
were set, then a timeout of 10 seconds will be used.
.. versionchanged:: 2017.7.0
This option was renamed from ``stop_timeout`` to
``shutdown_timeout`` to accommodate the ``stop_timeout`` container
configuration setting.
client_timeout : 60
Timeout in seconds for the Docker client. This is not a timeout for
this function, but for receiving a response from the API.
.. note::
This is only used if Salt needs to pull the requested image.
.. _salt-states-docker-container-network-management:
**NETWORK MANAGEMENT**
.. versionadded:: 2018.3.0
.. versionchanged:: 2019.2.0
If the ``networks`` option is used, any networks (including the default
``bridge`` network) which are not specified will be disconnected.
The ``networks`` argument can be used to ensure that a container is
attached to one or more networks. Optionally, arguments can be passed to
the networks. In the example below, ``net1`` is being configured with
arguments, while ``net2`` and ``bridge`` are being configured *without*
arguments:
.. code-block:: yaml
foo:
docker_container.running:
- image: myuser/myimage:foo
- networks:
- net1:
- aliases:
- bar
- baz
- ipv4_address: 10.0.20.50
- net2
- bridge
- require:
- docker_network: net1
- docker_network: net2
The supported arguments are the ones from the docker-py's
`connect_container_to_network`_ function (other than ``container`` and
``net_id``).
.. important::
Unlike with the arguments described in the **CONTAINER CONFIGURATION
PARAMETERS** section below, these network configuration parameters are
not translated at all. Consult the `connect_container_to_network`_
documentation for the correct type/format of data to pass.
.. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network
To start a container with no network connectivity (only possible in
2019.2.0 and later) pass this option as an empty list. For example:
.. code-block:: yaml
foo:
docker_container.running:
- image: myuser/myimage:foo
- networks: []
**CONTAINER CONFIGURATION PARAMETERS**
auto_remove (or *rm*) : False
Enable auto-removal of the container on daemon side when the
container’s process exits (analogous to running a docker container with
``--rm`` on the CLI).
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- auto_remove: True
binds
Files/directories to bind mount. Each bind mount should be passed in
one of the following formats:
- ``<host_path>:<container_path>`` - ``host_path`` is mounted within
the container as ``container_path`` with read-write access.
- ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is
mounted within the container as ``container_path`` with read-write
access. Additionally, the specified selinux context will be set
within the container.
- ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is
mounted within the container as ``container_path``, with the
read-only or read-write setting explicitly defined.
- ``<host_path>:<container_path>:<read_only>,<selinux_context>`` -
``host_path`` is mounted within the container as ``container_path``,
with the read-only or read-write setting explicitly defined.
Additionally, the specified selinux context will be set within the
container.
``<read_only>`` can be either ``rw`` for read-write access, or ``ro``
for read-only access. When omitted, it is assumed to be read-write.
``<selinux_context>`` can be ``z`` if the volume is shared between
multiple containers, or ``Z`` if the volume should be private.
.. note::
When both ``<read_only>`` and ``<selinux_context>`` are specified,
there must be a comma before ``<selinux_context>``.
Binds can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro
- /home/myuser/conf/foo.conf:/etc/foo.conf:rw
However, in cases where both ro/rw and an selinux context are combined,
the only option is to use a YAML list, like so:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro,Z
- /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z
Since the second bind in the previous example is mounted read-write,
the ``rw`` and comma can be dropped. For example:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- binds:
- /srv/www:/var/www:ro,Z
- /home/myuser/conf/foo.conf:/etc/foo.conf:Z
blkio_weight
Block IO weight (relative weight), accepts a weight value between 10
and 1000.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- blkio_weight: 100
blkio_weight_device
Block IO weight (relative device weight), specified as a list of
expressions in the format ``PATH:RATE``
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- blkio_weight_device: /dev/sda:100
cap_add
List of capabilities to add within the container. Can be expressed as a
comma-separated list or a Python list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_add: SYS_ADMIN,MKNOD
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_add:
- SYS_ADMIN
- MKNOD
.. note::
This option requires Docker 1.2.0 or newer.
cap_drop
List of capabilities to drop within the container. Can be expressed as
a comma-separated list or a Python list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_drop: SYS_ADMIN,MKNOD
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cap_drop:
- SYS_ADMIN
- MKNOD
.. note::
This option requires Docker 1.2.0 or newer.
command (or *cmd*)
Command to run in the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- command: bash
cpuset_cpus (or *cpuset*)
CPUs on which to allow execution, specified as a string
containing a range (e.g. ``0-3``) or a comma-separated list of CPUs
(e.g. ``0,1``).
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpuset_cpus: "0,1"
cpuset_mems
Memory nodes on which to allow execution, specified as a string
containing a range (e.g. ``0-3``) or a comma-separated list of MEMs
(e.g. ``0,1``). Only effective on NUMA systems.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpuset_mems: "0,1"
cpu_group
The length of a CPU period in microseconds
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_group: 100000
cpu_period
Microseconds of CPU time that the container can get in a CPU period
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_period: 50000
cpu_shares
CPU shares (relative weight), specified as an integer between 2 and 1024.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- cpu_shares: 512
detach : False
If ``True``, run the container's command in the background (daemon
mode)
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- detach: True
devices
List of host devices to expose within the container. Can be expressed
as a comma-separated list or a YAML list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices:
- /dev/net/tun
- /dev/xvda1:/dev/xvda1
- /dev/xvdb1:/dev/xvdb1:r
device_read_bps
Limit read rate (bytes per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
integer number of bytes, or a string ending in ``kb``, ``mb``, or
``gb``. Can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_bps:
- /dev/sda:1mb
- /dev/sdb:5mb
device_read_iops
Limit read rate (I/O per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
of I/O operations. Can be expressed as a comma-separated list or a YAML
list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops: /dev/sda:1000,/dev/sdb:500
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_read_iops:
- /dev/sda:1000
- /dev/sdb:500
device_write_bps
Limit write rate (bytes per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
integer number of bytes, or a string ending in ``kb``, ``mb``, or
``gb``. Can be expressed as a comma-separated list or a YAML list. The
below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_bps:
- /dev/sda:1mb
- /dev/sdb:5mb
device_write_iops
Limit write rate (I/O per second) from a device, specified as a list
of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
of I/O operations. Can be expressed as a comma-separated list or a
YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_iops: /dev/sda:1000,/dev/sdb:500
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- devices_write_iops:
- /dev/sda:1000
- /dev/sdb:500
dns
List of DNS nameservers. Can be expressed as a comma-separated list or
a YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns: 8.8.8.8,8.8.4.4
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns:
- 8.8.8.8
- 8.8.4.4
.. note::
To skip IP address validation, use ``validate_ip_addrs=False``
dns_opt
Additional options to be added to the container’s ``resolv.conf`` file.
Can be expressed as a comma-separated list or a YAML list. The below
two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_opt: ndots:9
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_opt:
- ndots:9
dns_search
List of DNS search domains. Can be expressed as a comma-separated list
or a YAML list. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_search: foo1.domain.tld,foo2.domain.tld
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- dns_search:
- foo1.domain.tld
- foo2.domain.tld
domainname
The domain name to use for the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- domainname: domain.tld
entrypoint
Entrypoint for the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- entrypoint: "mycmd --arg1 --arg2"
This argument can also be specified as a list:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- entrypoint:
- mycmd
- --arg1
- --arg2
environment
Either a list of variable/value mappings, or a list of strings in the
format ``VARNAME=value``. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment:
- VAR1: value
- VAR2: value
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment: 'VAR1=value,VAR2=value'
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- environment:
- VAR1=value
- VAR2=value
extra_hosts
Additional hosts to add to the container's /etc/hosts file. Can be
expressed as a comma-separated list or a Python list. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- extra_hosts: web1:10.9.8.7,web2:10.9.8.8
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- extra_hosts:
- web1:10.9.8.7
- web2:10.9.8.8
.. note::
To skip IP address validation, use ``validate_ip_addrs=False``
.. note::
This option requires Docker 1.3.0 or newer.
group_add
List of additional group names and/or IDs that the container process
will run as. Can be expressed as a comma-separated list or a YAML list.
The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- group_add: web,network
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- group_add:
- web
- network
hostname
Hostname of the container. If not provided, the value passed as the
container's ``name`` will be used for the hostname.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- hostname: web1
.. warning::
``hostname`` cannot be set if ``network_mode`` is set to ``host``.
The below example will result in an error:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- hostname: web1
- network_mode: host
interactive (or *stdin_open*) : False
Leave stdin open, even if not attached
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- interactive: True
ipc_mode (or *ipc*)
Set the IPC mode for the container. The default behavior is to create a
private IPC namespace for the container, but this option can be
used to change that behavior:
- ``container:<container_name_or_id>`` reuses another container shared
memory, semaphores and message queues
- ``host``: use the host's shared memory, semaphores and message queues
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ipc_mode: container:foo
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ipc_mode: host
.. warning::
Using ``host`` gives the container full access to local shared
memory and is therefore considered insecure.
isolation
Specifies the type of isolation technology used by containers
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- isolation: hyperv
.. note::
The default value on Windows server is ``process``, while the
default value on Windows client is ``hyperv``. On Linux, only
``default`` is supported.
labels
Add metadata to the container. Labels can be set both with and without
values, and labels with values can be passed either as ``key=value`` or
``key: value`` pairs. For example, while the below would be very
confusing to read, it is technically valid, and demonstrates the
different ways in which labels can be passed:
.. code-block:: yaml
mynet:
docker_network.present:
- labels:
- foo
- bar=baz
- hello: world
The labels can also simply be passed as a YAML dictionary, though this
can be error-prone due to some :ref:`idiosyncrasies
<yaml-idiosyncrasies>` with how PyYAML loads nested data structures:
.. code-block:: yaml
foo:
docker_network.present:
- labels:
foo: ''
bar: baz
hello: world
.. versionchanged:: 2018.3.0
Methods for specifying labels can now be mixed. Earlier releases
required either labels with or without values.
links
Link this container to another. Links can be specified as a list of
mappings or a comma-separated or Python list of expressions in the
format ``<container_name_or_id>:<link_alias>``. The below three
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links:
- web1: link1
- web2: link2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links: web1:link1,web2:link2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- links:
- web1:link1
- web2:link2
log_driver and log_opt
Set container's logging driver and options to configure that driver.
Requires Docker 1.6 or newer.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt:
- syslog-address: tcp://192.168.0.42
- syslog-facility: daemon
The ``log_opt`` can also be expressed as a comma-separated or YAML list
of ``key=value`` pairs. The below two examples are equivalent to the
above one:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- log_driver: syslog
- log_opt:
- syslog-address=tcp://192.168.0.42
- syslog-facility=daemon
.. note::
The logging driver feature was improved in Docker 1.13 introducing
option name changes. Please see Docker's
`Configure logging drivers`_ documentation for more information.
.. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/
lxc_conf
Additional LXC configuration parameters to set before starting the
container. Either a list of variable/value mappings, or a list of
strings in the format ``VARNAME=value``. The below three examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf:
- lxc.utsname: docker
- lxc.arch: x86_64
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf: lxc.utsname=docker,lxc.arch=x86_64
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- lxc_conf:
- lxc.utsname=docker
- lxc.arch=x86_64
.. note::
These LXC configuration parameters will only have the desired
effect if the container is using the LXC execution driver, which
has been deprecated for some time.
mac_address
MAC address to use for the container. If not specified, a random MAC
address will be used.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mac_address: 01:23:45:67:89:0a
mem_limit (or *memory*) : 0
Memory limit. Can be specified in bytes or using single-letter units
(i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no
memory limit.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mem_limit: 512M
mem_swappiness
Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- mem_swappiness: 60
memswap_limit (or *memory_swap*) : -1
Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A
value of ``0`` means no swap limit.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- memswap_limit: 1G
network_disabled : False
If ``True``, networking will be disabled within the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_disabled: True
network_mode : bridge
One of the following:
- ``bridge`` - Creates a new network stack for the container on the
docker bridge
- ``none`` - No networking (equivalent of the Docker CLI argument
``--net=none``). Not to be confused with Python's ``None``.
- ``container:<name_or_id>`` - Reuses another container's network stack
- ``host`` - Use the host's network stack inside the container
- Any name that identifies an existing network that might be created
with ``docker.network_present``.
.. warning::
Using ``host`` mode gives the container full access to the
hosts system's services (such as D-bus), and is therefore
considered insecure.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_mode: "none"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- network_mode: container:web1
oom_kill_disable
Whether to disable OOM killer
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- oom_kill_disable: False
oom_score_adj
An integer value containing the score given to the container in order
to tune OOM killer preferences
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- oom_score_adj: 500
pid_mode
Set to ``host`` to use the host container's PID namespace within the
container. Requires Docker 1.5.0 or newer.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- pid_mode: host
.. note::
This option requires Docker 1.5.0 or newer.
pids_limit
Set the container's PID limit. Set to ``-1`` for unlimited.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- pids_limit: 2000
port_bindings (or *publish*)
Bind exposed ports. Port bindings should be passed in the same way as
the ``--publish`` argument to the ``docker run`` CLI command:
- ``ip:hostPort:containerPort`` - Bind a specific IP and port on the
host to a specific port within the container.
- ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a
specific port within the container.
- ``hostPort:containerPort`` - Bind a specific port on all of the
host's interfaces to a specific port within the container.
- ``containerPort`` - Bind an ephemeral port on all of the host's
interfaces to a specific port within the container.
Multiple bindings can be separated by commas, or expressed as a YAML
list, and port ranges can be defined using dashes. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080"
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- port_bindings:
- 4505-4506:14505-14506
- 2123:2123/udp
- 8080
.. note::
When specifying a protocol, it must be passed in the
``containerPort`` value, as seen in the examples above.
ports
A list of ports to expose on the container. Can either be a
comma-separated list or a YAML list. If the protocol is omitted, the
port will be assumed to be a TCP port. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports: 1111,2222/udp
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports:
- 1111
- 2222/udp
privileged : False
If ``True``, runs the exec process with extended privileges
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- privileged: True
publish_all_ports (or *publish_all*) : False
Publish all ports to the host
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ports: 8080
- publish_all_ports: True
read_only : False
If ``True``, mount the container’s root filesystem as read only
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- read_only: True
restart_policy (or *restart*)
Set a restart policy for the container. Must be passed as a string in
the format ``policy[:retry_count]`` where ``policy`` is one of
``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count``
is an optional limit to the number of retries. The retry count is ignored
when using the ``always`` or ``unless-stopped`` restart policy.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- restart_policy: on-failure:5
bar:
docker_container.running:
- image: bar/baz:latest
- restart_policy: always
security_opt (or *security_opts*):
Security configuration for MLS systems such as SELinux and AppArmor.
Can be expressed as a comma-separated list or a YAML list. The below
two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- security_opt: apparmor:unconfined
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- security_opt:
- apparmor:unconfined
.. important::
Some security options can contain commas. In these cases, this
argument *must* be passed as a Python list, as splitting by comma
will result in an invalid configuration.
.. note::
See the documentation for security_opt at
https://docs.docker.com/engine/reference/run/#security-configuration
shm_size
Size of /dev/shm
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- shm_size: 128M
stop_signal
Specify the signal docker will send to the container when stopping.
Useful when running systemd as PID 1 inside the container.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- stop_signal: SIGRTMIN+3
.. note::
This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or
newer.
.. versionadded:: 2016.11.0
stop_timeout
Timeout to stop the container, in seconds
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- stop_timeout: 5
.. note::
In releases prior to 2017.7.0, this option was not set in the
container configuration, but rather this timeout was enforced only
when shutting down an existing container to replace it. To remove
the ambiguity, and to allow for the container to have a stop
timeout set for it, the old ``stop_timeout`` argument has been
renamed to ``shutdown_timeout``, while ``stop_timeout`` now refer's
to the container's configured stop timeout.
storage_opt
Storage driver options for the container. Can be either a list of
strings in the format ``option=value``, or a list of mappings between
option and value. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt:
- dm.basesize: 40G
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt: dm.basesize=40G
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- storage_opt:
- dm.basesize=40G
sysctls (or *sysctl*)
Set sysctl options for the container. Can be either a list of strings
in the format ``option=value``, or a list of mappings between option
and value. The below three examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls:
- fs.nr_open: 1048576
- kernel.pid_max: 32768
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls: fs.nr_open=1048576,kernel.pid_max=32768
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- sysctls:
- fs.nr_open=1048576
- kernel.pid_max=32768
tmpfs
A map of container directories which should be replaced by tmpfs mounts
and their corresponding mount options.
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- tmpfs:
- /run: rw,noexec,nosuid,size=65536k
tty : False
Attach TTYs
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- tty: True
ulimits
List of ulimits. These limits should be passed in the format
``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being
optional. Can be expressed as a comma-separated list or a YAML list.
The below two examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ulimits: nofile=1024:1024,nproc=60
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- ulimits:
- nofile=1024:1024
- nproc=60
user
User under which to run exec process
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- user: foo
userns_mode (or *user_ns_mode*)
Sets the user namsepace mode, when the user namespace remapping option
is enabled
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- userns_mode: host
volumes (or *volume*)
List of directories to expose as volumes. Can be expressed as a
comma-separated list or a YAML list. The below two examples are
equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes: /mnt/vol1,/mnt/vol2
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes:
- /mnt/vol1
- /mnt/vol2
volumes_from
Container names or IDs from which the container will get volumes. Can
be expressed as a comma-separated list or a YAML list. The below two
examples are equivalent:
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes_from: foo
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volumes_from:
- foo
volume_driver
sets the container's volume driver
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- volume_driver: foobar
working_dir (or *workdir*)
Working directory inside the container
.. code-block:: yaml
foo:
docker_container.running:
- image: bar/baz:latest
- working_dir: /var/log/nginx
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if image is None:
ret['result'] = False
ret['comment'] = "The 'image' argument is required"
return ret # depends on [control=['if'], data=[]]
elif not isinstance(image, six.string_types):
image = six.text_type(image) # depends on [control=['if'], data=[]]
try:
# Since we're rewriting the "networks" value below, save the original
# value here.
configured_networks = networks
networks = _parse_networks(networks)
if networks:
kwargs['networks'] = networks # depends on [control=['if'], data=[]]
image_id = _resolve_image(ret, image, client_timeout) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
ret['result'] = False
if exc.info is not None:
return _format_comments(ret, exc.info) # depends on [control=['if'], data=[]]
else:
ret['comment'] = exc.__str__()
return ret # depends on [control=['except'], data=['exc']]
comments = []
# Pop off the send_signal argument passed by the watch requisite
send_signal = kwargs.pop('send_signal', False)
try:
current_image_id = __salt__['docker.inspect_container'](name)['Image'] # depends on [control=['try'], data=[]]
except CommandExecutionError:
current_image_id = None # depends on [control=['except'], data=[]]
except KeyError:
ret['result'] = False
comments.append("Unable to detect current image for container '{0}'. This might be due to a change in the Docker API.".format(name))
return _format_comments(ret, comments) # depends on [control=['except'], data=[]]
# Shorthand to make the below code more understandable
exists = current_image_id is not None
pre_state = __salt__['docker.state'](name) if exists else None
# If skip_comparison is True, we're definitely going to be using the temp
# container as the new container (because we're forcing the change, or
# because the image IDs differ). If False, we'll need to perform a
# comparison between it and the new container.
skip_comparison = force or not exists or current_image_id != image_id
if skip_comparison and __opts__['test']:
ret['result'] = None
if force:
ret['changes']['forced_update'] = True # depends on [control=['if'], data=[]]
elif current_image_id != image_id:
ret['changes']['image'] = {'old': current_image_id, 'new': image_id} # depends on [control=['if'], data=['current_image_id', 'image_id']]
comments.append("Container '{0}' would be {1}".format(name, 'created' if not exists else 'replaced'))
return _format_comments(ret, comments) # depends on [control=['if'], data=[]]
# Create temp container (or just create the named container if the
# container does not already exist)
try:
temp_container = __salt__['docker.create'](image, name=name if not exists else None, skip_translate=skip_translate, ignore_collisions=ignore_collisions, validate_ip_addrs=validate_ip_addrs, client_timeout=client_timeout, **kwargs)
temp_container_name = temp_container['Name'] # depends on [control=['try'], data=[]]
except KeyError as exc:
ret['result'] = False
comments.append("Key '{0}' missing from API response, this may be due to a change in the Docker Remote API. Please report this on the SaltStack issue tracker if it has not already been reported.".format(exc))
return _format_comments(ret, comments) # depends on [control=['except'], data=['exc']]
except Exception as exc:
ret['result'] = False
msg = exc.__str__()
if isinstance(exc, CommandExecutionError) and isinstance(exc.info, dict) and ('invalid' in exc.info):
msg += '\n\nIf you feel this information is incorrect, the skip_translate argument can be used to skip input translation for the argument(s) identified as invalid. See the documentation for details.' # depends on [control=['if'], data=[]]
comments.append(msg)
return _format_comments(ret, comments) # depends on [control=['except'], data=['exc']]
def _replace(orig, new):
rm_kwargs = {'stop': True}
if shutdown_timeout is not None:
rm_kwargs['timeout'] = shutdown_timeout # depends on [control=['if'], data=['shutdown_timeout']]
ret['changes'].setdefault('container_id', {})['removed'] = __salt__['docker.rm'](name, **rm_kwargs)
try:
result = __salt__['docker.rename'](new, orig) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
result = False
comments.append('Failed to rename temp container: {0}'.format(exc)) # depends on [control=['except'], data=['exc']]
if result:
comments.append("Replaced container '{0}'".format(orig)) # depends on [control=['if'], data=[]]
else:
comments.append("Failed to replace container '{0}'")
return result
def _delete_temp_container():
log.debug("Removing temp container '%s'", temp_container_name)
__salt__['docker.rm'](temp_container_name)
# If we're not skipping the comparison, then the assumption is that
# temp_container will be discarded, unless the comparison reveals
# differences, in which case we'll set cleanup_temp = False to prevent it
# from being cleaned.
cleanup_temp = not skip_comparison
try:
pre_net_connect = __salt__['docker.inspect_container'](name if exists else temp_container_name)
for (net_name, net_conf) in six.iteritems(networks):
try:
__salt__['docker.connect_container_to_network'](temp_container_name, net_name, **net_conf) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
# Shouldn't happen, stopped docker containers can be
# attached to networks even if the static IP lies outside
# of the network's subnet. An exception will be raised once
# you try to start the container, however.
ret['result'] = False
comments.append(exc.__str__())
return _format_comments(ret, comments) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=[]]
post_net_connect = __salt__['docker.inspect_container'](temp_container_name)
if configured_networks is not None:
# Use set arithmetic to determine the networks which are connected
# but not explicitly defined. They will be disconnected below. Note
# that we check configured_networks because it represents the
# original (unparsed) network configuration. When no networks
# argument is used, the parsed networks will be an empty list, so
# it's not sufficient to do a boolean check on the "networks"
# variable.
extra_nets = set(post_net_connect.get('NetworkSettings', {}).get('Networks', {})) - set(networks)
if extra_nets:
for extra_net in extra_nets:
__salt__['docker.disconnect_container_from_network'](temp_container_name, extra_net) # depends on [control=['for'], data=['extra_net']]
# We've made changes, so we need to inspect the container again
post_net_connect = __salt__['docker.inspect_container'](temp_container_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
net_changes = __salt__['docker.compare_container_networks'](pre_net_connect, post_net_connect)
if not skip_comparison:
container_changes = __salt__['docker.compare_containers'](name, temp_container_name, ignore='Hostname')
if container_changes:
if _check_diff(container_changes):
ret.setdefault('warnings', []).append('The detected changes may be due to incorrect handling of arguments in earlier Salt releases. If this warning persists after running the state again{0}, and no changes were made to the SLS file, then please report this.'.format(' without test=True' if __opts__['test'] else '')) # depends on [control=['if'], data=[]]
changes_ptr = ret['changes'].setdefault('container', {})
changes_ptr.update(container_changes)
if __opts__['test']:
ret['result'] = None
comments.append("Container '{0}' would be {1}".format(name, 'created' if not exists else 'replaced')) # depends on [control=['if'], data=[]]
else:
# We don't want to clean the temp container, we'll be
# replacing the existing one with it.
cleanup_temp = False
# Replace the container
if not _replace(name, temp_container_name):
ret['result'] = False
return _format_comments(ret, comments) # depends on [control=['if'], data=[]]
ret['changes'].setdefault('container_id', {})['added'] = temp_container['Id'] # depends on [control=['if'], data=[]]
# No changes between existing container and temp container.
# First check if a requisite is asking to send a signal to the
# existing container.
elif send_signal:
if __opts__['test']:
comments.append('Signal {0} would be sent to container'.format(watch_action)) # depends on [control=['if'], data=[]]
else:
try:
__salt__['docker.signal'](name, signal=watch_action) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
ret['result'] = False
comments.append('Failed to signal container: {0}'.format(exc))
return _format_comments(ret, comments) # depends on [control=['except'], data=['exc']]
else:
ret['changes']['signal'] = watch_action
comments.append('Sent signal {0} to container'.format(watch_action)) # depends on [control=['if'], data=[]]
elif container_changes:
if not comments:
log.warning("docker_container.running: detected changes without a specific comment for container '%s'", name)
comments.append("Container '{0}'{1} updated.".format(name, ' would be' if __opts__['test'] else '')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Container was not replaced, no differences between the
# existing container and the temp container were detected,
# and no signal was sent to the container.
comments.append("Container '{0}' is already configured as specified".format(name)) # depends on [control=['if'], data=[]]
if net_changes:
ret['changes'].setdefault('container', {})['Networks'] = net_changes
if __opts__['test']:
ret['result'] = None
comments.append('Network configuration would be updated') # depends on [control=['if'], data=[]]
elif cleanup_temp:
# We only need to make network changes if the container
# isn't being replaced, since we would already have
# attached all the networks for purposes of comparison.
network_failure = False
for net_name in sorted(net_changes):
errors = []
disconnected = connected = False
try:
if name in __salt__['docker.connected'](net_name):
__salt__['docker.disconnect_container_from_network'](name, net_name)
disconnected = True # depends on [control=['if'], data=['name']] # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
errors.append(exc.__str__()) # depends on [control=['except'], data=['exc']]
if net_name in networks:
try:
__salt__['docker.connect_container_to_network'](name, net_name, **networks[net_name])
connected = True # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
errors.append(exc.__str__())
if disconnected:
# We succeeded in disconnecting but failed
# to reconnect. This can happen if the
# network's subnet has changed and we try
# to reconnect with the same IP address
# from the old subnet.
for item in list(net_changes[net_name]):
if net_changes[net_name][item]['old'] is None:
# Since they'd both be None, just
# delete this key from the changes
del net_changes[net_name][item] # depends on [control=['if'], data=[]]
else:
net_changes[net_name][item]['new'] = None # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=['net_name', 'networks']]
if errors:
comments.extend(errors)
network_failure = True # depends on [control=['if'], data=[]]
ret['changes'].setdefault('container', {}).setdefault('Networks', {})[net_name] = net_changes[net_name]
if disconnected and connected:
comments.append("Reconnected to network '{0}' with updated configuration".format(net_name)) # depends on [control=['if'], data=[]]
elif disconnected:
comments.append("Disconnected from network '{0}'".format(net_name)) # depends on [control=['if'], data=[]]
elif connected:
comments.append("Connected to network '{0}'".format(net_name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['net_name']]
if network_failure:
ret['result'] = False
return _format_comments(ret, comments) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
if cleanup_temp:
_delete_temp_container() # depends on [control=['if'], data=[]]
if skip_comparison:
if not exists:
comments.append("Created container '{0}'".format(name)) # depends on [control=['if'], data=[]]
elif not _replace(name, temp_container):
ret['result'] = False
return _format_comments(ret, comments) # depends on [control=['if'], data=[]]
ret['changes'].setdefault('container_id', {})['added'] = temp_container['Id'] # depends on [control=['if'], data=[]]
# "exists" means that a container by the specified name existed prior to
# this state being run
# "not cleanup_temp" means that the temp container became permanent, either
# because the named container did not exist or changes were detected
# "cleanup_temp" means that the container already existed and no changes
# were detected, so the the temp container was discarded
if not cleanup_temp and (not exists or (exists and start)) or (start and cleanup_temp and (pre_state != 'running')):
if __opts__['test']:
ret['result'] = None
comments.append('Container would be started')
return _format_comments(ret, comments) # depends on [control=['if'], data=[]]
else:
try:
post_state = __salt__['docker.start'](name)['state']['new'] # depends on [control=['try'], data=[]]
except Exception as exc:
ret['result'] = False
comments.append("Failed to start container '{0}': '{1}'".format(name, exc))
return _format_comments(ret, comments) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
else:
post_state = __salt__['docker.state'](name)
if not __opts__['test'] and post_state == 'running':
# Now that we're certain the container is running, check each modified
# network to see if the network went from static (or disconnected) to
# automatic IP configuration. If so, grab the automatically-assigned
# IPs and munge the changes dict to include them. Note that this can
# only be done after the container is started bceause automatic IPs are
# assigned at runtime.
contextkey = '.'.join((name, 'docker_container.running'))
def _get_nets():
if contextkey not in __context__:
new_container_info = __salt__['docker.inspect_container'](name)
__context__[contextkey] = new_container_info.get('NetworkSettings', {}).get('Networks', {}) # depends on [control=['if'], data=['contextkey', '__context__']]
return __context__[contextkey]
autoip_keys = __opts__['docker.compare_container_networks'].get('automatic', [])
for (net_name, net_changes) in six.iteritems(ret['changes'].get('container', {}).get('Networks', {})):
if 'IPConfiguration' in net_changes and net_changes['IPConfiguration']['new'] == 'automatic':
for key in autoip_keys:
val = _get_nets().get(net_name, {}).get(key)
if val:
net_changes[key] = {'old': None, 'new': val}
try:
net_changes.pop('IPConfiguration') # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
__context__.pop(contextkey, None) # depends on [control=['if'], data=[]]
if pre_state != post_state:
ret['changes']['state'] = {'old': pre_state, 'new': post_state}
if pre_state is not None:
comments.append("State changed from '{0}' to '{1}'".format(pre_state, post_state)) # depends on [control=['if'], data=['pre_state']] # depends on [control=['if'], data=['pre_state', 'post_state']]
if exists and current_image_id != image_id:
comments.append('Container has a new image')
ret['changes']['image'] = {'old': current_image_id, 'new': image_id} # depends on [control=['if'], data=[]]
if post_state != 'running' and start:
ret['result'] = False
comments.append('Container is not running') # depends on [control=['if'], data=[]]
return _format_comments(ret, comments) |
def unweighed(self):
    """
    This method creates a *plain* (unweighted) copy of the internal
    formula. As a result, an object of class :class:`CNF` is returned.
    Every clause (both hard or soft) of the WCNF formula is copied to
    the ``clauses`` variable of the resulting plain formula, i.e. all
    weights are discarded.

    :return: an object of class :class:`CNF`.

    Example:

    .. code-block:: python

        >>> from pysat.formula import WCNF
        >>> wcnf = WCNF()
        >>> wcnf.extend([[-3, 4], [5, 6]])
        >>> wcnf.extend([[3], [-4], [-5], [-6]], weights=[1, 5, 3, 4])
        >>>
        >>> cnf = wcnf.unweighted()
        >>> print cnf.clauses
        [[-3, 4], [5, 6], [3], [-4], [-5], [-6]]
    """

    cnf = CNF()
    cnf.nv = self.nv

    # Hard clauses first, then the soft ones with their weights dropped.
    # Deep copies keep the new formula independent of this WCNF.
    cnf.clauses = copy.deepcopy(self.hard) + copy.deepcopy(self.soft)

    # Bug fix: this previously assigned to the misspelled attribute
    # ``cnf.commends``, leaving the copy's real ``comments`` attribute
    # unset and attaching a stray, never-read attribute instead.
    cnf.comments = self.comments[:]

    return cnf
constant[
This method creates a *plain* (unweighted) copy of the internal
formula. As a result, an object of class :class:`CNF` is returned.
Every clause (both hard or soft) of the WCNF formula is copied to
the ``clauses`` variable of the resulting plain formula, i.e. all
weights are discarded.
:return: an object of class :class:`CNF`.
Example:
.. code-block:: python
>>> from pysat.formula import WCNF
>>> wcnf = WCNF()
>>> wcnf.extend([[-3, 4], [5, 6]])
>>> wcnf.extend([[3], [-4], [-5], [-6]], weights=[1, 5, 3, 4])
>>>
>>> cnf = wcnf.unweighted()
>>> print cnf.clauses
[[-3, 4], [5, 6], [3], [-4], [-5], [-6]]
]
variable[cnf] assign[=] call[name[CNF], parameter[]]
name[cnf].nv assign[=] name[self].nv
name[cnf].clauses assign[=] binary_operation[call[name[copy].deepcopy, parameter[name[self].hard]] + call[name[copy].deepcopy, parameter[name[self].soft]]]
name[cnf].commends assign[=] call[name[self].comments][<ast.Slice object at 0x7da1b11a37c0>]
return[name[cnf]] | keyword[def] identifier[unweighed] ( identifier[self] ):
literal[string]
identifier[cnf] = identifier[CNF] ()
identifier[cnf] . identifier[nv] = identifier[self] . identifier[nv]
identifier[cnf] . identifier[clauses] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[hard] )+ identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[soft] )
identifier[cnf] . identifier[commends] = identifier[self] . identifier[comments] [:]
keyword[return] identifier[cnf] | def unweighed(self):
"""
This method creates a *plain* (unweighted) copy of the internal
formula. As a result, an object of class :class:`CNF` is returned.
Every clause (both hard or soft) of the WCNF formula is copied to
the ``clauses`` variable of the resulting plain formula, i.e. all
weights are discarded.
:return: an object of class :class:`CNF`.
Example:
.. code-block:: python
>>> from pysat.formula import WCNF
>>> wcnf = WCNF()
>>> wcnf.extend([[-3, 4], [5, 6]])
>>> wcnf.extend([[3], [-4], [-5], [-6]], weights=[1, 5, 3, 4])
>>>
>>> cnf = wcnf.unweighted()
>>> print cnf.clauses
[[-3, 4], [5, 6], [3], [-4], [-5], [-6]]
"""
cnf = CNF()
cnf.nv = self.nv
cnf.clauses = copy.deepcopy(self.hard) + copy.deepcopy(self.soft)
cnf.commends = self.comments[:]
return cnf |
def generate_server(raml_root, config):
    """ Handle server generation process.

    :param raml_root: Instance of ramlfications.raml.RootNode.
    :param config: Pyramid Configurator instance.
    """
    log.info('Server generation started')
    if not raml_root.resources:
        return

    root_resource = config.get_root_resource()
    generated = {}

    for resource in raml_root.resources:
        # A Nefertari resource was already generated for this path; skip it.
        if resource.path in generated:
            continue

        # Resolve the Nefertari parent, then generate and remember the
        # resource so later RAML entries with the same path are deduplicated.
        parent = _get_nefertari_parent_resource(
            resource, generated, root_resource)
        created = generate_resource(config, resource, parent)
        if created is not None:
            generated[resource.path] = created
generated_resources[raml_resource.path] = new_resource | def function[generate_server, parameter[raml_root, config]]:
constant[ Handle server generation process.
:param raml_root: Instance of ramlfications.raml.RootNode.
:param config: Pyramid Configurator instance.
]
call[name[log].info, parameter[constant[Server generation started]]]
if <ast.UnaryOp object at 0x7da20c6e49d0> begin[:]
return[None]
variable[root_resource] assign[=] call[name[config].get_root_resource, parameter[]]
variable[generated_resources] assign[=] dictionary[[], []]
for taget[name[raml_resource]] in starred[name[raml_root].resources] begin[:]
if compare[name[raml_resource].path in name[generated_resources]] begin[:]
continue
variable[parent_resource] assign[=] call[name[_get_nefertari_parent_resource], parameter[name[raml_resource], name[generated_resources], name[root_resource]]]
variable[new_resource] assign[=] call[name[generate_resource], parameter[name[config], name[raml_resource], name[parent_resource]]]
if compare[name[new_resource] is_not constant[None]] begin[:]
call[name[generated_resources]][name[raml_resource].path] assign[=] name[new_resource] | keyword[def] identifier[generate_server] ( identifier[raml_root] , identifier[config] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[raml_root] . identifier[resources] :
keyword[return]
identifier[root_resource] = identifier[config] . identifier[get_root_resource] ()
identifier[generated_resources] ={}
keyword[for] identifier[raml_resource] keyword[in] identifier[raml_root] . identifier[resources] :
keyword[if] identifier[raml_resource] . identifier[path] keyword[in] identifier[generated_resources] :
keyword[continue]
identifier[parent_resource] = identifier[_get_nefertari_parent_resource] (
identifier[raml_resource] , identifier[generated_resources] , identifier[root_resource] )
identifier[new_resource] = identifier[generate_resource] (
identifier[config] , identifier[raml_resource] , identifier[parent_resource] )
keyword[if] identifier[new_resource] keyword[is] keyword[not] keyword[None] :
identifier[generated_resources] [ identifier[raml_resource] . identifier[path] ]= identifier[new_resource] | def generate_server(raml_root, config):
""" Handle server generation process.
:param raml_root: Instance of ramlfications.raml.RootNode.
:param config: Pyramid Configurator instance.
"""
log.info('Server generation started')
if not raml_root.resources:
return # depends on [control=['if'], data=[]]
root_resource = config.get_root_resource()
generated_resources = {}
for raml_resource in raml_root.resources:
if raml_resource.path in generated_resources:
continue # depends on [control=['if'], data=[]]
# Get Nefertari parent resource
parent_resource = _get_nefertari_parent_resource(raml_resource, generated_resources, root_resource)
# Get generated resource and store it
new_resource = generate_resource(config, raml_resource, parent_resource)
if new_resource is not None:
generated_resources[raml_resource.path] = new_resource # depends on [control=['if'], data=['new_resource']] # depends on [control=['for'], data=['raml_resource']] |
def edit_process(self, update_request, process_type_id):
    """EditProcess.
    [Preview API] Edit a process of a specific ID.
    :param :class:`<UpdateProcessModel> <azure.devops.v5_0.work_item_tracking_process.models.UpdateProcessModel>` update_request:
    :param str process_type_id:
    :rtype: :class:`<ProcessInfo> <azure.devops.v5_0.work_item_tracking_process.models.ProcessInfo>`
    """
    # Only populate the route parameter when the caller supplied an id.
    route_values = (
        {}
        if process_type_id is None
        else {'processTypeId': self._serialize.url('process_type_id', process_type_id, 'str')}
    )
    request_body = self._serialize.body(update_request, 'UpdateProcessModel')
    raw_response = self._send(http_method='PATCH',
                              location_id='02cc6a73-5cfb-427d-8c8e-b49fb086e8af',
                              version='5.0-preview.2',
                              route_values=route_values,
                              content=request_body)
    return self._deserialize('ProcessInfo', raw_response)
constant[EditProcess.
[Preview API] Edit a process of a specific ID.
:param :class:`<UpdateProcessModel> <azure.devops.v5_0.work_item_tracking_process.models.UpdateProcessModel>` update_request:
:param str process_type_id:
:rtype: :class:`<ProcessInfo> <azure.devops.v5_0.work_item_tracking_process.models.ProcessInfo>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[process_type_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[processTypeId]] assign[=] call[name[self]._serialize.url, parameter[constant[process_type_id], name[process_type_id], constant[str]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[update_request], constant[UpdateProcessModel]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[ProcessInfo], name[response]]]] | keyword[def] identifier[edit_process] ( identifier[self] , identifier[update_request] , identifier[process_type_id] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[process_type_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[process_type_id] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[update_request] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[content] = identifier[content] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def edit_process(self, update_request, process_type_id):
"""EditProcess.
[Preview API] Edit a process of a specific ID.
:param :class:`<UpdateProcessModel> <azure.devops.v5_0.work_item_tracking_process.models.UpdateProcessModel>` update_request:
:param str process_type_id:
:rtype: :class:`<ProcessInfo> <azure.devops.v5_0.work_item_tracking_process.models.ProcessInfo>`
"""
route_values = {}
if process_type_id is not None:
route_values['processTypeId'] = self._serialize.url('process_type_id', process_type_id, 'str') # depends on [control=['if'], data=['process_type_id']]
content = self._serialize.body(update_request, 'UpdateProcessModel')
response = self._send(http_method='PATCH', location_id='02cc6a73-5cfb-427d-8c8e-b49fb086e8af', version='5.0-preview.2', route_values=route_values, content=content)
return self._deserialize('ProcessInfo', response) |
def __create_none_connections(self):
    """!
    @brief Creates network without connections.
    """
    if self._conn_represent == conn_represent.MATRIX:
        # Append one all-False boolean row per oscillator (keeps the
        # original append-to-existing-list semantics).
        self._osc_conn.extend([False] * self._num_osc for _ in range(self._num_osc))
    elif self._conn_represent == conn_represent.LIST:
        # Fresh, empty adjacency list for every oscillator.
        self._osc_conn = [[] for _ in range(self._num_osc)]
constant[!
@brief Creates network without connections.
]
if compare[name[self]._conn_represent equal[==] name[conn_represent].MATRIX] begin[:]
for taget[name[_]] in starred[call[name[range], parameter[constant[0], name[self]._num_osc, constant[1]]]] begin[:]
call[name[self]._osc_conn.append, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b016edd0>]] * name[self]._num_osc]]] | keyword[def] identifier[__create_none_connections] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_conn_represent] == identifier[conn_represent] . identifier[MATRIX] ):
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_num_osc] , literal[int] ):
identifier[self] . identifier[_osc_conn] . identifier[append] ([ keyword[False] ]* identifier[self] . identifier[_num_osc] );
keyword[elif] ( identifier[self] . identifier[_conn_represent] == identifier[conn_represent] . identifier[LIST] ):
identifier[self] . identifier[_osc_conn] =[[] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_num_osc] , literal[int] )]; | def __create_none_connections(self):
"""!
@brief Creates network without connections.
"""
if self._conn_represent == conn_represent.MATRIX:
for _ in range(0, self._num_osc, 1):
self._osc_conn.append([False] * self._num_osc) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif self._conn_represent == conn_represent.LIST:
self._osc_conn = [[] for _ in range(0, self._num_osc, 1)] # depends on [control=['if'], data=[]] |
def search_dashboard_for_facets(self, **kwargs):  # noqa: E501
    """Lists the values of one or more facets over the customer's non-deleted dashboards  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_dashboard_for_facets(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param FacetsSearchRequestContainer body:
    :return: ResponseContainerFacetsResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant; with async_req=True it returns the request thread instead
    # of the deserialized data, so a single call covers both cases.
    return self.search_dashboard_for_facets_with_http_info(**kwargs)  # noqa: E501
constant[Lists the values of one or more facets over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].search_dashboard_for_facets_with_http_info, parameter[]]] | keyword[def] identifier[search_dashboard_for_facets] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[search_dashboard_for_facets_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[search_dashboard_for_facets_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def search_dashboard_for_facets(self, **kwargs): # noqa: E501
"Lists the values of one or more facets over the customer's non-deleted dashboards # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.search_dashboard_for_facets(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param FacetsSearchRequestContainer body:\n :return: ResponseContainerFacetsResponseContainer\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_for_facets_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.search_dashboard_for_facets_with_http_info(**kwargs) # noqa: E501
return data |
def total_sky_cover(self, value=99.0):
    """Corresponds to IDD Field `total_sky_cover` This is the value for
    total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
    total coverage). (Amount of sky dome in tenths covered by clouds or
    obscuring phenomena at the hour indicated at the time indicated.)

    Args:
        value (float): value for IDD Field `total_sky_cover`
                value >= 0.0
                value <= 10.0
                Missing value: 99.0
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `total_sky_cover`'.format(value))
        # BUG FIX: 99.0 is the documented missing-value sentinel (and the
        # default), so it must bypass the 0..10 range check — otherwise
        # calling this setter with the default would always raise.
        if value != 99.0:
            if value < 0.0:
                raise ValueError('value need to be greater or equal 0.0 '
                                 'for field `total_sky_cover`')
            if value > 10.0:
                raise ValueError('value need to be smaller 10.0 '
                                 'for field `total_sky_cover`')
    self._total_sky_cover = value
constant[Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0fb24a0>
if compare[name[value] less[<] constant[0.0]] begin[:]
<ast.Raise object at 0x7da1b0fb0e20>
if compare[name[value] greater[>] constant[10.0]] begin[:]
<ast.Raise object at 0x7da1b0fb0580>
name[self]._total_sky_cover assign[=] name[value] | keyword[def] identifier[total_sky_cover] ( identifier[self] , identifier[value] = literal[int] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] identifier[value] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[value] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[self] . identifier[_total_sky_cover] = identifier[value] | def total_sky_cover(self, value=99.0):
"""Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `total_sky_cover`'.format(value)) # depends on [control=['except'], data=[]]
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 for field `total_sky_cover`') # depends on [control=['if'], data=[]]
if value > 10.0:
raise ValueError('value need to be smaller 10.0 for field `total_sky_cover`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
self._total_sky_cover = value |
def _process_response(self, response, marker_elems=None):
    """
    Helper to process the xml response from AWS
    """
    body = response.read()
    # Fail fast on an AWS error payload; otherwise SAX-parse into a ResultSet.
    if '<Errors>' in body:
        raise MTurkRequestError(response.status, response.reason, body)
    result_set = ResultSet(marker_elems)
    xml.sax.parseString(body, handler.XmlHandler(result_set, self))
    return result_set
constant[
Helper to process the xml response from AWS
]
variable[body] assign[=] call[name[response].read, parameter[]]
if compare[constant[<Errors>] <ast.NotIn object at 0x7da2590d7190> name[body]] begin[:]
variable[rs] assign[=] call[name[ResultSet], parameter[name[marker_elems]]]
variable[h] assign[=] call[name[handler].XmlHandler, parameter[name[rs], name[self]]]
call[name[xml].sax.parseString, parameter[name[body], name[h]]]
return[name[rs]] | keyword[def] identifier[_process_response] ( identifier[self] , identifier[response] , identifier[marker_elems] = keyword[None] ):
literal[string]
identifier[body] = identifier[response] . identifier[read] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[body] :
identifier[rs] = identifier[ResultSet] ( identifier[marker_elems] )
identifier[h] = identifier[handler] . identifier[XmlHandler] ( identifier[rs] , identifier[self] )
identifier[xml] . identifier[sax] . identifier[parseString] ( identifier[body] , identifier[h] )
keyword[return] identifier[rs]
keyword[else] :
keyword[raise] identifier[MTurkRequestError] ( identifier[response] . identifier[status] , identifier[response] . identifier[reason] , identifier[body] ) | def _process_response(self, response, marker_elems=None):
"""
Helper to process the xml response from AWS
"""
body = response.read()
#print body
if '<Errors>' not in body:
rs = ResultSet(marker_elems)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs # depends on [control=['if'], data=['body']]
else:
raise MTurkRequestError(response.status, response.reason, body) |
def make_op_return_outputs(data, inputs, change_address, fee=OP_RETURN_FEE,
                           send_amount=0, format='bin'):
    """ Builds the outputs for an OP_RETURN transaction.
    """
    # Main output: the OP_RETURN data payload.
    data_output = {
        "script_hex": make_op_return_script(data, format=format),
        "value": send_amount,
    }
    # Change output: whatever remains of the inputs after amount + fee.
    change_output = {
        "script_hex": make_pay_to_address_script(change_address),
        "value": calculate_change_amount(inputs, send_amount, fee),
    }
    return [data_output, change_output]
constant[ Builds the outputs for an OP_RETURN transaction.
]
return[list[[<ast.Dict object at 0x7da1b101a560>, <ast.Dict object at 0x7da1b106cc10>]]] | keyword[def] identifier[make_op_return_outputs] ( identifier[data] , identifier[inputs] , identifier[change_address] , identifier[fee] = identifier[OP_RETURN_FEE] ,
identifier[send_amount] = literal[int] , identifier[format] = literal[string] ):
literal[string]
keyword[return] [
{ literal[string] : identifier[make_op_return_script] ( identifier[data] , identifier[format] = identifier[format] ), literal[string] : identifier[send_amount] },
{ literal[string] : identifier[make_pay_to_address_script] ( identifier[change_address] ),
literal[string] : identifier[calculate_change_amount] ( identifier[inputs] , identifier[send_amount] , identifier[fee] )
}
] | def make_op_return_outputs(data, inputs, change_address, fee=OP_RETURN_FEE, send_amount=0, format='bin'):
""" Builds the outputs for an OP_RETURN transaction.
"""
# main output
# change output
return [{'script_hex': make_op_return_script(data, format=format), 'value': send_amount}, {'script_hex': make_pay_to_address_script(change_address), 'value': calculate_change_amount(inputs, send_amount, fee)}] |
def commit(self, commit):
    """Get data for a given commit

    Raises KeyError if a commit is not found or not parsed.

    :param commit: repository commit
    :type commit: string
    """
    try:
        commit_obj = self.repo.commit(commit)
        return self._commitData(commit_obj)
    except (ValueError, KeyError, BadObject):
        # Normalize all lookup/parse failures into a KeyError, naming the
        # repository when we know which one was searched.
        if self._repo_info:
            raise KeyError("Commit %s not found for %s" % (commit, str(self._repo_info)))
        raise KeyError("Commit %s not found" % commit)
constant[Get data for a given commit
Raises KeyError if a commit is not found or not parsed.
:param commit: repository commit
:type commit: string
]
<ast.Try object at 0x7da2044c2ec0> | keyword[def] identifier[commit] ( identifier[self] , identifier[commit] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_commitData] ( identifier[self] . identifier[repo] . identifier[commit] ( identifier[commit] ))
keyword[except] ( identifier[ValueError] , identifier[KeyError] , identifier[BadObject] ):
keyword[if] identifier[self] . identifier[_repo_info] :
keyword[raise] identifier[KeyError] ( literal[string] %( identifier[commit] , identifier[str] ( identifier[self] . identifier[_repo_info] )))
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] % identifier[commit] ) | def commit(self, commit):
"""Get data for a given commit
Raises KeyError if a commit is not found or not parsed.
:param commit: repository commit
:type commit: string
"""
try:
return self._commitData(self.repo.commit(commit)) # depends on [control=['try'], data=[]]
except (ValueError, KeyError, BadObject):
if self._repo_info:
raise KeyError('Commit %s not found for %s' % (commit, str(self._repo_info))) # depends on [control=['if'], data=[]]
else:
raise KeyError('Commit %s not found' % commit) # depends on [control=['except'], data=[]] |
def context(self, mapping_class=DotDictWithAcquisition):
    """Yield a config object, closing every contained item when the
    context goes out of scope."""
    cfg = None
    try:
        cfg = self.get_config(mapping_class=mapping_class)
        yield cfg
    finally:
        # Run cleanup even if get_config or the caller's body raised;
        # cfg stays None when get_config itself failed.
        if cfg:
            self._walk_and_close(cfg)
constant[return a config as a context that calls close on every item when
it goes out of scope]
variable[config] assign[=] constant[None]
<ast.Try object at 0x7da2054a4d90> | keyword[def] identifier[context] ( identifier[self] , identifier[mapping_class] = identifier[DotDictWithAcquisition] ):
literal[string]
identifier[config] = keyword[None]
keyword[try] :
identifier[config] = identifier[self] . identifier[get_config] ( identifier[mapping_class] = identifier[mapping_class] )
keyword[yield] identifier[config]
keyword[finally] :
keyword[if] identifier[config] :
identifier[self] . identifier[_walk_and_close] ( identifier[config] ) | def context(self, mapping_class=DotDictWithAcquisition):
"""return a config as a context that calls close on every item when
it goes out of scope"""
config = None
try:
config = self.get_config(mapping_class=mapping_class)
yield config # depends on [control=['try'], data=[]]
finally:
if config:
self._walk_and_close(config) # depends on [control=['if'], data=[]] |
def close(self):
    """close(self)"""
    if self.isClosed:
        raise ValueError("operation illegal for closed doc")
    # Release the cached outline chain, if one was ever built.
    outline = getattr(self, "_outline", None)
    if outline:
        self._dropOutline(outline)
        self._outline = None
    self._reset_page_refs()
    # Drop cached state and mark the document closed before the C call.
    self.metadata = None
    self.stream = None
    self.isClosed = True
    self.openErrCode = 0
    self.openErrMsg = ''
    self.FontInfos = []
    # Null out entries in the old dict first (other holders of the dict
    # see the graftmaps released), then replace it.
    for key in self.Graftmaps:
        self.Graftmaps[key] = None
    self.Graftmaps = {}
    self.ShownPages = {}
    val = _fitz.Document_close(self)
    self.thisown = False
    return val
constant[close(self)]
if name[self].isClosed begin[:]
<ast.Raise object at 0x7da18bcc9bd0>
if <ast.BoolOp object at 0x7da18bcca110> begin[:]
call[name[self]._dropOutline, parameter[name[self]._outline]]
name[self]._outline assign[=] constant[None]
call[name[self]._reset_page_refs, parameter[]]
name[self].metadata assign[=] constant[None]
name[self].stream assign[=] constant[None]
name[self].isClosed assign[=] constant[True]
name[self].openErrCode assign[=] constant[0]
name[self].openErrMsg assign[=] constant[]
name[self].FontInfos assign[=] list[[]]
for taget[name[gmap]] in starred[name[self].Graftmaps] begin[:]
call[name[self].Graftmaps][name[gmap]] assign[=] constant[None]
name[self].Graftmaps assign[=] dictionary[[], []]
name[self].ShownPages assign[=] dictionary[[], []]
variable[val] assign[=] call[name[_fitz].Document_close, parameter[name[self]]]
name[self].thisown assign[=] constant[False]
return[name[val]] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[isClosed] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[_outline] :
identifier[self] . identifier[_dropOutline] ( identifier[self] . identifier[_outline] )
identifier[self] . identifier[_outline] = keyword[None]
identifier[self] . identifier[_reset_page_refs] ()
identifier[self] . identifier[metadata] = keyword[None]
identifier[self] . identifier[stream] = keyword[None]
identifier[self] . identifier[isClosed] = keyword[True]
identifier[self] . identifier[openErrCode] = literal[int]
identifier[self] . identifier[openErrMsg] = literal[string]
identifier[self] . identifier[FontInfos] =[]
keyword[for] identifier[gmap] keyword[in] identifier[self] . identifier[Graftmaps] :
identifier[self] . identifier[Graftmaps] [ identifier[gmap] ]= keyword[None]
identifier[self] . identifier[Graftmaps] ={}
identifier[self] . identifier[ShownPages] ={}
identifier[val] = identifier[_fitz] . identifier[Document_close] ( identifier[self] )
identifier[self] . identifier[thisown] = keyword[False]
keyword[return] identifier[val] | def close(self):
"""close(self)"""
if self.isClosed:
raise ValueError('operation illegal for closed doc') # depends on [control=['if'], data=[]]
if hasattr(self, '_outline') and self._outline:
self._dropOutline(self._outline)
self._outline = None # depends on [control=['if'], data=[]]
self._reset_page_refs()
self.metadata = None
self.stream = None
self.isClosed = True
self.openErrCode = 0
self.openErrMsg = ''
self.FontInfos = []
for gmap in self.Graftmaps:
self.Graftmaps[gmap] = None # depends on [control=['for'], data=['gmap']]
self.Graftmaps = {}
self.ShownPages = {}
val = _fitz.Document_close(self)
self.thisown = False
return val |
def entity_types(args):
    """ List entity types in a workspace """
    response = fapi.list_entity_types(args.project, args.workspace)
    # Abort with a FireCloud error unless the API returned HTTP 200.
    fapi._check_response_code(response, 200)
    return response.json().keys()
constant[ List entity types in a workspace ]
variable[r] assign[=] call[name[fapi].list_entity_types, parameter[name[args].project, name[args].workspace]]
call[name[fapi]._check_response_code, parameter[name[r], constant[200]]]
return[call[call[name[r].json, parameter[]].keys, parameter[]]] | keyword[def] identifier[entity_types] ( identifier[args] ):
literal[string]
identifier[r] = identifier[fapi] . identifier[list_entity_types] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] )
keyword[return] identifier[r] . identifier[json] (). identifier[keys] () | def entity_types(args):
""" List entity types in a workspace """
r = fapi.list_entity_types(args.project, args.workspace)
fapi._check_response_code(r, 200)
return r.json().keys() |
def _get_bug_attr(bug, attr):
    """Default longdescs/flags case to [] since they may not be present."""
    optional_list_fields = frozenset(("longdescs", "flags"))
    if attr in optional_list_fields:
        return getattr(bug, attr, [])
    # All other attributes are required: let AttributeError propagate.
    return getattr(bug, attr)
constant[Default longdescs/flags case to [] since they may not be present.]
if compare[name[attr] in tuple[[<ast.Constant object at 0x7da1b025ffd0>, <ast.Constant object at 0x7da1b025f040>]]] begin[:]
return[call[name[getattr], parameter[name[bug], name[attr], list[[]]]]]
return[call[name[getattr], parameter[name[bug], name[attr]]]] | keyword[def] identifier[_get_bug_attr] ( identifier[bug] , identifier[attr] ):
literal[string]
keyword[if] identifier[attr] keyword[in] ( literal[string] , literal[string] ):
keyword[return] identifier[getattr] ( identifier[bug] , identifier[attr] ,[])
keyword[return] identifier[getattr] ( identifier[bug] , identifier[attr] ) | def _get_bug_attr(bug, attr):
"""Default longdescs/flags case to [] since they may not be present."""
if attr in ('longdescs', 'flags'):
return getattr(bug, attr, []) # depends on [control=['if'], data=['attr']]
return getattr(bug, attr) |
async def message_field(self, msg, field, fvalue=None, obj=None):
    """
    Dumps/Loads message field

    :param msg:
    :param field:
    :param fvalue: explicit value for dump
    :param obj:
    :return:
    """
    fname, ftype = field[0], field[1]
    params = field[2:]
    try:
        self.tracker.push_field(fname)
        if self.writing:
            # No explicit value supplied -> read it off the message.
            if fvalue is None:
                fvalue = getattr(msg, fname, None)
            await self._dump_field(fvalue, ftype, params)
        else:
            await self._load_field(ftype, params, x.eref(msg, fname))
        self.tracker.pop()
    except Exception as e:
        # Wrap any failure with the field-path tracker for diagnostics.
        raise helpers.ArchiveException(e, tracker=self.tracker) from e
literal[string]
identifier[fname] , identifier[ftype] , identifier[params] = identifier[field] [ literal[int] ], identifier[field] [ literal[int] ], identifier[field] [ literal[int] :]
keyword[try] :
identifier[self] . identifier[tracker] . identifier[push_field] ( identifier[fname] )
keyword[if] identifier[self] . identifier[writing] :
identifier[fvalue] = identifier[getattr] ( identifier[msg] , identifier[fname] , keyword[None] ) keyword[if] identifier[fvalue] keyword[is] keyword[None] keyword[else] identifier[fvalue]
keyword[await] identifier[self] . identifier[_dump_field] ( identifier[fvalue] , identifier[ftype] , identifier[params] )
keyword[else] :
keyword[await] identifier[self] . identifier[_load_field] ( identifier[ftype] , identifier[params] , identifier[x] . identifier[eref] ( identifier[msg] , identifier[fname] ))
identifier[self] . identifier[tracker] . identifier[pop] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[helpers] . identifier[ArchiveException] ( identifier[e] , identifier[tracker] = identifier[self] . identifier[tracker] ) keyword[from] identifier[e] | async def message_field(self, msg, field, fvalue=None, obj=None):
"""
Dumps/Loads message field
:param msg:
:param field:
:param fvalue: explicit value for dump
:param obj:
:return:
"""
(fname, ftype, params) = (field[0], field[1], field[2:])
try:
self.tracker.push_field(fname)
if self.writing:
fvalue = getattr(msg, fname, None) if fvalue is None else fvalue
await self._dump_field(fvalue, ftype, params) # depends on [control=['if'], data=[]]
else:
await self._load_field(ftype, params, x.eref(msg, fname))
self.tracker.pop() # depends on [control=['try'], data=[]]
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e # depends on [control=['except'], data=['e']] |
def _unverified_decode(token):
    """Decodes a token and does no verification.

    Args:
        token (Union[str, bytes]): The encoded JWT.

    Returns:
        Tuple[str, str, str, str]: header, payload, signed_section, and
            signature.

    Raises:
        ValueError: if there are an incorrect amount of segments in the token.
    """
    token = _helpers.to_bytes(token)

    segments = token.split(b'.')
    # A well-formed JWT has exactly three dot-separated segments.
    if len(segments) != 3:
        raise ValueError(
            'Wrong number of segments in token: {0}'.format(token))
    encoded_header, encoded_payload, signature = segments

    # The signature covers "header.payload" exactly as transmitted.
    signed_section = b'.'.join((encoded_header, encoded_payload))
    signature = _helpers.padded_urlsafe_b64decode(signature)

    header = _decode_jwt_segment(encoded_header)
    payload = _decode_jwt_segment(encoded_payload)

    return header, payload, signed_section, signature
constant[Decodes a token and does no verification.
Args:
token (Union[str, bytes]): The encoded JWT.
Returns:
Tuple[str, str, str, str]: header, payload, signed_section, and
signature.
Raises:
ValueError: if there are an incorrect amount of segments in the token.
]
variable[token] assign[=] call[name[_helpers].to_bytes, parameter[name[token]]]
if compare[call[name[token].count, parameter[constant[b'.']]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da20c6c6d10>
<ast.Tuple object at 0x7da18dc98a00> assign[=] call[name[token].split, parameter[constant[b'.']]]
variable[signed_section] assign[=] binary_operation[binary_operation[name[encoded_header] + constant[b'.']] + name[encoded_payload]]
variable[signature] assign[=] call[name[_helpers].padded_urlsafe_b64decode, parameter[name[signature]]]
variable[header] assign[=] call[name[_decode_jwt_segment], parameter[name[encoded_header]]]
variable[payload] assign[=] call[name[_decode_jwt_segment], parameter[name[encoded_payload]]]
return[tuple[[<ast.Name object at 0x7da18dc9b190>, <ast.Name object at 0x7da18dc9a9e0>, <ast.Name object at 0x7da18dc984c0>, <ast.Name object at 0x7da18dc9aa40>]]] | keyword[def] identifier[_unverified_decode] ( identifier[token] ):
literal[string]
identifier[token] = identifier[_helpers] . identifier[to_bytes] ( identifier[token] )
keyword[if] identifier[token] . identifier[count] ( literal[string] )!= literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[token] ))
identifier[encoded_header] , identifier[encoded_payload] , identifier[signature] = identifier[token] . identifier[split] ( literal[string] )
identifier[signed_section] = identifier[encoded_header] + literal[string] + identifier[encoded_payload]
identifier[signature] = identifier[_helpers] . identifier[padded_urlsafe_b64decode] ( identifier[signature] )
identifier[header] = identifier[_decode_jwt_segment] ( identifier[encoded_header] )
identifier[payload] = identifier[_decode_jwt_segment] ( identifier[encoded_payload] )
keyword[return] identifier[header] , identifier[payload] , identifier[signed_section] , identifier[signature] | def _unverified_decode(token):
"""Decodes a token and does no verification.
Args:
token (Union[str, bytes]): The encoded JWT.
Returns:
Tuple[str, str, str, str]: header, payload, signed_section, and
signature.
Raises:
ValueError: if there are an incorrect amount of segments in the token.
"""
token = _helpers.to_bytes(token)
if token.count(b'.') != 2:
raise ValueError('Wrong number of segments in token: {0}'.format(token)) # depends on [control=['if'], data=[]]
(encoded_header, encoded_payload, signature) = token.split(b'.')
signed_section = encoded_header + b'.' + encoded_payload
signature = _helpers.padded_urlsafe_b64decode(signature)
# Parse segments
header = _decode_jwt_segment(encoded_header)
payload = _decode_jwt_segment(encoded_payload)
return (header, payload, signed_section, signature) |
def get_install_paths(name):
    """
    Return the (distutils) install paths for the named dist.

    A dict with ('purelib', 'platlib', 'headers', 'scripts', 'data') keys.

    :param name: distribution name; used both to build the install command
        and as the per-project subdirectory of the headers path.
    :returns: dict mapping each distutils scheme key to its install path.
    """
    paths = {}

    i = get_install_command(name)

    for key in install.SCHEME_KEYS:
        paths[key] = getattr(i, 'install_' + key)

    # pip uses a similar path as an alternative to the system's (read-only)
    # include directory:
    if hasattr(sys, 'real_prefix'):  # virtualenv
        # BUG FIX: sys.version[:3] truncates two-digit minor versions
        # ("3.10.1"[:3] == "3.1"), so build the X.Y tag from
        # sys.version_info instead.
        py_xy = '%d.%d' % sys.version_info[:2]
        paths['headers'] = os.path.join(sys.prefix,
                                        'include',
                                        'site',
                                        'python' + py_xy,
                                        name)

    return paths
constant[
Return the (distutils) install paths for the named dist.
A dict with ('purelib', 'platlib', 'headers', 'scripts', 'data') keys.
]
variable[paths] assign[=] dictionary[[], []]
variable[i] assign[=] call[name[get_install_command], parameter[name[name]]]
for taget[name[key]] in starred[name[install].SCHEME_KEYS] begin[:]
call[name[paths]][name[key]] assign[=] call[name[getattr], parameter[name[i], binary_operation[constant[install_] + name[key]]]]
if call[name[hasattr], parameter[name[sys], constant[real_prefix]]] begin[:]
call[name[paths]][constant[headers]] assign[=] call[name[os].path.join, parameter[name[sys].prefix, constant[include], constant[site], binary_operation[constant[python] + call[name[sys].version][<ast.Slice object at 0x7da2041d9a50>]], name[name]]]
return[name[paths]] | keyword[def] identifier[get_install_paths] ( identifier[name] ):
literal[string]
identifier[paths] ={}
identifier[i] = identifier[get_install_command] ( identifier[name] )
keyword[for] identifier[key] keyword[in] identifier[install] . identifier[SCHEME_KEYS] :
identifier[paths] [ identifier[key] ]= identifier[getattr] ( identifier[i] , literal[string] + identifier[key] )
keyword[if] identifier[hasattr] ( identifier[sys] , literal[string] ):
identifier[paths] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[sys] . identifier[prefix] ,
literal[string] ,
literal[string] ,
literal[string] + identifier[sys] . identifier[version] [: literal[int] ],
identifier[name] )
keyword[return] identifier[paths] | def get_install_paths(name):
"""
Return the (distutils) install paths for the named dist.
A dict with ('purelib', 'platlib', 'headers', 'scripts', 'data') keys.
"""
paths = {}
i = get_install_command(name)
for key in install.SCHEME_KEYS:
paths[key] = getattr(i, 'install_' + key) # depends on [control=['for'], data=['key']]
# pip uses a similar path as an alternative to the system's (read-only)
# include directory:
if hasattr(sys, 'real_prefix'): # virtualenv
paths['headers'] = os.path.join(sys.prefix, 'include', 'site', 'python' + sys.version[:3], name) # depends on [control=['if'], data=[]]
return paths |
def show_firmware_version_output_show_firmware_version_build_time(self, **kwargs):
    """Auto Generated Code

    Build the XML payload for a show-firmware-version RPC, populate the
    ``build-time`` leaf from ``kwargs['build_time']``, and pass the tree to
    the callback (``kwargs['callback']`` or the instance default).
    """
    config = ET.Element("config")  # placeholder from the generator template; replaced below
    root = ET.Element("show_firmware_version")
    config = root
    output_node = ET.SubElement(root, "output")
    version_node = ET.SubElement(output_node, "show-firmware-version")
    time_leaf = ET.SubElement(version_node, "build-time")
    time_leaf.text = kwargs.pop('build_time')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[show_firmware_version] assign[=] call[name[ET].Element, parameter[constant[show_firmware_version]]]
variable[config] assign[=] name[show_firmware_version]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[show_firmware_version], constant[output]]]
variable[show_firmware_version] assign[=] call[name[ET].SubElement, parameter[name[output], constant[show-firmware-version]]]
variable[build_time] assign[=] call[name[ET].SubElement, parameter[name[show_firmware_version], constant[build-time]]]
name[build_time].text assign[=] call[name[kwargs].pop, parameter[constant[build_time]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[show_firmware_version_output_show_firmware_version_build_time] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[show_firmware_version] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[show_firmware_version]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[show_firmware_version] , literal[string] )
identifier[show_firmware_version] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[build_time] = identifier[ET] . identifier[SubElement] ( identifier[show_firmware_version] , literal[string] )
identifier[build_time] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def show_firmware_version_output_show_firmware_version_build_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
show_firmware_version = ET.Element('show_firmware_version')
config = show_firmware_version
output = ET.SubElement(show_firmware_version, 'output')
show_firmware_version = ET.SubElement(output, 'show-firmware-version')
build_time = ET.SubElement(show_firmware_version, 'build-time')
build_time.text = kwargs.pop('build_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_project(self, name, description):
    """
    Create a new project with the specified name and description

    :param name: str: name of the project to create
    :param description: str: description of the project to create
    :return: Project
    """
    raw_response = self.data_service.create_project(name, description)
    return self._create_item_response(raw_response, Project)
constant[
Create a new project with the specified name and description
:param name: str: name of the project to create
:param description: str: description of the project to create
:return: Project
]
return[call[name[self]._create_item_response, parameter[call[name[self].data_service.create_project, parameter[name[name], name[description]]], name[Project]]]] | keyword[def] identifier[create_project] ( identifier[self] , identifier[name] , identifier[description] ):
literal[string]
keyword[return] identifier[self] . identifier[_create_item_response] (
identifier[self] . identifier[data_service] . identifier[create_project] ( identifier[name] , identifier[description] ),
identifier[Project] ) | def create_project(self, name, description):
"""
Create a new project with the specified name and description
:param name: str: name of the project to create
:param description: str: description of the project to create
:return: Project
"""
return self._create_item_response(self.data_service.create_project(name, description), Project) |
def _generate_url_root(protocol, host, port):
    """
    Generate API root URL without resources

    :param protocol: Web protocol [HTTP | HTTPS] (string)
    :param host: Hostname or IP (string)
    :param port: Service port (string)
    :return: ROOT url
    """
    fields = {'protocol': protocol, 'host': host, 'port': port}
    return URL_ROOT_PATTERN.format(**fields)
constant[
Generate API root URL without resources
:param protocol: Web protocol [HTTP | HTTPS] (string)
:param host: Hostname or IP (string)
:param port: Service port (string)
:return: ROOT url
]
return[call[name[URL_ROOT_PATTERN].format, parameter[]]] | keyword[def] identifier[_generate_url_root] ( identifier[protocol] , identifier[host] , identifier[port] ):
literal[string]
keyword[return] identifier[URL_ROOT_PATTERN] . identifier[format] ( identifier[protocol] = identifier[protocol] , identifier[host] = identifier[host] , identifier[port] = identifier[port] ) | def _generate_url_root(protocol, host, port):
"""
Generate API root URL without resources
:param protocol: Web protocol [HTTP | HTTPS] (string)
:param host: Hostname or IP (string)
:param port: Service port (string)
:return: ROOT url
"""
return URL_ROOT_PATTERN.format(protocol=protocol, host=host, port=port) |
def get_default_user_groups(self, **kwargs):  # noqa: E501
    """Get default user groups customer preferences  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_default_user_groups(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param User body:
    :return: ResponseContainerListUserGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always get just the payload,
    # not the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand the request thread straight back.
        return self.get_default_user_groups_with_http_info(**kwargs)  # noqa: E501
    # Synchronous: block for the response and return its data.
    return self.get_default_user_groups_with_http_info(**kwargs)  # noqa: E501
constant[Get default user groups customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_user_groups(async_req=True)
>>> result = thread.get()
:param async_req bool
:param User body:
:return: ResponseContainerListUserGroup
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].get_default_user_groups_with_http_info, parameter[]]] | keyword[def] identifier[get_default_user_groups] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_default_user_groups_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[get_default_user_groups_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def get_default_user_groups(self, **kwargs): # noqa: E501
'Get default user groups customer preferences # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_default_user_groups(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param User body:\n :return: ResponseContainerListUserGroup\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_default_user_groups_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.get_default_user_groups_with_http_info(**kwargs) # noqa: E501
return data |
def parse_market(self, market, split_char='_'):
    """
    In comes the market identifier directly from the service. Returned is
    the crypto and fiat identifier in moneywagon format.
    """
    raw_crypto, raw_fiat = market.lower().split(split_char)
    crypto = self.fix_symbol(raw_crypto, reverse=True)
    fiat = self.fix_symbol(raw_fiat, reverse=True)
    return (crypto, fiat)
constant[
In comes the market identifier directly from the service. Returned is
the crypto and fiat identifier in moneywagon format.
]
<ast.Tuple object at 0x7da1b11bd030> assign[=] call[call[name[market].lower, parameter[]].split, parameter[name[split_char]]]
return[tuple[[<ast.Call object at 0x7da1b11bc070>, <ast.Call object at 0x7da1b11bcc10>]]] | keyword[def] identifier[parse_market] ( identifier[self] , identifier[market] , identifier[split_char] = literal[string] ):
literal[string]
identifier[crypto] , identifier[fiat] = identifier[market] . identifier[lower] (). identifier[split] ( identifier[split_char] )
keyword[return] (
identifier[self] . identifier[fix_symbol] ( identifier[crypto] , identifier[reverse] = keyword[True] ),
identifier[self] . identifier[fix_symbol] ( identifier[fiat] , identifier[reverse] = keyword[True] )
) | def parse_market(self, market, split_char='_'):
"""
In comes the market identifier directly from the service. Returned is
the crypto and fiat identifier in moneywagon format.
"""
(crypto, fiat) = market.lower().split(split_char)
return (self.fix_symbol(crypto, reverse=True), self.fix_symbol(fiat, reverse=True)) |
def discombobulate(self, filehash):
    """Prepare the napiprojekt scrambled hash.

    For five fixed positions in *filehash* (a hex digest string), read the
    nibble there, add a per-position offset to obtain a second index, read
    the two-digit hex value at that index, multiply it by a per-position
    factor, and keep the last hex digit of each product.

    :param filehash: hex digest string (the napiprojekt file hash)
    :returns: 5-character scrambled-hash string
    """
    idx = [0xe, 0x3, 0x6, 0x8, 0x2]
    mul = [2, 2, 5, 4, 3]
    add = [0, 0xd, 0x10, 0xb, 0x5]
    b = []
    # BUG FIX: the original iterated ``xrange(len(idx))``, which is
    # Python-2-only (NameError on Python 3).  Walk the three parallel
    # tables together instead; the arithmetic is unchanged.
    for a, m, i in zip(add, mul, idx):
        t = a + int(filehash[i], 16)
        v = int(filehash[t:t + 2], 16)
        b.append(("%x" % (v * m))[-1])
    return ''.join(b)
constant[ prepare napiprojekt scrambled hash ]
variable[idx] assign[=] list[[<ast.Constant object at 0x7da2047e9d20>, <ast.Constant object at 0x7da2047ea1d0>, <ast.Constant object at 0x7da2047e8070>, <ast.Constant object at 0x7da2047e8b80>, <ast.Constant object at 0x7da2047ea1a0>]]
variable[mul] assign[=] list[[<ast.Constant object at 0x7da2047e8490>, <ast.Constant object at 0x7da2047e89d0>, <ast.Constant object at 0x7da2047e8280>, <ast.Constant object at 0x7da2047e9960>, <ast.Constant object at 0x7da2047ea230>]]
variable[add] assign[=] list[[<ast.Constant object at 0x7da2047ea560>, <ast.Constant object at 0x7da2047ea8f0>, <ast.Constant object at 0x7da2047ebe20>, <ast.Constant object at 0x7da2047e8e20>, <ast.Constant object at 0x7da2047e8e80>]]
variable[b] assign[=] list[[]]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[idx]]]]]] begin[:]
variable[a] assign[=] call[name[add]][name[i]]
variable[m] assign[=] call[name[mul]][name[i]]
variable[i] assign[=] call[name[idx]][name[i]]
variable[t] assign[=] binary_operation[name[a] + call[name[int], parameter[call[name[filehash]][name[i]], constant[16]]]]
variable[v] assign[=] call[name[int], parameter[call[name[filehash]][<ast.Slice object at 0x7da2047e9390>], constant[16]]]
call[name[b].append, parameter[call[binary_operation[constant[%x] <ast.Mod object at 0x7da2590d6920> binary_operation[name[v] * name[m]]]][<ast.UnaryOp object at 0x7da2047eb310>]]]
return[call[constant[].join, parameter[name[b]]]] | keyword[def] identifier[discombobulate] ( identifier[self] , identifier[filehash] ):
literal[string]
identifier[idx] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[mul] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[add] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[b] =[]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[idx] )):
identifier[a] = identifier[add] [ identifier[i] ]
identifier[m] = identifier[mul] [ identifier[i] ]
identifier[i] = identifier[idx] [ identifier[i] ]
identifier[t] = identifier[a] + identifier[int] ( identifier[filehash] [ identifier[i] ], literal[int] )
identifier[v] = identifier[int] ( identifier[filehash] [ identifier[t] : identifier[t] + literal[int] ], literal[int] )
identifier[b] . identifier[append] (( literal[string] %( identifier[v] * identifier[m] ))[- literal[int] ])
keyword[return] literal[string] . identifier[join] ( identifier[b] ) | def discombobulate(self, filehash):
""" prepare napiprojekt scrambled hash """
idx = [14, 3, 6, 8, 2]
mul = [2, 2, 5, 4, 3]
add = [0, 13, 16, 11, 5]
b = []
for i in xrange(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(filehash[i], 16)
v = int(filehash[t:t + 2], 16)
b.append(('%x' % (v * m))[-1]) # depends on [control=['for'], data=['i']]
return ''.join(b) |
def op_at_code_loc(code, loc, opc):
    """Return the instruction name at code[loc] using
    opc to look up instruction names.  Returns 'got IndexError'
    if code[loc] is invalid.

    `code` is instruction bytecode, `loc` is an offset (integer) and
    `opc` is an opcode module from `xdis`.
    """
    # EAFP: only the bytecode access is guarded -- an out-of-range opcode
    # value in opc.opname should still propagate to the caller.
    try:
        opcode = code[loc]
    except IndexError:
        return 'got IndexError'
    return opc.opname[opcode]
constant[Return the instruction name at code[loc] using
opc to look up instruction names. Returns 'got IndexError'
if code[loc] is invalid.
`code` is instruction bytecode, `loc` is an offset (integer) and
`opc` is an opcode module from `xdis`.
]
<ast.Try object at 0x7da18dc99360>
return[call[name[opc].opname][name[op]]] | keyword[def] identifier[op_at_code_loc] ( identifier[code] , identifier[loc] , identifier[opc] ):
literal[string]
keyword[try] :
identifier[op] = identifier[code] [ identifier[loc] ]
keyword[except] identifier[IndexError] :
keyword[return] literal[string]
keyword[return] identifier[opc] . identifier[opname] [ identifier[op] ] | def op_at_code_loc(code, loc, opc):
"""Return the instruction name at code[loc] using
opc to look up instruction names. Returns 'got IndexError'
if code[loc] is invalid.
`code` is instruction bytecode, `loc` is an offset (integer) and
`opc` is an opcode module from `xdis`.
"""
try:
op = code[loc] # depends on [control=['try'], data=[]]
except IndexError:
return 'got IndexError' # depends on [control=['except'], data=[]]
return opc.opname[op] |
def get_mfd(self, slip, area, shear_modulus=30.0):
    '''
    Calculates activity rate on the fault
    :param float slip:
        Slip rate in mm/yr
    :param float area:
        Area of the fault surface (km ** 2)
    :param float shear_modulus:
        Shear modulus of the fault (GPa)
    :returns:
        * Minimum Magnitude (float)
        * Bin width (float)
        * Occurrence Rates (numpy.ndarray)
    '''
    # Working in Nm so convert: shear_modulus - GPa -> Nm
    # area - km ** 2. -> m ** 2.
    # slip - mm/yr -> m/yr
    moment_rate = (shear_modulus * 1.E9) * (area * 1.E6) * (slip / 1000.)
    # Seismic moment (Nm) of an event of the maximum magnitude.
    moment_mag = _scale_moment(self.mmax, in_nm=True)
    # Annual rate of maximum-magnitude events that balances the moment rate.
    characteristic_rate = moment_rate / moment_mag
    if self.sigma and (fabs(self.sigma) > 1E-5):
        # Spread the characteristic rate over magnitude bins using a
        # truncated Gaussian centred on mmax with width sigma; lower_bound
        # and upper_bound are the truncation limits in units of sigma.
        self.mmin = self.mmax + (self.lower_bound * self.sigma)
        mag_upper = self.mmax + (self.upper_bound * self.sigma)
        mag_range = np.arange(self.mmin,
                              mag_upper + self.bin_width,
                              self.bin_width)
        # Rate per bin = characteristic rate x probability mass of the
        # truncated normal over [m - bin/2, m + bin/2].
        self.occurrence_rate = characteristic_rate * (
            truncnorm.cdf(mag_range + (self.bin_width / 2.),
                          self.lower_bound, self.upper_bound,
                          loc=self.mmax, scale=self.sigma) -
            truncnorm.cdf(mag_range - (self.bin_width / 2.),
                          self.lower_bound, self.upper_bound,
                          loc=self.mmax, scale=self.sigma))
    else:
        # Degenerate (sigma ~ 0) case: returns only a single rate at mmax.
        self.mmin = self.mmax
        self.occurrence_rate = np.array([characteristic_rate], dtype=float)
    return self.mmin, self.bin_width, self.occurrence_rate
constant[
Calculates activity rate on the fault
:param float slip:
Slip rate in mm/yr
:param fault_width:
Width of the fault (km)
:param float disp_length_ratio:
Displacement to length ratio (dimensionless)
:param float shear_modulus:
Shear modulus of the fault (GPa)
:returns:
* Minimum Magnitude (float)
* Bin width (float)
* Occurrence Rates (numpy.ndarray)
]
variable[moment_rate] assign[=] binary_operation[binary_operation[binary_operation[name[shear_modulus] * constant[1000000000.0]] * binary_operation[name[area] * constant[1000000.0]]] * binary_operation[name[slip] / constant[1000.0]]]
variable[moment_mag] assign[=] call[name[_scale_moment], parameter[name[self].mmax]]
variable[characteristic_rate] assign[=] binary_operation[name[moment_rate] / name[moment_mag]]
if <ast.BoolOp object at 0x7da18f09dff0> begin[:]
name[self].mmin assign[=] binary_operation[name[self].mmax + binary_operation[name[self].lower_bound * name[self].sigma]]
variable[mag_upper] assign[=] binary_operation[name[self].mmax + binary_operation[name[self].upper_bound * name[self].sigma]]
variable[mag_range] assign[=] call[name[np].arange, parameter[name[self].mmin, binary_operation[name[mag_upper] + name[self].bin_width], name[self].bin_width]]
name[self].occurrence_rate assign[=] binary_operation[name[characteristic_rate] * binary_operation[call[name[truncnorm].cdf, parameter[binary_operation[name[mag_range] + binary_operation[name[self].bin_width / constant[2.0]]], name[self].lower_bound, name[self].upper_bound]] - call[name[truncnorm].cdf, parameter[binary_operation[name[mag_range] - binary_operation[name[self].bin_width / constant[2.0]]], name[self].lower_bound, name[self].upper_bound]]]]
return[tuple[[<ast.Attribute object at 0x7da1b26af3d0>, <ast.Attribute object at 0x7da1b26ae200>, <ast.Attribute object at 0x7da1b26af610>]]] | keyword[def] identifier[get_mfd] ( identifier[self] , identifier[slip] , identifier[area] , identifier[shear_modulus] = literal[int] ):
literal[string]
identifier[moment_rate] =( identifier[shear_modulus] * literal[int] )*( identifier[area] * literal[int] )*( identifier[slip] / literal[int] )
identifier[moment_mag] = identifier[_scale_moment] ( identifier[self] . identifier[mmax] , identifier[in_nm] = keyword[True] )
identifier[characteristic_rate] = identifier[moment_rate] / identifier[moment_mag]
keyword[if] identifier[self] . identifier[sigma] keyword[and] ( identifier[fabs] ( identifier[self] . identifier[sigma] )> literal[int] ):
identifier[self] . identifier[mmin] = identifier[self] . identifier[mmax] +( identifier[self] . identifier[lower_bound] * identifier[self] . identifier[sigma] )
identifier[mag_upper] = identifier[self] . identifier[mmax] +( identifier[self] . identifier[upper_bound] * identifier[self] . identifier[sigma] )
identifier[mag_range] = identifier[np] . identifier[arange] ( identifier[self] . identifier[mmin] ,
identifier[mag_upper] + identifier[self] . identifier[bin_width] ,
identifier[self] . identifier[bin_width] )
identifier[self] . identifier[occurrence_rate] = identifier[characteristic_rate] *(
identifier[truncnorm] . identifier[cdf] ( identifier[mag_range] +( identifier[self] . identifier[bin_width] / literal[int] ),
identifier[self] . identifier[lower_bound] , identifier[self] . identifier[upper_bound] ,
identifier[loc] = identifier[self] . identifier[mmax] , identifier[scale] = identifier[self] . identifier[sigma] )-
identifier[truncnorm] . identifier[cdf] ( identifier[mag_range] -( identifier[self] . identifier[bin_width] / literal[int] ),
identifier[self] . identifier[lower_bound] , identifier[self] . identifier[upper_bound] ,
identifier[loc] = identifier[self] . identifier[mmax] , identifier[scale] = identifier[self] . identifier[sigma] ))
keyword[else] :
identifier[self] . identifier[mmin] = identifier[self] . identifier[mmax]
identifier[self] . identifier[occurrence_rate] = identifier[np] . identifier[array] ([ identifier[characteristic_rate] ], identifier[dtype] = identifier[float] )
keyword[return] identifier[self] . identifier[mmin] , identifier[self] . identifier[bin_width] , identifier[self] . identifier[occurrence_rate] | def get_mfd(self, slip, area, shear_modulus=30.0):
"""
Calculates activity rate on the fault
:param float slip:
Slip rate in mm/yr
:param fault_width:
Width of the fault (km)
:param float disp_length_ratio:
Displacement to length ratio (dimensionless)
:param float shear_modulus:
Shear modulus of the fault (GPa)
:returns:
* Minimum Magnitude (float)
* Bin width (float)
* Occurrence Rates (numpy.ndarray)
"""
# Working in Nm so convert: shear_modulus - GPa -> Nm
# area - km ** 2. -> m ** 2.
# slip - mm/yr -> m/yr
moment_rate = shear_modulus * 1000000000.0 * (area * 1000000.0) * (slip / 1000.0)
moment_mag = _scale_moment(self.mmax, in_nm=True)
characteristic_rate = moment_rate / moment_mag
if self.sigma and fabs(self.sigma) > 1e-05:
self.mmin = self.mmax + self.lower_bound * self.sigma
mag_upper = self.mmax + self.upper_bound * self.sigma
mag_range = np.arange(self.mmin, mag_upper + self.bin_width, self.bin_width)
self.occurrence_rate = characteristic_rate * (truncnorm.cdf(mag_range + self.bin_width / 2.0, self.lower_bound, self.upper_bound, loc=self.mmax, scale=self.sigma) - truncnorm.cdf(mag_range - self.bin_width / 2.0, self.lower_bound, self.upper_bound, loc=self.mmax, scale=self.sigma)) # depends on [control=['if'], data=[]]
else:
# Returns only a single rate
self.mmin = self.mmax
self.occurrence_rate = np.array([characteristic_rate], dtype=float)
return (self.mmin, self.bin_width, self.occurrence_rate) |
def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_override=None):
    '''
    If template is a valid template engine, process the cmd and cwd through
    that engine.

    :param cmd: command string to render
    :param cwd: working-directory path to render
    :param template: name of an engine registered in
        salt.utils.templates.TEMPLATE_REGISTRY, or a falsy value to skip
        rendering entirely
    :param saltenv: salt fileserver environment exposed to the template
    :param pillarenv: pillar environment; falls back to
        ``__opts__['pillarenv']`` when pillar data is re-gathered
    :param pillar_override: extra pillar data used when re-gathering pillar
    :return: tuple of the rendered ``(cmd, cwd)``
    :raises CommandExecutionError: if the engine is unknown or rendering fails
    '''
    if not template:
        return (cmd, cwd)
    # render the path as a template using path_template_engine as the engine
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        raise CommandExecutionError(
            'Attempted to render file paths with unavailable engine '
            '{0}'.format(template)
        )

    # Build the context handed to the template engine.
    kwargs = {}
    kwargs['salt'] = __salt__
    # Only re-gather pillar data when the caller requested a specific pillar
    # environment or supplied overrides; otherwise reuse the cached __pillar__.
    if pillarenv is not None or pillar_override is not None:
        pillarenv = pillarenv or __opts__['pillarenv']
        kwargs['pillar'] = _gather_pillar(pillarenv, pillar_override)
    else:
        kwargs['pillar'] = __pillar__
    kwargs['grains'] = __grains__
    kwargs['opts'] = __opts__
    kwargs['saltenv'] = saltenv

    def _render(contents):
        # Render *contents* through the selected engine via a temp file,
        # returning the rendered text or raising on engine failure.
        # write out path to temp file
        tmp_path_fn = salt.utils.files.mkstemp()
        with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
            fp_.write(salt.utils.stringutils.to_str(contents))
        data = salt.utils.templates.TEMPLATE_REGISTRY[template](
            tmp_path_fn,
            to_str=True,
            **kwargs
        )
        # Temp file was only needed for the duration of the render.
        salt.utils.files.safe_rm(tmp_path_fn)
        if not data['result']:
            # Failed to render the template
            raise CommandExecutionError(
                'Failed to execute cmd with error: {0}'.format(
                    data['data']
                )
            )
        else:
            return data['data']

    cmd = _render(cmd)
    cwd = _render(cwd)
    return (cmd, cwd)
constant[
If template is a valid template engine, process the cmd and cwd through
that engine.
]
if <ast.UnaryOp object at 0x7da1b21874c0> begin[:]
return[tuple[[<ast.Name object at 0x7da1b2185f30>, <ast.Name object at 0x7da1b2187460>]]]
if compare[name[template] <ast.NotIn object at 0x7da2590d7190> name[salt].utils.templates.TEMPLATE_REGISTRY] begin[:]
<ast.Raise object at 0x7da1b2186f50>
variable[kwargs] assign[=] dictionary[[], []]
call[name[kwargs]][constant[salt]] assign[=] name[__salt__]
if <ast.BoolOp object at 0x7da18dc99450> begin[:]
variable[pillarenv] assign[=] <ast.BoolOp object at 0x7da18dc9a6e0>
call[name[kwargs]][constant[pillar]] assign[=] call[name[_gather_pillar], parameter[name[pillarenv], name[pillar_override]]]
call[name[kwargs]][constant[grains]] assign[=] name[__grains__]
call[name[kwargs]][constant[opts]] assign[=] name[__opts__]
call[name[kwargs]][constant[saltenv]] assign[=] name[saltenv]
def function[_render, parameter[contents]]:
variable[tmp_path_fn] assign[=] call[name[salt].utils.files.mkstemp, parameter[]]
with call[name[salt].utils.files.fopen, parameter[name[tmp_path_fn], constant[w+]]] begin[:]
call[name[fp_].write, parameter[call[name[salt].utils.stringutils.to_str, parameter[name[contents]]]]]
variable[data] assign[=] call[call[name[salt].utils.templates.TEMPLATE_REGISTRY][name[template]], parameter[name[tmp_path_fn]]]
call[name[salt].utils.files.safe_rm, parameter[name[tmp_path_fn]]]
if <ast.UnaryOp object at 0x7da1b21eca90> begin[:]
<ast.Raise object at 0x7da1b21ed120>
variable[cmd] assign[=] call[name[_render], parameter[name[cmd]]]
variable[cwd] assign[=] call[name[_render], parameter[name[cwd]]]
return[tuple[[<ast.Name object at 0x7da1b21edfc0>, <ast.Name object at 0x7da1b21ee080>]]] | keyword[def] identifier[_render_cmd] ( identifier[cmd] , identifier[cwd] , identifier[template] , identifier[saltenv] = literal[string] , identifier[pillarenv] = keyword[None] , identifier[pillar_override] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[template] :
keyword[return] ( identifier[cmd] , identifier[cwd] )
keyword[if] identifier[template] keyword[not] keyword[in] identifier[salt] . identifier[utils] . identifier[templates] . identifier[TEMPLATE_REGISTRY] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string] . identifier[format] ( identifier[template] )
)
identifier[kwargs] ={}
identifier[kwargs] [ literal[string] ]= identifier[__salt__]
keyword[if] identifier[pillarenv] keyword[is] keyword[not] keyword[None] keyword[or] identifier[pillar_override] keyword[is] keyword[not] keyword[None] :
identifier[pillarenv] = identifier[pillarenv] keyword[or] identifier[__opts__] [ literal[string] ]
identifier[kwargs] [ literal[string] ]= identifier[_gather_pillar] ( identifier[pillarenv] , identifier[pillar_override] )
keyword[else] :
identifier[kwargs] [ literal[string] ]= identifier[__pillar__]
identifier[kwargs] [ literal[string] ]= identifier[__grains__]
identifier[kwargs] [ literal[string] ]= identifier[__opts__]
identifier[kwargs] [ literal[string] ]= identifier[saltenv]
keyword[def] identifier[_render] ( identifier[contents] ):
identifier[tmp_path_fn] = identifier[salt] . identifier[utils] . identifier[files] . identifier[mkstemp] ()
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[tmp_path_fn] , literal[string] ) keyword[as] identifier[fp_] :
identifier[fp_] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[contents] ))
identifier[data] = identifier[salt] . identifier[utils] . identifier[templates] . identifier[TEMPLATE_REGISTRY] [ identifier[template] ](
identifier[tmp_path_fn] ,
identifier[to_str] = keyword[True] ,
** identifier[kwargs]
)
identifier[salt] . identifier[utils] . identifier[files] . identifier[safe_rm] ( identifier[tmp_path_fn] )
keyword[if] keyword[not] identifier[data] [ literal[string] ]:
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] (
identifier[data] [ literal[string] ]
)
)
keyword[else] :
keyword[return] identifier[data] [ literal[string] ]
identifier[cmd] = identifier[_render] ( identifier[cmd] )
identifier[cwd] = identifier[_render] ( identifier[cwd] )
keyword[return] ( identifier[cmd] , identifier[cwd] ) | def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_override=None):
"""
If template is a valid template engine, process the cmd and cwd through
that engine.
"""
if not template:
return (cmd, cwd) # depends on [control=['if'], data=[]]
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError('Attempted to render file paths with unavailable engine {0}'.format(template)) # depends on [control=['if'], data=['template']]
kwargs = {}
kwargs['salt'] = __salt__
if pillarenv is not None or pillar_override is not None:
pillarenv = pillarenv or __opts__['pillarenv']
kwargs['pillar'] = _gather_pillar(pillarenv, pillar_override) # depends on [control=['if'], data=[]]
else:
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['saltenv'] = saltenv
def _render(contents):
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(contents)) # depends on [control=['with'], data=['fp_']]
data = salt.utils.templates.TEMPLATE_REGISTRY[template](tmp_path_fn, to_str=True, **kwargs)
salt.utils.files.safe_rm(tmp_path_fn)
if not data['result']:
# Failed to render the template
raise CommandExecutionError('Failed to execute cmd with error: {0}'.format(data['data'])) # depends on [control=['if'], data=[]]
else:
return data['data']
cmd = _render(cmd)
cwd = _render(cwd)
return (cmd, cwd) |
def file_exists(self, fid):
    """Checks if file with provided fid exists.

    Args:
        **fid**: File identifier <volume_id>,<file_name_hash>

    Returns:
        True if file exists. False if not.
    """
    # get_file_size() returns None for an unknown file, so existence
    # reduces to a single not-None check.
    return self.get_file_size(fid) is not None
constant[Checks if file with provided fid exists
Args:
**fid**: File identifier <volume_id>,<file_name_hash>
Returns:
True if file exists. False if not.
]
variable[res] assign[=] call[name[self].get_file_size, parameter[name[fid]]]
if compare[name[res] is_not constant[None]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[file_exists] ( identifier[self] , identifier[fid] ):
literal[string]
identifier[res] = identifier[self] . identifier[get_file_size] ( identifier[fid] )
keyword[if] identifier[res] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def file_exists(self, fid):
"""Checks if file with provided fid exists
Args:
**fid**: File identifier <volume_id>,<file_name_hash>
Returns:
True if file exists. False if not.
"""
res = self.get_file_size(fid)
if res is not None:
return True # depends on [control=['if'], data=[]]
return False |
def prefix_indent(prefix, textblock, later_prefix=' '):
    """
    Prefix and indent all lines in *textblock*.

    *prefix* is a prefix string
    *later_prefix* is used on all but the first line, if it is a single character
    it will be repeated to match length of *prefix*

    The returned string always ends with a newline.
    """
    lines = textblock.split('\n')
    # First line carries the full prefix.
    out = prefix + lines[0] + '\n'
    if len(later_prefix) == 1:
        # Repeat the single character itself (the old code hard-coded a
        # space here, contradicting the documented behaviour above).
        later_prefix = later_prefix * len(prefix)
    out = out + '\n'.join([later_prefix + x for x in lines[1:]])
    # Guarantee a trailing newline.
    if out[-1] != '\n':
        return out + '\n'
    else:
        return out
constant[
Prefix and indent all lines in *textblock*.
*prefix* is a prefix string
*later_prefix* is used on all but the first line, if it is a single character
it will be repeated to match length of *prefix*
]
variable[textblock] assign[=] call[name[textblock].split, parameter[constant[
]]]
variable[line] assign[=] binary_operation[binary_operation[name[prefix] + call[name[textblock]][constant[0]]] + constant[
]]
if compare[call[name[len], parameter[name[later_prefix]]] equal[==] constant[1]] begin[:]
variable[later_prefix] assign[=] binary_operation[constant[ ] * call[name[len], parameter[name[prefix]]]]
variable[line] assign[=] binary_operation[name[line] + call[constant[
].join, parameter[<ast.ListComp object at 0x7da18bc70310>]]]
if compare[call[name[line]][<ast.UnaryOp object at 0x7da18bc73670>] not_equal[!=] constant[
]] begin[:]
return[binary_operation[name[line] + constant[
]]] | keyword[def] identifier[prefix_indent] ( identifier[prefix] , identifier[textblock] , identifier[later_prefix] = literal[string] ):
literal[string]
identifier[textblock] = identifier[textblock] . identifier[split] ( literal[string] )
identifier[line] = identifier[prefix] + identifier[textblock] [ literal[int] ]+ literal[string]
keyword[if] identifier[len] ( identifier[later_prefix] )== literal[int] :
identifier[later_prefix] = literal[string] * identifier[len] ( identifier[prefix] )
identifier[line] = identifier[line] + literal[string] . identifier[join] ([ identifier[later_prefix] + identifier[x] keyword[for] identifier[x] keyword[in] identifier[textblock] [ literal[int] :]])
keyword[if] identifier[line] [- literal[int] ]!= literal[string] :
keyword[return] identifier[line] + literal[string]
keyword[else] :
keyword[return] identifier[line] | def prefix_indent(prefix, textblock, later_prefix=' '):
"""
Prefix and indent all lines in *textblock*.
*prefix* is a prefix string
*later_prefix* is used on all but the first line, if it is a single character
it will be repeated to match length of *prefix*
"""
textblock = textblock.split('\n')
line = prefix + textblock[0] + '\n'
if len(later_prefix) == 1:
later_prefix = ' ' * len(prefix) # depends on [control=['if'], data=[]]
line = line + '\n'.join([later_prefix + x for x in textblock[1:]])
if line[-1] != '\n':
return line + '\n' # depends on [control=['if'], data=[]]
else:
return line |
def _get_row_amplitudes(self):
    """
    Perform a real Discrete Fourier Transform (DFT; implemented
    using a Fast Fourier Transform algorithm, FFT) of the current
    sample from the signal multiplied by the smoothing window.
    See numpy.fft.rfft for information about the Fourier transform.

    Returns:
        ndarray of shape (self._num_of_channels, 1): mean absolute
        time-domain response of each cochlear channel.
    """
    sample_rate = self.signal.sample_rate
    # Number of positive-frequency bins kept from the real FFT.  Integer
    # division makes the value usable as a slice bound and reshape size on
    # Python 3 as well (sample_rate/2 would be a float there and raise).
    half_rate = sample_rate // 2
    # A signal window *must* span one sample rate, irrespective of interval
    # length; tile() requires an integer repetition count.
    repetitions = int(ceil(1.0 / self.signal.interval_length))
    signal_window = tile(self.signal(), repetitions)
    if self.windowing_function is None:
        smoothed_window = signal_window[0:sample_rate]
    else:
        # Apply the smoothing (apodization) window to reduce spectral leakage.
        smoothed_window = signal_window[0:sample_rate] * self.windowing_function(sample_rate)
    row_amplitudes = abs(fft.rfft(smoothed_window))[0:half_rate]
    row_amplitudes = row_amplitudes.reshape(1, half_rate)
    # Weight the spectrum by each channel's filter response; cochlear_channels
    # is assumed to be shaped (num_of_channels, half_rate) -- TODO confirm
    # against where it is constructed.
    filter_responses = multiply(self.cochlear_channels, row_amplitudes)
    sheet_responses = zeros(self._num_of_channels)
    for channel in range(0, self._num_of_channels):
        # Inverse-transform back to the time domain and average the
        # magnitudes to obtain a single activity value per channel.
        time_responses = abs(fft.ifft(filter_responses[channel]))
        sheet_responses[channel] = sum(time_responses) / (sample_rate / 2.0)
    return sheet_responses.reshape(self._num_of_channels, 1)
constant[
Perform a real Discrete Fourier Transform (DFT; implemented
using a Fast Fourier Transform algorithm, FFT) of the current
sample from the signal multiplied by the smoothing window.
See numpy.rfft for information about the Fourier transform.
]
variable[sample_rate] assign[=] name[self].signal.sample_rate
variable[signal_window] assign[=] call[name[tile], parameter[call[name[self].signal, parameter[]], call[name[ceil], parameter[binary_operation[constant[1.0] / name[self].signal.interval_length]]]]]
if compare[name[self].windowing_function equal[==] constant[None]] begin[:]
variable[smoothed_window] assign[=] call[name[signal_window]][<ast.Slice object at 0x7da1b2564640>]
variable[row_amplitudes] assign[=] call[call[name[abs], parameter[call[name[fft].rfft, parameter[name[smoothed_window]]]]]][<ast.Slice object at 0x7da1b2564df0>]
variable[row_amplitudes] assign[=] call[name[row_amplitudes].reshape, parameter[constant[1], binary_operation[name[sample_rate] / constant[2.0]]]]
variable[filter_responses] assign[=] call[name[multiply], parameter[name[self].cochlear_channels, name[row_amplitudes]]]
variable[sheet_responses] assign[=] call[name[zeros], parameter[name[self]._num_of_channels]]
for taget[name[channel]] in starred[call[name[range], parameter[constant[0], name[self]._num_of_channels]]] begin[:]
variable[time_responses] assign[=] call[name[abs], parameter[call[name[fft].ifft, parameter[call[name[filter_responses]][name[channel]]]]]]
call[name[sheet_responses]][name[channel]] assign[=] binary_operation[call[name[sum], parameter[name[time_responses]]] / binary_operation[name[sample_rate] / constant[2.0]]]
return[call[name[sheet_responses].reshape, parameter[name[self]._num_of_channels, constant[1]]]] | keyword[def] identifier[_get_row_amplitudes] ( identifier[self] ):
literal[string]
identifier[sample_rate] = identifier[self] . identifier[signal] . identifier[sample_rate]
identifier[signal_window] = identifier[tile] ( identifier[self] . identifier[signal] (), identifier[ceil] ( literal[int] / identifier[self] . identifier[signal] . identifier[interval_length] ))
keyword[if] identifier[self] . identifier[windowing_function] == keyword[None] :
identifier[smoothed_window] = identifier[signal_window] [ literal[int] : identifier[sample_rate] ]
keyword[else] :
identifier[smoothed_window] = identifier[signal_window] [ literal[int] : identifier[sample_rate] ]* identifier[self] . identifier[windowing_function] ( identifier[sample_rate] )
identifier[row_amplitudes] = identifier[abs] ( identifier[fft] . identifier[rfft] ( identifier[smoothed_window] ))[ literal[int] : identifier[sample_rate] / literal[int] ]
identifier[row_amplitudes] = identifier[row_amplitudes] . identifier[reshape] ( literal[int] , identifier[sample_rate] / literal[int] )
identifier[filter_responses] = identifier[multiply] ( identifier[self] . identifier[cochlear_channels] , identifier[row_amplitudes] )
identifier[sheet_responses] = identifier[zeros] ( identifier[self] . identifier[_num_of_channels] )
keyword[for] identifier[channel] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_num_of_channels] ):
identifier[time_responses] = identifier[abs] ( identifier[fft] . identifier[ifft] ( identifier[filter_responses] [ identifier[channel] ]))
identifier[sheet_responses] [ identifier[channel] ]= identifier[sum] ( identifier[time_responses] )/( identifier[sample_rate] / literal[int] )
keyword[return] identifier[sheet_responses] . identifier[reshape] ( identifier[self] . identifier[_num_of_channels] , literal[int] ) | def _get_row_amplitudes(self):
"""
Perform a real Discrete Fourier Transform (DFT; implemented
using a Fast Fourier Transform algorithm, FFT) of the current
sample from the signal multiplied by the smoothing window.
See numpy.rfft for information about the Fourier transform.
"""
sample_rate = self.signal.sample_rate
# A signal window *must* span one sample rate, irrespective of interval length.
signal_window = tile(self.signal(), ceil(1.0 / self.signal.interval_length))
if self.windowing_function == None:
smoothed_window = signal_window[0:sample_rate] # depends on [control=['if'], data=[]]
else:
smoothed_window = signal_window[0:sample_rate] * self.windowing_function(sample_rate)
row_amplitudes = abs(fft.rfft(smoothed_window))[0:sample_rate / 2]
row_amplitudes = row_amplitudes.reshape(1, sample_rate / 2.0)
filter_responses = multiply(self.cochlear_channels, row_amplitudes)
sheet_responses = zeros(self._num_of_channels)
for channel in range(0, self._num_of_channels):
time_responses = abs(fft.ifft(filter_responses[channel]))
sheet_responses[channel] = sum(time_responses) / (sample_rate / 2.0) # depends on [control=['for'], data=['channel']]
return sheet_responses.reshape(self._num_of_channels, 1) |
def pca(onarray, offarray, n=10, exchs=None, pc=False, mode='mean'):
    """Apply Principal Component Analysis (PCA) method to estimate baselines at each time.

    Off-point spectra bracketing each on-point scan train a truncated SVD;
    the on-point data are projected onto the leading `n` components to
    reconstruct the (atmospheric) baseline for that scan.

    Args:
        onarray (decode.array): Decode array of on-point observations.
        offarray (decode.array): Decode array of off-point observations.
        n (int): The number of pricipal components.
        exchs (list of int, optional): Channel indices zeroed out before the
            decomposition. Defaults to [16, 44, 46]
            (NOTE(review): presumably known bad channels -- confirm).
        pc (bool): When True, this function also returns eigen vectors and their coefficients.
        mode (None or str): The way of correcting offsets.
            'mean': Mean.
            'median': Median.
            None: No correction.

    Returns:
        filtered (decode.array): Baseline-subtracted array.
            (NOTE(review): the returned values are the reconstruction
            ``C @ P + offset``; confirm whether subtraction from the data
            happens in the caller.)
        When pc is True:
            Ps (list(np.ndarray)): Eigen vectors.
            Cs (list(np.ndarray)): Coefficients.
    """
    logger = getLogger('decode.models.pca')
    logger.info('n_components exchs mode')
    if exchs is None:
        # Default set of channels excluded from the fit.
        exchs = [16, 44, 46]
    logger.info('{} {} {}'.format(n, exchs, mode))
    # Scan identifiers, used to pair every on-scan with its neighbouring
    # off-scans below.
    offid = np.unique(offarray.scanid)
    onid = np.unique(onarray.scanid)
    onarray = onarray.copy() # Xarray
    # Zero the excluded channels so they cannot influence the SVD.
    onarray[:, exchs] = 0
    onvalues = onarray.values
    onscanid = onarray.scanid.values
    offarray = offarray.copy() # Xarray
    offarray[:, exchs] = 0
    offvalues = offarray.values
    offscanid = offarray.scanid.values
    Ps, Cs = [], []
    # Output container shaped like the on-point data.
    # NOTE(review): dc.full_like is called with the array itself as the fill
    # value -- assumed to copy onarray's data/coords; confirm.
    Xatm = dc.full_like(onarray, onarray)
    Xatmvalues = Xatm.values
    model = TruncatedSVD(n_components=n)
    for i in onid:
        # Indices of the off-scans immediately before/after this on-scan.
        leftid = np.searchsorted(offid, i) - 1
        rightid = np.searchsorted(offid, i)
        Xon = onvalues[onscanid == i]
        if leftid == -1:
            # No off-scan precedes the first on-scan: train on the right one only.
            Xoff = offvalues[offscanid == offid[rightid]]
            Xoff_m = getattr(np, 'nan'+mode)(Xoff, axis=0) if mode is not None else 0
            Xon_m = Xoff_m
            model.fit(Xoff - Xoff_m)
        elif rightid == len(offid):
            # No off-scan follows the last on-scan: train on the left one only.
            Xoff = offvalues[offscanid == offid[leftid]]
            Xoff_m = getattr(np, 'nan'+mode)(Xoff, axis=0) if mode is not None else 0
            Xon_m = Xoff_m
            model.fit(Xoff - Xoff_m)
        else:
            # Interior on-scan: train on both bracketing off-scans, each
            # corrected by its own offset; the on-scan offset uses both.
            Xoff_l = offvalues[offscanid == offid[leftid]]
            Xoff_lm = getattr(np, 'nan'+mode)(Xoff_l, axis=0) if mode is not None else 0
            Xoff_r = offvalues[offscanid == offid[rightid]]
            Xoff_rm = getattr(np, 'nan'+mode)(Xoff_r, axis=0) if mode is not None else 0
            Xon_m = getattr(np, 'nan'+mode)(np.vstack([Xoff_l, Xoff_r]), axis=0) if mode is not None else 0
            model.fit(np.vstack([Xoff_l - Xoff_lm, Xoff_r - Xoff_rm]))
        P = model.components_
        C = model.transform(Xon - Xon_m)
        # Reconstruct the baseline from the leading components and restore
        # the offset.
        Xatmvalues[onscanid == i] = C @ P + Xon_m
        # Xatms.append(dc.full_like(Xon, C @ P + Xon_m.values))
        Ps.append(P)
        Cs.append(C)
    if pc:
        return Xatm, Ps, Cs
    else:
        return Xatm
constant[Apply Principal Component Analysis (PCA) method to estimate baselines at each time.
Args:
onarray (decode.array): Decode array of on-point observations.
offarray (decode.array): Decode array of off-point observations.
n (int): The number of pricipal components.
pc (bool): When True, this function also returns eigen vectors and their coefficients.
mode (None or str): The way of correcting offsets.
'mean': Mean.
'median': Median.
None: No correction.
Returns:
filtered (decode.array): Baseline-subtracted array.
When pc is True:
Ps (list(np.ndarray)): Eigen vectors.
Cs (list(np.ndarray)): Coefficients.
]
variable[logger] assign[=] call[name[getLogger], parameter[constant[decode.models.pca]]]
call[name[logger].info, parameter[constant[n_components exchs mode]]]
if compare[name[exchs] is constant[None]] begin[:]
variable[exchs] assign[=] list[[<ast.Constant object at 0x7da20e961150>, <ast.Constant object at 0x7da20e962b90>, <ast.Constant object at 0x7da20e960610>]]
call[name[logger].info, parameter[call[constant[{} {} {}].format, parameter[name[n], name[exchs], name[mode]]]]]
variable[offid] assign[=] call[name[np].unique, parameter[name[offarray].scanid]]
variable[onid] assign[=] call[name[np].unique, parameter[name[onarray].scanid]]
variable[onarray] assign[=] call[name[onarray].copy, parameter[]]
call[name[onarray]][tuple[[<ast.Slice object at 0x7da20e960520>, <ast.Name object at 0x7da20e960ca0>]]] assign[=] constant[0]
variable[onvalues] assign[=] name[onarray].values
variable[onscanid] assign[=] name[onarray].scanid.values
variable[offarray] assign[=] call[name[offarray].copy, parameter[]]
call[name[offarray]][tuple[[<ast.Slice object at 0x7da20e962110>, <ast.Name object at 0x7da20e963250>]]] assign[=] constant[0]
variable[offvalues] assign[=] name[offarray].values
variable[offscanid] assign[=] name[offarray].scanid.values
<ast.Tuple object at 0x7da20e961390> assign[=] tuple[[<ast.List object at 0x7da20e961330>, <ast.List object at 0x7da20e962fe0>]]
variable[Xatm] assign[=] call[name[dc].full_like, parameter[name[onarray], name[onarray]]]
variable[Xatmvalues] assign[=] name[Xatm].values
variable[model] assign[=] call[name[TruncatedSVD], parameter[]]
for taget[name[i]] in starred[name[onid]] begin[:]
variable[leftid] assign[=] binary_operation[call[name[np].searchsorted, parameter[name[offid], name[i]]] - constant[1]]
variable[rightid] assign[=] call[name[np].searchsorted, parameter[name[offid], name[i]]]
variable[Xon] assign[=] call[name[onvalues]][compare[name[onscanid] equal[==] name[i]]]
if compare[name[leftid] equal[==] <ast.UnaryOp object at 0x7da20e9b1f90>] begin[:]
variable[Xoff] assign[=] call[name[offvalues]][compare[name[offscanid] equal[==] call[name[offid]][name[rightid]]]]
variable[Xoff_m] assign[=] <ast.IfExp object at 0x7da20e9b3a90>
variable[Xon_m] assign[=] name[Xoff_m]
call[name[model].fit, parameter[binary_operation[name[Xoff] - name[Xoff_m]]]]
variable[P] assign[=] name[model].components_
variable[C] assign[=] call[name[model].transform, parameter[binary_operation[name[Xon] - name[Xon_m]]]]
call[name[Xatmvalues]][compare[name[onscanid] equal[==] name[i]]] assign[=] binary_operation[binary_operation[name[C] <ast.MatMult object at 0x7da2590d6860> name[P]] + name[Xon_m]]
call[name[Ps].append, parameter[name[P]]]
call[name[Cs].append, parameter[name[C]]]
if name[pc] begin[:]
return[tuple[[<ast.Name object at 0x7da20c7cac50>, <ast.Name object at 0x7da20c7c84c0>, <ast.Name object at 0x7da20c7ca830>]]] | keyword[def] identifier[pca] ( identifier[onarray] , identifier[offarray] , identifier[n] = literal[int] , identifier[exchs] = keyword[None] , identifier[pc] = keyword[False] , identifier[mode] = literal[string] ):
literal[string]
identifier[logger] = identifier[getLogger] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[exchs] keyword[is] keyword[None] :
identifier[exchs] =[ literal[int] , literal[int] , literal[int] ]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[n] , identifier[exchs] , identifier[mode] ))
identifier[offid] = identifier[np] . identifier[unique] ( identifier[offarray] . identifier[scanid] )
identifier[onid] = identifier[np] . identifier[unique] ( identifier[onarray] . identifier[scanid] )
identifier[onarray] = identifier[onarray] . identifier[copy] ()
identifier[onarray] [:, identifier[exchs] ]= literal[int]
identifier[onvalues] = identifier[onarray] . identifier[values]
identifier[onscanid] = identifier[onarray] . identifier[scanid] . identifier[values]
identifier[offarray] = identifier[offarray] . identifier[copy] ()
identifier[offarray] [:, identifier[exchs] ]= literal[int]
identifier[offvalues] = identifier[offarray] . identifier[values]
identifier[offscanid] = identifier[offarray] . identifier[scanid] . identifier[values]
identifier[Ps] , identifier[Cs] =[],[]
identifier[Xatm] = identifier[dc] . identifier[full_like] ( identifier[onarray] , identifier[onarray] )
identifier[Xatmvalues] = identifier[Xatm] . identifier[values]
identifier[model] = identifier[TruncatedSVD] ( identifier[n_components] = identifier[n] )
keyword[for] identifier[i] keyword[in] identifier[onid] :
identifier[leftid] = identifier[np] . identifier[searchsorted] ( identifier[offid] , identifier[i] )- literal[int]
identifier[rightid] = identifier[np] . identifier[searchsorted] ( identifier[offid] , identifier[i] )
identifier[Xon] = identifier[onvalues] [ identifier[onscanid] == identifier[i] ]
keyword[if] identifier[leftid] ==- literal[int] :
identifier[Xoff] = identifier[offvalues] [ identifier[offscanid] == identifier[offid] [ identifier[rightid] ]]
identifier[Xoff_m] = identifier[getattr] ( identifier[np] , literal[string] + identifier[mode] )( identifier[Xoff] , identifier[axis] = literal[int] ) keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] keyword[else] literal[int]
identifier[Xon_m] = identifier[Xoff_m]
identifier[model] . identifier[fit] ( identifier[Xoff] - identifier[Xoff_m] )
keyword[elif] identifier[rightid] == identifier[len] ( identifier[offid] ):
identifier[Xoff] = identifier[offvalues] [ identifier[offscanid] == identifier[offid] [ identifier[leftid] ]]
identifier[Xoff_m] = identifier[getattr] ( identifier[np] , literal[string] + identifier[mode] )( identifier[Xoff] , identifier[axis] = literal[int] ) keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] keyword[else] literal[int]
identifier[Xon_m] = identifier[Xoff_m]
identifier[model] . identifier[fit] ( identifier[Xoff] - identifier[Xoff_m] )
keyword[else] :
identifier[Xoff_l] = identifier[offvalues] [ identifier[offscanid] == identifier[offid] [ identifier[leftid] ]]
identifier[Xoff_lm] = identifier[getattr] ( identifier[np] , literal[string] + identifier[mode] )( identifier[Xoff_l] , identifier[axis] = literal[int] ) keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] keyword[else] literal[int]
identifier[Xoff_r] = identifier[offvalues] [ identifier[offscanid] == identifier[offid] [ identifier[rightid] ]]
identifier[Xoff_rm] = identifier[getattr] ( identifier[np] , literal[string] + identifier[mode] )( identifier[Xoff_r] , identifier[axis] = literal[int] ) keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] keyword[else] literal[int]
identifier[Xon_m] = identifier[getattr] ( identifier[np] , literal[string] + identifier[mode] )( identifier[np] . identifier[vstack] ([ identifier[Xoff_l] , identifier[Xoff_r] ]), identifier[axis] = literal[int] ) keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] keyword[else] literal[int]
identifier[model] . identifier[fit] ( identifier[np] . identifier[vstack] ([ identifier[Xoff_l] - identifier[Xoff_lm] , identifier[Xoff_r] - identifier[Xoff_rm] ]))
identifier[P] = identifier[model] . identifier[components_]
identifier[C] = identifier[model] . identifier[transform] ( identifier[Xon] - identifier[Xon_m] )
identifier[Xatmvalues] [ identifier[onscanid] == identifier[i] ]= identifier[C] @ identifier[P] + identifier[Xon_m]
identifier[Ps] . identifier[append] ( identifier[P] )
identifier[Cs] . identifier[append] ( identifier[C] )
keyword[if] identifier[pc] :
keyword[return] identifier[Xatm] , identifier[Ps] , identifier[Cs]
keyword[else] :
keyword[return] identifier[Xatm] | def pca(onarray, offarray, n=10, exchs=None, pc=False, mode='mean'):
"""Apply Principal Component Analysis (PCA) method to estimate baselines at each time.
Args:
onarray (decode.array): Decode array of on-point observations.
offarray (decode.array): Decode array of off-point observations.
n (int): The number of pricipal components.
pc (bool): When True, this function also returns eigen vectors and their coefficients.
mode (None or str): The way of correcting offsets.
'mean': Mean.
'median': Median.
None: No correction.
Returns:
filtered (decode.array): Baseline-subtracted array.
When pc is True:
Ps (list(np.ndarray)): Eigen vectors.
Cs (list(np.ndarray)): Coefficients.
"""
logger = getLogger('decode.models.pca')
logger.info('n_components exchs mode')
if exchs is None:
exchs = [16, 44, 46] # depends on [control=['if'], data=['exchs']]
logger.info('{} {} {}'.format(n, exchs, mode))
offid = np.unique(offarray.scanid)
onid = np.unique(onarray.scanid)
onarray = onarray.copy() # Xarray
onarray[:, exchs] = 0
onvalues = onarray.values
onscanid = onarray.scanid.values
offarray = offarray.copy() # Xarray
offarray[:, exchs] = 0
offvalues = offarray.values
offscanid = offarray.scanid.values
(Ps, Cs) = ([], [])
Xatm = dc.full_like(onarray, onarray)
Xatmvalues = Xatm.values
model = TruncatedSVD(n_components=n)
for i in onid:
leftid = np.searchsorted(offid, i) - 1
rightid = np.searchsorted(offid, i)
Xon = onvalues[onscanid == i]
if leftid == -1:
Xoff = offvalues[offscanid == offid[rightid]]
Xoff_m = getattr(np, 'nan' + mode)(Xoff, axis=0) if mode is not None else 0
Xon_m = Xoff_m
model.fit(Xoff - Xoff_m) # depends on [control=['if'], data=[]]
elif rightid == len(offid):
Xoff = offvalues[offscanid == offid[leftid]]
Xoff_m = getattr(np, 'nan' + mode)(Xoff, axis=0) if mode is not None else 0
Xon_m = Xoff_m
model.fit(Xoff - Xoff_m) # depends on [control=['if'], data=[]]
else:
Xoff_l = offvalues[offscanid == offid[leftid]]
Xoff_lm = getattr(np, 'nan' + mode)(Xoff_l, axis=0) if mode is not None else 0
Xoff_r = offvalues[offscanid == offid[rightid]]
Xoff_rm = getattr(np, 'nan' + mode)(Xoff_r, axis=0) if mode is not None else 0
Xon_m = getattr(np, 'nan' + mode)(np.vstack([Xoff_l, Xoff_r]), axis=0) if mode is not None else 0
model.fit(np.vstack([Xoff_l - Xoff_lm, Xoff_r - Xoff_rm]))
P = model.components_
C = model.transform(Xon - Xon_m)
Xatmvalues[onscanid == i] = C @ P + Xon_m
# Xatms.append(dc.full_like(Xon, C @ P + Xon_m.values))
Ps.append(P)
Cs.append(C) # depends on [control=['for'], data=['i']]
if pc:
return (Xatm, Ps, Cs) # depends on [control=['if'], data=[]]
else:
return Xatm |
def _buildStartOpts(self, streamUrl, playList=False):
    """ Builds the options to pass to subprocess."""
    # NOTE(review): "-Irc" looks like the remote-control interface flag and
    # "-vv" raises verbosity -- confirm against what PLAYER_CMD is.
    # playList is accepted for interface compatibility but unused here.
    return [self.PLAYER_CMD, "-Irc", "-vv", streamUrl]
constant[ Builds the options to pass to subprocess.]
variable[opts] assign[=] list[[<ast.Attribute object at 0x7da1b1116470>, <ast.Constant object at 0x7da1b1117a90>, <ast.Constant object at 0x7da1b11179a0>, <ast.Name object at 0x7da1b1116410>]]
return[name[opts]] | keyword[def] identifier[_buildStartOpts] ( identifier[self] , identifier[streamUrl] , identifier[playList] = keyword[False] ):
literal[string]
identifier[opts] =[ identifier[self] . identifier[PLAYER_CMD] , literal[string] , literal[string] , identifier[streamUrl] ]
keyword[return] identifier[opts] | def _buildStartOpts(self, streamUrl, playList=False):
""" Builds the options to pass to subprocess."""
#opts = [self.PLAYER_CMD, "-Irc", "--quiet", streamUrl]
opts = [self.PLAYER_CMD, '-Irc', '-vv', streamUrl]
return opts |
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context

    :returns: SyncStreamContext for this SyncStreamInstance
    :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
    """
    # Build the context lazily on first access and cache it afterwards.
    context = self._context
    if context is None:
        context = SyncStreamContext(
            self._version,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )
        self._context = context
    return context
constant[
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncStreamContext for this SyncStreamInstance
:rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[SyncStreamContext], parameter[name[self]._version]]
return[name[self]._context] | keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[SyncStreamContext] (
identifier[self] . identifier[_version] ,
identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ],
)
keyword[return] identifier[self] . identifier[_context] | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncStreamContext for this SyncStreamInstance
:rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
"""
if self._context is None:
self._context = SyncStreamContext(self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._context |
def standard_suggestions(self):
    """Standard generic suggestions.

    :return: List of standard suggestions for users who encounter errors.
    :rtype: BulletedList
    """
    # The canonical set of recovery steps shown to users, in order.
    messages = (
        'Check that you have the latest version of InaSAFE installed '
        '- you may have encountered a bug that is fixed in a '
        'subsequent release.',
        'Check the InaSAFE documentation to see if you are trying to '
        'do something unsupported.',
        'Report the problem using the issue tracker at '
        'https://github.com/inasafe/inasafe/issues. Reporting an issue '
        'requires that you first create a free account at '
        'http://github.com. When you report the issue, '
        'please copy and paste the complete contents of this panel '
        'into the issue to ensure the best possible chance of getting '
        'your issue resolved.',
        'Try contacting one of the InaSAFE development team by '
        'sending an email to info@inasafe.org. Please ensure that you '
        'copy and paste the complete contents of this panel into the '
        'email.',
    )
    suggestions = BulletedList()
    for message in messages:
        suggestions.add(message)
    return suggestions
constant[Standard generic suggestions.
:return: List of standard suggestions for users who encounter errors.
:rtype: BulletedList
]
variable[suggestions] assign[=] call[name[BulletedList], parameter[]]
call[name[suggestions].add, parameter[constant[Check that you have the latest version of InaSAFE installed - you may have encountered a bug that is fixed in a subsequent release.]]]
call[name[suggestions].add, parameter[constant[Check the InaSAFE documentation to see if you are trying to do something unsupported.]]]
call[name[suggestions].add, parameter[constant[Report the problem using the issue tracker at https://github.com/inasafe/inasafe/issues. Reporting an issue requires that you first create a free account at http://github.com. When you report the issue, please copy and paste the complete contents of this panel into the issue to ensure the best possible chance of getting your issue resolved.]]]
call[name[suggestions].add, parameter[constant[Try contacting one of the InaSAFE development team by sending an email to info@inasafe.org. Please ensure that you copy and paste the complete contents of this panel into the email.]]]
return[name[suggestions]] | keyword[def] identifier[standard_suggestions] ( identifier[self] ):
literal[string]
identifier[suggestions] = identifier[BulletedList] ()
identifier[suggestions] . identifier[add] (
literal[string]
literal[string]
literal[string] )
identifier[suggestions] . identifier[add] (
literal[string]
literal[string] )
identifier[suggestions] . identifier[add] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[suggestions] . identifier[add] (
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[return] identifier[suggestions] | def standard_suggestions(self):
"""Standard generic suggestions.
:return: List of standard suggestions for users who encounter errors.
:rtype: BulletedList
"""
suggestions = BulletedList()
suggestions.add('Check that you have the latest version of InaSAFE installed - you may have encountered a bug that is fixed in a subsequent release.')
suggestions.add('Check the InaSAFE documentation to see if you are trying to do something unsupported.')
suggestions.add('Report the problem using the issue tracker at https://github.com/inasafe/inasafe/issues. Reporting an issue requires that you first create a free account at http://github.com. When you report the issue, please copy and paste the complete contents of this panel into the issue to ensure the best possible chance of getting your issue resolved.')
suggestions.add('Try contacting one of the InaSAFE development team by sending an email to info@inasafe.org. Please ensure that you copy and paste the complete contents of this panel into the email.')
return suggestions |
def write_vars(self,fh):
    """
    Write the variable (macro) options and arguments to the DAG file
    descriptor as a single ``VARS`` line.

    @param fh: descriptor of open DAG file.
    """
    macros = self.__macros
    opts = self.__opts
    args = self.__args
    # The "VARS <nodename>" prefix is emitted only when there is at least
    # one macro, option or argument to report.
    if macros.keys() or opts.keys() or args:
        fh.write('VARS ' + self.__name)
    # Macros first, then options, each as  key="value".
    for key in macros.keys():
        fh.write(' ' + str(key) + '="' + str(macros[key]) + '"')
    for key in opts.keys():
        fh.write(' ' + str(key) + '="' + str(opts[key]) + '"')
    if args:
        for index in range(self.__arg_index):
            fh.write(' macroargument' + str(index) + '="' + args[index] + '"')
    # The trailing newline is written unconditionally, matching the
    # historical output (an empty node still produces a bare newline).
    fh.write('\n')
constant[
Write the variable (macro) options and arguments to the DAG file
descriptor.
@param fh: descriptor of open DAG file.
]
if <ast.BoolOp object at 0x7da1b0a32800> begin[:]
call[name[fh].write, parameter[binary_operation[constant[VARS ] + name[self].__name]]]
for taget[name[k]] in starred[call[name[self].__macros.keys, parameter[]]] begin[:]
call[name[fh].write, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[ ] + call[name[str], parameter[name[k]]]] + constant[="]] + call[name[str], parameter[call[name[self].__macros][name[k]]]]] + constant["]]]]
for taget[name[k]] in starred[call[name[self].__opts.keys, parameter[]]] begin[:]
call[name[fh].write, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[ ] + call[name[str], parameter[name[k]]]] + constant[="]] + call[name[str], parameter[call[name[self].__opts][name[k]]]]] + constant["]]]]
if name[self].__args begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[self].__arg_index]]] begin[:]
call[name[fh].write, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[ macroargument] + call[name[str], parameter[name[i]]]] + constant[="]] + call[name[self].__args][name[i]]] + constant["]]]]
call[name[fh].write, parameter[constant[
]]] | keyword[def] identifier[write_vars] ( identifier[self] , identifier[fh] ):
literal[string]
keyword[if] identifier[self] . identifier[__macros] . identifier[keys] () keyword[or] identifier[self] . identifier[__opts] . identifier[keys] () keyword[or] identifier[self] . identifier[__args] :
identifier[fh] . identifier[write] ( literal[string] + identifier[self] . identifier[__name] )
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[__macros] . identifier[keys] ():
identifier[fh] . identifier[write] ( literal[string] + identifier[str] ( identifier[k] )+ literal[string] + identifier[str] ( identifier[self] . identifier[__macros] [ identifier[k] ])+ literal[string] )
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[__opts] . identifier[keys] ():
identifier[fh] . identifier[write] ( literal[string] + identifier[str] ( identifier[k] )+ literal[string] + identifier[str] ( identifier[self] . identifier[__opts] [ identifier[k] ])+ literal[string] )
keyword[if] identifier[self] . identifier[__args] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[__arg_index] ):
identifier[fh] . identifier[write] ( literal[string] + identifier[str] ( identifier[i] )+ literal[string] + identifier[self] . identifier[__args] [ identifier[i] ]+ literal[string] )
identifier[fh] . identifier[write] ( literal[string] ) | def write_vars(self, fh):
"""
Write the variable (macro) options and arguments to the DAG file
descriptor.
@param fh: descriptor of open DAG file.
"""
if self.__macros.keys() or self.__opts.keys() or self.__args:
fh.write('VARS ' + self.__name) # depends on [control=['if'], data=[]]
for k in self.__macros.keys():
fh.write(' ' + str(k) + '="' + str(self.__macros[k]) + '"') # depends on [control=['for'], data=['k']]
for k in self.__opts.keys():
fh.write(' ' + str(k) + '="' + str(self.__opts[k]) + '"') # depends on [control=['for'], data=['k']]
if self.__args:
for i in range(self.__arg_index):
fh.write(' macroargument' + str(i) + '="' + self.__args[i] + '"') # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
fh.write('\n') |
def search(self, query: Optional[dict] = None,
offset: int = 0,
limit: Optional[int] = None,
order_by: Optional[tuple] = None,
secure: bool = False) -> List[IModel]:
"""Search for models
Filtering is done through ``rulez`` based JSON/dict query, which
defines boolean statements in JSON/dict structure.
: param query: Rulez based query
: param offset: Result offset
: param limit: Maximum number of result
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: param secure: When set to True, this will filter out any object which
current logged in user is not allowed to see
: todo: ``order_by`` need to allow multiple field ordering
"""
raise NotImplementedError | def function[search, parameter[self, query, offset, limit, order_by, secure]]:
constant[Search for models
Filtering is done through ``rulez`` based JSON/dict query, which
defines boolean statements in JSON/dict structure.
: param query: Rulez based query
: param offset: Result offset
: param limit: Maximum number of result
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: param secure: When set to True, this will filter out any object which
current logged in user is not allowed to see
: todo: ``order_by`` need to allow multiple field ordering
]
<ast.Raise object at 0x7da20c990a90> | keyword[def] identifier[search] ( identifier[self] , identifier[query] : identifier[Optional] [ identifier[dict] ]= keyword[None] ,
identifier[offset] : identifier[int] = literal[int] ,
identifier[limit] : identifier[Optional] [ identifier[int] ]= keyword[None] ,
identifier[order_by] : identifier[Optional] [ identifier[tuple] ]= keyword[None] ,
identifier[secure] : identifier[bool] = keyword[False] )-> identifier[List] [ identifier[IModel] ]:
literal[string]
keyword[raise] identifier[NotImplementedError] | def search(self, query: Optional[dict]=None, offset: int=0, limit: Optional[int]=None, order_by: Optional[tuple]=None, secure: bool=False) -> List[IModel]:
"""Search for models
Filtering is done through ``rulez`` based JSON/dict query, which
defines boolean statements in JSON/dict structure.
: param query: Rulez based query
: param offset: Result offset
: param limit: Maximum number of result
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: param secure: When set to True, this will filter out any object which
current logged in user is not allowed to see
: todo: ``order_by`` need to allow multiple field ordering
"""
raise NotImplementedError |
def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
rangeFlag = retFlag = 0
startStr = endStr = ''
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
s = s.replace(' ', ' ')
for cre, rflag in [(self.ptc.CRE_TIMERNG1, 1),
(self.ptc.CRE_TIMERNG2, 2),
(self.ptc.CRE_TIMERNG4, 7),
(self.ptc.CRE_TIMERNG3, 3),
(self.ptc.CRE_DATERNG1, 4),
(self.ptc.CRE_DATERNG2, 5),
(self.ptc.CRE_DATERNG3, 6)]:
m = cre.search(s)
if m is not None:
rangeFlag = rflag
break
debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
sourceTime, ctx = self.parse(s, sourceTime,
VERSION_CONTEXT_STYLE)
if not ctx.hasDateOrTime:
sourceTime = None
else:
parseStr = s
if rangeFlag in (1, 2):
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag in (3, 7):
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startStr = parseStr[:m.start()] + self.ptc.meridian[0]
else:
startStr = parseStr[:m.start()] + self.ptc.meridian[1]
else:
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 1
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endStr = parseStr[m.start() + 1:]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endStr)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startStr = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startStr)
startYear = date.group('year')
if startYear is None:
startStr = startStr + ', ' + endYear
else:
startStr = parseStr[:m.start()]
retFlag = 1
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startStr)
mth = mth.group('mthname')
# appending the month name to the end date
endStr = mth + parseStr[(m.start() + 1):]
retFlag = 1
else:
# if range is not found
startDT = endDT = time.localtime()
if retFlag:
startDT, sctx = self.parse(startStr, sourceTime,
VERSION_CONTEXT_STYLE)
endDT, ectx = self.parse(endStr, sourceTime,
VERSION_CONTEXT_STYLE)
if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
retFlag = 0
return startDT, endDT, retFlag | def function[evalRanges, parameter[self, datetimeString, sourceTime]]:
constant[
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
]
variable[rangeFlag] assign[=] constant[0]
variable[startStr] assign[=] constant[]
variable[s] assign[=] call[call[name[datetimeString].strip, parameter[]].lower, parameter[]]
if compare[name[self].ptc.rangeSep in name[s]] begin[:]
variable[s] assign[=] call[name[s].replace, parameter[name[self].ptc.rangeSep, binary_operation[constant[ %s ] <ast.Mod object at 0x7da2590d6920> name[self].ptc.rangeSep]]]
variable[s] assign[=] call[name[s].replace, parameter[constant[ ], constant[ ]]]
for taget[tuple[[<ast.Name object at 0x7da20c6aab30>, <ast.Name object at 0x7da20c6a8130>]]] in starred[list[[<ast.Tuple object at 0x7da20c6a96c0>, <ast.Tuple object at 0x7da20c6a9ab0>, <ast.Tuple object at 0x7da20c6a88e0>, <ast.Tuple object at 0x7da20c6aa980>, <ast.Tuple object at 0x7da20c6a8af0>, <ast.Tuple object at 0x7da20c6aa830>, <ast.Tuple object at 0x7da20c6aa7a0>]]] begin[:]
variable[m] assign[=] call[name[cre].search, parameter[name[s]]]
if compare[name[m] is_not constant[None]] begin[:]
variable[rangeFlag] assign[=] name[rflag]
break
<ast.BoolOp object at 0x7da20c6a9b10>
if compare[name[m] is_not constant[None]] begin[:]
if compare[call[name[m].group, parameter[]] not_equal[!=] name[s]] begin[:]
variable[parseStr] assign[=] call[name[m].group, parameter[]]
variable[chunk1] assign[=] call[name[s]][<ast.Slice object at 0x7da20c6aa620>]
variable[chunk2] assign[=] call[name[s]][<ast.Slice object at 0x7da20c6a8160>]
variable[s] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6a9660>, <ast.Name object at 0x7da20c6aae30>]]]
<ast.Tuple object at 0x7da20c6a9360> assign[=] call[name[self].parse, parameter[name[s], name[sourceTime], name[VERSION_CONTEXT_STYLE]]]
if <ast.UnaryOp object at 0x7da20c6a9510> begin[:]
variable[sourceTime] assign[=] constant[None]
if compare[name[rangeFlag] in tuple[[<ast.Constant object at 0x7da20c6aabc0>, <ast.Constant object at 0x7da20c6abc40>]]] begin[:]
variable[m] assign[=] call[name[re].search, parameter[name[self].ptc.rangeSep, name[parseStr]]]
variable[startStr] assign[=] call[name[parseStr]][<ast.Slice object at 0x7da20c6ab9a0>]
variable[endStr] assign[=] call[name[parseStr]][<ast.Slice object at 0x7da20c6a9db0>]
variable[retFlag] assign[=] constant[2]
if name[retFlag] begin[:]
<ast.Tuple object at 0x7da18f09ebc0> assign[=] call[name[self].parse, parameter[name[startStr], name[sourceTime], name[VERSION_CONTEXT_STYLE]]]
<ast.Tuple object at 0x7da18f09fca0> assign[=] call[name[self].parse, parameter[name[endStr], name[sourceTime], name[VERSION_CONTEXT_STYLE]]]
if <ast.BoolOp object at 0x7da18f09f8e0> begin[:]
variable[retFlag] assign[=] constant[0]
return[tuple[[<ast.Name object at 0x7da18f09d450>, <ast.Name object at 0x7da18f09c370>, <ast.Name object at 0x7da18f09f2e0>]]] | keyword[def] identifier[evalRanges] ( identifier[self] , identifier[datetimeString] , identifier[sourceTime] = keyword[None] ):
literal[string]
identifier[rangeFlag] = identifier[retFlag] = literal[int]
identifier[startStr] = identifier[endStr] = literal[string]
identifier[s] = identifier[datetimeString] . identifier[strip] (). identifier[lower] ()
keyword[if] identifier[self] . identifier[ptc] . identifier[rangeSep] keyword[in] identifier[s] :
identifier[s] = identifier[s] . identifier[replace] ( identifier[self] . identifier[ptc] . identifier[rangeSep] , literal[string] % identifier[self] . identifier[ptc] . identifier[rangeSep] )
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[cre] , identifier[rflag] keyword[in] [( identifier[self] . identifier[ptc] . identifier[CRE_TIMERNG1] , literal[int] ),
( identifier[self] . identifier[ptc] . identifier[CRE_TIMERNG2] , literal[int] ),
( identifier[self] . identifier[ptc] . identifier[CRE_TIMERNG4] , literal[int] ),
( identifier[self] . identifier[ptc] . identifier[CRE_TIMERNG3] , literal[int] ),
( identifier[self] . identifier[ptc] . identifier[CRE_DATERNG1] , literal[int] ),
( identifier[self] . identifier[ptc] . identifier[CRE_DATERNG2] , literal[int] ),
( identifier[self] . identifier[ptc] . identifier[CRE_DATERNG3] , literal[int] )]:
identifier[m] = identifier[cre] . identifier[search] ( identifier[s] )
keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] :
identifier[rangeFlag] = identifier[rflag]
keyword[break]
identifier[debug] keyword[and] identifier[log] . identifier[debug] ( literal[string] , identifier[rangeFlag] , identifier[s] )
keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] :
keyword[if] ( identifier[m] . identifier[group] ()!= identifier[s] ):
identifier[parseStr] = identifier[m] . identifier[group] ()
identifier[chunk1] = identifier[s] [: identifier[m] . identifier[start] ()]
identifier[chunk2] = identifier[s] [ identifier[m] . identifier[end] ():]
identifier[s] = literal[string] %( identifier[chunk1] , identifier[chunk2] )
identifier[sourceTime] , identifier[ctx] = identifier[self] . identifier[parse] ( identifier[s] , identifier[sourceTime] ,
identifier[VERSION_CONTEXT_STYLE] )
keyword[if] keyword[not] identifier[ctx] . identifier[hasDateOrTime] :
identifier[sourceTime] = keyword[None]
keyword[else] :
identifier[parseStr] = identifier[s]
keyword[if] identifier[rangeFlag] keyword[in] ( literal[int] , literal[int] ):
identifier[m] = identifier[re] . identifier[search] ( identifier[self] . identifier[ptc] . identifier[rangeSep] , identifier[parseStr] )
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]
identifier[endStr] = identifier[parseStr] [ identifier[m] . identifier[start] ()+ literal[int] :]
identifier[retFlag] = literal[int]
keyword[elif] identifier[rangeFlag] keyword[in] ( literal[int] , literal[int] ):
identifier[m] = identifier[re] . identifier[search] ( identifier[self] . identifier[ptc] . identifier[rangeSep] , identifier[parseStr] )
keyword[if] identifier[self] . identifier[ptc] . identifier[usesMeridian] :
identifier[ampm] = identifier[re] . identifier[search] ( identifier[self] . identifier[ptc] . identifier[am] [ literal[int] ], identifier[parseStr] )
keyword[if] identifier[ampm] keyword[is] keyword[not] keyword[None] :
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]+ identifier[self] . identifier[ptc] . identifier[meridian] [ literal[int] ]
keyword[else] :
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]+ identifier[self] . identifier[ptc] . identifier[meridian] [ literal[int] ]
keyword[else] :
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]
identifier[endStr] = identifier[parseStr] [ identifier[m] . identifier[start] ()+ literal[int] :]
identifier[retFlag] = literal[int]
keyword[elif] identifier[rangeFlag] == literal[int] :
identifier[m] = identifier[re] . identifier[search] ( identifier[self] . identifier[ptc] . identifier[rangeSep] , identifier[parseStr] )
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]
identifier[endStr] = identifier[parseStr] [ identifier[m] . identifier[start] ()+ literal[int] :]
identifier[retFlag] = literal[int]
keyword[elif] identifier[rangeFlag] == literal[int] :
identifier[m] = identifier[re] . identifier[search] ( identifier[self] . identifier[ptc] . identifier[rangeSep] , identifier[parseStr] )
identifier[endStr] = identifier[parseStr] [ identifier[m] . identifier[start] ()+ literal[int] :]
identifier[date] = identifier[self] . identifier[ptc] . identifier[CRE_DATE3] . identifier[search] ( identifier[endStr] )
identifier[endYear] = identifier[date] . identifier[group] ( literal[string] )
keyword[if] identifier[endYear] keyword[is] keyword[not] keyword[None] :
identifier[startStr] =( identifier[parseStr] [: identifier[m] . identifier[start] ()]). identifier[strip] ()
identifier[date] = identifier[self] . identifier[ptc] . identifier[CRE_DATE3] . identifier[search] ( identifier[startStr] )
identifier[startYear] = identifier[date] . identifier[group] ( literal[string] )
keyword[if] identifier[startYear] keyword[is] keyword[None] :
identifier[startStr] = identifier[startStr] + literal[string] + identifier[endYear]
keyword[else] :
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]
identifier[retFlag] = literal[int]
keyword[elif] identifier[rangeFlag] == literal[int] :
identifier[m] = identifier[re] . identifier[search] ( identifier[self] . identifier[ptc] . identifier[rangeSep] , identifier[parseStr] )
identifier[startStr] = identifier[parseStr] [: identifier[m] . identifier[start] ()]
identifier[mth] = identifier[self] . identifier[ptc] . identifier[CRE_DATE3] . identifier[search] ( identifier[startStr] )
identifier[mth] = identifier[mth] . identifier[group] ( literal[string] )
identifier[endStr] = identifier[mth] + identifier[parseStr] [( identifier[m] . identifier[start] ()+ literal[int] ):]
identifier[retFlag] = literal[int]
keyword[else] :
identifier[startDT] = identifier[endDT] = identifier[time] . identifier[localtime] ()
keyword[if] identifier[retFlag] :
identifier[startDT] , identifier[sctx] = identifier[self] . identifier[parse] ( identifier[startStr] , identifier[sourceTime] ,
identifier[VERSION_CONTEXT_STYLE] )
identifier[endDT] , identifier[ectx] = identifier[self] . identifier[parse] ( identifier[endStr] , identifier[sourceTime] ,
identifier[VERSION_CONTEXT_STYLE] )
keyword[if] keyword[not] identifier[sctx] . identifier[hasDateOrTime] keyword[or] keyword[not] identifier[ectx] . identifier[hasDateOrTime] :
identifier[retFlag] = literal[int]
keyword[return] identifier[startDT] , identifier[endDT] , identifier[retFlag] | def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
rangeFlag = retFlag = 0
startStr = endStr = ''
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
s = s.replace(' ', ' ') # depends on [control=['if'], data=['s']]
for (cre, rflag) in [(self.ptc.CRE_TIMERNG1, 1), (self.ptc.CRE_TIMERNG2, 2), (self.ptc.CRE_TIMERNG4, 7), (self.ptc.CRE_TIMERNG3, 3), (self.ptc.CRE_DATERNG1, 4), (self.ptc.CRE_DATERNG2, 5), (self.ptc.CRE_DATERNG3, 6)]:
m = cre.search(s)
if m is not None:
rangeFlag = rflag
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
if m is not None:
if m.group() != s:
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
(sourceTime, ctx) = self.parse(s, sourceTime, VERSION_CONTEXT_STYLE)
if not ctx.hasDateOrTime:
sourceTime = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['s']]
else:
parseStr = s # depends on [control=['if'], data=['m']]
if rangeFlag in (1, 2):
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2 # depends on [control=['if'], data=[]]
elif rangeFlag in (3, 7):
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startStr = parseStr[:m.start()] + self.ptc.meridian[0] # depends on [control=['if'], data=[]]
else:
startStr = parseStr[:m.start()] + self.ptc.meridian[1] # depends on [control=['if'], data=[]]
else:
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2 # depends on [control=['if'], data=[]]
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 1 # depends on [control=['if'], data=[]]
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endStr = parseStr[m.start() + 1:]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endStr)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startStr = parseStr[:m.start()].strip()
date = self.ptc.CRE_DATE3.search(startStr)
startYear = date.group('year')
if startYear is None:
startStr = startStr + ', ' + endYear # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['endYear']]
else:
startStr = parseStr[:m.start()]
retFlag = 1 # depends on [control=['if'], data=[]]
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startStr)
mth = mth.group('mthname')
# appending the month name to the end date
endStr = mth + parseStr[m.start() + 1:]
retFlag = 1 # depends on [control=['if'], data=[]]
else:
# if range is not found
startDT = endDT = time.localtime()
if retFlag:
(startDT, sctx) = self.parse(startStr, sourceTime, VERSION_CONTEXT_STYLE)
(endDT, ectx) = self.parse(endStr, sourceTime, VERSION_CONTEXT_STYLE)
if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
retFlag = 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return (startDT, endDT, retFlag) |
async def refresh_token(self, refresh_token):
"""
:param refresh_token: an openid refresh-token from a previous token request
"""
async with self._client_session() as client:
well_known = await self._get_well_known(client)
try:
return await self._post(
client,
well_known['token_endpoint'],
data={
'grant_type': GRANT_TYPE_REFRESH_TOKEN,
'refresh_token': refresh_token,
}
)
except aiohttp.ClientResponseError as e:
raise ConfigException('oidc: failed to refresh access token') | <ast.AsyncFunctionDef object at 0x7da20c6c45b0> | keyword[async] keyword[def] identifier[refresh_token] ( identifier[self] , identifier[refresh_token] ):
literal[string]
keyword[async] keyword[with] identifier[self] . identifier[_client_session] () keyword[as] identifier[client] :
identifier[well_known] = keyword[await] identifier[self] . identifier[_get_well_known] ( identifier[client] )
keyword[try] :
keyword[return] keyword[await] identifier[self] . identifier[_post] (
identifier[client] ,
identifier[well_known] [ literal[string] ],
identifier[data] ={
literal[string] : identifier[GRANT_TYPE_REFRESH_TOKEN] ,
literal[string] : identifier[refresh_token] ,
}
)
keyword[except] identifier[aiohttp] . identifier[ClientResponseError] keyword[as] identifier[e] :
keyword[raise] identifier[ConfigException] ( literal[string] ) | async def refresh_token(self, refresh_token):
"""
:param refresh_token: an openid refresh-token from a previous token request
"""
async with self._client_session() as client:
well_known = await self._get_well_known(client)
try:
return await self._post(client, well_known['token_endpoint'], data={'grant_type': GRANT_TYPE_REFRESH_TOKEN, 'refresh_token': refresh_token}) # depends on [control=['try'], data=[]]
except aiohttp.ClientResponseError as e:
raise ConfigException('oidc: failed to refresh access token') # depends on [control=['except'], data=[]] |
def b(field: str, kwargs: Dict[str, Any],
present: Optional[Any] = None, missing: Any = '') -> str:
"""
Return `present` value (default to `field`) if `field` in `kwargs` and
Truthy, otherwise return `missing` value
"""
if kwargs.get(field):
return field if present is None else str(present)
return str(missing) | def function[b, parameter[field, kwargs, present, missing]]:
constant[
Return `present` value (default to `field`) if `field` in `kwargs` and
Truthy, otherwise return `missing` value
]
if call[name[kwargs].get, parameter[name[field]]] begin[:]
return[<ast.IfExp object at 0x7da1b0fc6260>]
return[call[name[str], parameter[name[missing]]]] | keyword[def] identifier[b] ( identifier[field] : identifier[str] , identifier[kwargs] : identifier[Dict] [ identifier[str] , identifier[Any] ],
identifier[present] : identifier[Optional] [ identifier[Any] ]= keyword[None] , identifier[missing] : identifier[Any] = literal[string] )-> identifier[str] :
literal[string]
keyword[if] identifier[kwargs] . identifier[get] ( identifier[field] ):
keyword[return] identifier[field] keyword[if] identifier[present] keyword[is] keyword[None] keyword[else] identifier[str] ( identifier[present] )
keyword[return] identifier[str] ( identifier[missing] ) | def b(field: str, kwargs: Dict[str, Any], present: Optional[Any]=None, missing: Any='') -> str:
"""
Return `present` value (default to `field`) if `field` in `kwargs` and
Truthy, otherwise return `missing` value
"""
if kwargs.get(field):
return field if present is None else str(present) # depends on [control=['if'], data=[]]
return str(missing) |
def send_respawn(self):
"""
Respawns the player.
"""
nick = self.player.nick
self.send_struct('<B%iH' % len(nick), 0, *map(ord, nick)) | def function[send_respawn, parameter[self]]:
constant[
Respawns the player.
]
variable[nick] assign[=] name[self].player.nick
call[name[self].send_struct, parameter[binary_operation[constant[<B%iH] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[nick]]]], constant[0], <ast.Starred object at 0x7da1b0b71d80>]] | keyword[def] identifier[send_respawn] ( identifier[self] ):
literal[string]
identifier[nick] = identifier[self] . identifier[player] . identifier[nick]
identifier[self] . identifier[send_struct] ( literal[string] % identifier[len] ( identifier[nick] ), literal[int] ,* identifier[map] ( identifier[ord] , identifier[nick] )) | def send_respawn(self):
"""
Respawns the player.
"""
nick = self.player.nick
self.send_struct('<B%iH' % len(nick), 0, *map(ord, nick)) |
def create_tasks_dict(self, ast):
'''
Parse each "Task" in the AST. This will create self.tasks_dictionary,
where each task name is a key.
:return: Creates the self.tasks_dictionary necessary for much of the
parser. Returning it is only necessary for unittests.
'''
tasks = self.find_asts(ast, 'Task')
for task in tasks:
self.parse_task(task)
return self.tasks_dictionary | def function[create_tasks_dict, parameter[self, ast]]:
constant[
Parse each "Task" in the AST. This will create self.tasks_dictionary,
where each task name is a key.
:return: Creates the self.tasks_dictionary necessary for much of the
parser. Returning it is only necessary for unittests.
]
variable[tasks] assign[=] call[name[self].find_asts, parameter[name[ast], constant[Task]]]
for taget[name[task]] in starred[name[tasks]] begin[:]
call[name[self].parse_task, parameter[name[task]]]
return[name[self].tasks_dictionary] | keyword[def] identifier[create_tasks_dict] ( identifier[self] , identifier[ast] ):
literal[string]
identifier[tasks] = identifier[self] . identifier[find_asts] ( identifier[ast] , literal[string] )
keyword[for] identifier[task] keyword[in] identifier[tasks] :
identifier[self] . identifier[parse_task] ( identifier[task] )
keyword[return] identifier[self] . identifier[tasks_dictionary] | def create_tasks_dict(self, ast):
"""
Parse each "Task" in the AST. This will create self.tasks_dictionary,
where each task name is a key.
:return: Creates the self.tasks_dictionary necessary for much of the
parser. Returning it is only necessary for unittests.
"""
tasks = self.find_asts(ast, 'Task')
for task in tasks:
self.parse_task(task) # depends on [control=['for'], data=['task']]
return self.tasks_dictionary |
def remove_all(gset, elem):
"""Removes every occurrence of ``elem`` from ``gset``.
Returns the number of times ``elem`` was removed.
"""
n = 0
while True:
try:
remove_once(gset, elem)
n = n + 1
except RemoveError:
return n | def function[remove_all, parameter[gset, elem]]:
constant[Removes every occurrence of ``elem`` from ``gset``.
Returns the number of times ``elem`` was removed.
]
variable[n] assign[=] constant[0]
while constant[True] begin[:]
<ast.Try object at 0x7da18c4cf010> | keyword[def] identifier[remove_all] ( identifier[gset] , identifier[elem] ):
literal[string]
identifier[n] = literal[int]
keyword[while] keyword[True] :
keyword[try] :
identifier[remove_once] ( identifier[gset] , identifier[elem] )
identifier[n] = identifier[n] + literal[int]
keyword[except] identifier[RemoveError] :
keyword[return] identifier[n] | def remove_all(gset, elem):
"""Removes every occurrence of ``elem`` from ``gset``.
Returns the number of times ``elem`` was removed.
"""
n = 0
while True:
try:
remove_once(gset, elem)
n = n + 1 # depends on [control=['try'], data=[]]
except RemoveError:
return n # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def _transform_delta(f:Formula, formula2AtomicFormula):
"""From a Propositional Formula to a Propositional Formula
with non-propositional subformulas replaced with a "freezed" atomic formula."""
t = type(f)
if t == PLNot:
return PLNot(_transform_delta(f, formula2AtomicFormula))
# elif isinstance(f, PLBinaryOperator): #PLAnd, PLOr, PLImplies, PLEquivalence
elif t == PLAnd or t == PLOr or t == PLImplies or t == PLEquivalence:
return t([_transform_delta(subf, formula2AtomicFormula) for subf in f.formulas])
elif t == PLTrue or t == PLFalse:
return f
else:
return formula2AtomicFormula[f] | def function[_transform_delta, parameter[f, formula2AtomicFormula]]:
constant[From a Propositional Formula to a Propositional Formula
with non-propositional subformulas replaced with a "freezed" atomic formula.]
variable[t] assign[=] call[name[type], parameter[name[f]]]
if compare[name[t] equal[==] name[PLNot]] begin[:]
return[call[name[PLNot], parameter[call[name[_transform_delta], parameter[name[f], name[formula2AtomicFormula]]]]]] | keyword[def] identifier[_transform_delta] ( identifier[f] : identifier[Formula] , identifier[formula2AtomicFormula] ):
literal[string]
identifier[t] = identifier[type] ( identifier[f] )
keyword[if] identifier[t] == identifier[PLNot] :
keyword[return] identifier[PLNot] ( identifier[_transform_delta] ( identifier[f] , identifier[formula2AtomicFormula] ))
keyword[elif] identifier[t] == identifier[PLAnd] keyword[or] identifier[t] == identifier[PLOr] keyword[or] identifier[t] == identifier[PLImplies] keyword[or] identifier[t] == identifier[PLEquivalence] :
keyword[return] identifier[t] ([ identifier[_transform_delta] ( identifier[subf] , identifier[formula2AtomicFormula] ) keyword[for] identifier[subf] keyword[in] identifier[f] . identifier[formulas] ])
keyword[elif] identifier[t] == identifier[PLTrue] keyword[or] identifier[t] == identifier[PLFalse] :
keyword[return] identifier[f]
keyword[else] :
keyword[return] identifier[formula2AtomicFormula] [ identifier[f] ] | def _transform_delta(f: Formula, formula2AtomicFormula):
"""From a Propositional Formula to a Propositional Formula
with non-propositional subformulas replaced with a "freezed" atomic formula."""
t = type(f)
if t == PLNot:
return PLNot(_transform_delta(f, formula2AtomicFormula)) # depends on [control=['if'], data=['PLNot']]
# elif isinstance(f, PLBinaryOperator): #PLAnd, PLOr, PLImplies, PLEquivalence
elif t == PLAnd or t == PLOr or t == PLImplies or (t == PLEquivalence):
return t([_transform_delta(subf, formula2AtomicFormula) for subf in f.formulas]) # depends on [control=['if'], data=[]]
elif t == PLTrue or t == PLFalse:
return f # depends on [control=['if'], data=[]]
else:
return formula2AtomicFormula[f] |
async def listCronJobs(self):
    '''
    Get information about all the cron jobs accessible to the current user
    '''
    visible = []
    for iden, cronjob in self.cell.agenda.list():
        owner = cronjob['useriden']
        # Admins see every job; everyone else only sees their own.
        allowed = self.user.admin or owner == self.user.iden
        if allowed:
            account = self.cell.auth.user(owner)
            cronjob['username'] = account.name if account is not None else '<unknown>'
            visible.append((iden, cronjob))
    return visible
literal[string]
identifier[crons] =[]
keyword[for] identifier[iden] , identifier[cron] keyword[in] identifier[self] . identifier[cell] . identifier[agenda] . identifier[list] ():
identifier[useriden] = identifier[cron] [ literal[string] ]
keyword[if] keyword[not] ( identifier[self] . identifier[user] . identifier[admin] keyword[or] identifier[useriden] == identifier[self] . identifier[user] . identifier[iden] ):
keyword[continue]
identifier[user] = identifier[self] . identifier[cell] . identifier[auth] . identifier[user] ( identifier[useriden] )
identifier[cron] [ literal[string] ]= literal[string] keyword[if] identifier[user] keyword[is] keyword[None] keyword[else] identifier[user] . identifier[name]
identifier[crons] . identifier[append] (( identifier[iden] , identifier[cron] ))
keyword[return] identifier[crons] | async def listCronJobs(self):
"""
Get information about all the cron jobs accessible to the current user
"""
crons = []
for (iden, cron) in self.cell.agenda.list():
useriden = cron['useriden']
if not (self.user.admin or useriden == self.user.iden):
continue # depends on [control=['if'], data=[]]
user = self.cell.auth.user(useriden)
cron['username'] = '<unknown>' if user is None else user.name
crons.append((iden, cron)) # depends on [control=['for'], data=[]]
return crons |
def decrypt(crypt_text) -> str:
    """Decrypt ``crypt_text`` with the Fernet key from the app config.

    Accepts ciphertext as either ``bytes`` or ``str`` and returns the
    plaintext decoded as UTF-8.
    """
    token = crypt_text if isinstance(crypt_text, bytes) else str.encode(crypt_text)
    cipher = Fernet(current_app.config['KEY'])
    return cipher.decrypt(token).decode("utf-8")
constant[ Use config.json key to decrypt ]
variable[cipher] assign[=] call[name[Fernet], parameter[call[name[current_app].config][constant[KEY]]]]
if <ast.UnaryOp object at 0x7da1b287c340> begin[:]
variable[crypt_text] assign[=] call[name[str].encode, parameter[name[crypt_text]]]
return[call[call[name[cipher].decrypt, parameter[name[crypt_text]]].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[decrypt] ( identifier[crypt_text] )-> identifier[str] :
literal[string]
identifier[cipher] = identifier[Fernet] ( identifier[current_app] . identifier[config] [ literal[string] ])
keyword[if] keyword[not] identifier[isinstance] ( identifier[crypt_text] , identifier[bytes] ):
identifier[crypt_text] = identifier[str] . identifier[encode] ( identifier[crypt_text] )
keyword[return] identifier[cipher] . identifier[decrypt] ( identifier[crypt_text] ). identifier[decode] ( literal[string] ) | def decrypt(crypt_text) -> str:
""" Use config.json key to decrypt """
cipher = Fernet(current_app.config['KEY'])
if not isinstance(crypt_text, bytes):
crypt_text = str.encode(crypt_text) # depends on [control=['if'], data=[]]
return cipher.decrypt(crypt_text).decode('utf-8') |
def applications(self):
    """
    Access the applications

    :returns: twilio.rest.api.v2010.account.application.ApplicationList
    :rtype: twilio.rest.api.v2010.account.application.ApplicationList
    """
    # Return the memoised list when it already exists.
    if self._applications is not None:
        return self._applications
    # First access: build lazily and cache for subsequent calls.
    self._applications = ApplicationList(self._version, account_sid=self._solution['sid'], )
    return self._applications
constant[
Access the applications
:returns: twilio.rest.api.v2010.account.application.ApplicationList
:rtype: twilio.rest.api.v2010.account.application.ApplicationList
]
if compare[name[self]._applications is constant[None]] begin[:]
name[self]._applications assign[=] call[name[ApplicationList], parameter[name[self]._version]]
return[name[self]._applications] | keyword[def] identifier[applications] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_applications] keyword[is] keyword[None] :
identifier[self] . identifier[_applications] = identifier[ApplicationList] ( identifier[self] . identifier[_version] , identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
keyword[return] identifier[self] . identifier[_applications] | def applications(self):
"""
Access the applications
:returns: twilio.rest.api.v2010.account.application.ApplicationList
:rtype: twilio.rest.api.v2010.account.application.ApplicationList
"""
if self._applications is None:
self._applications = ApplicationList(self._version, account_sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._applications |
def emit_containers(self, containers, verbose=True):
    """
    Emits the applications and sorts containers by name

    :param containers: List of the container definitions
    :type containers: list of dict
    :param verbose: Print out newlines and indented JSON (currently unused)
    :type verbose: bool
    :returns: The text output
    :rtype: str
    """
    containers = sorted(containers, key=lambda c: c.get('name'))
    # 'name'/'app' metadata stay as None placeholders -- presumably filled
    # in by the caller before the manifest is used (TODO confirm).
    output = {
        'kind': 'Deployment',
        'apiVersion': 'extensions/v1beta1',
        'metadata': {
            'name': None,
            'namespace': 'default',
            'labels': {
                'app': None,
                'version': 'latest',
            },
        },
        'spec': {
            'replicas': 1,
            'selector': {
                'matchLabels': {
                    'app': None,
                    'version': 'latest'
                }
            },
            'template': {
                'metadata': {
                    'labels': {
                        'app': None,
                        'version': 'latest'
                    }
                },
                'spec': {
                    # JSON round-trip deep-copies the container definitions
                    # and normalises them to plain JSON-compatible types.
                    'containers': json.loads(json.dumps(containers))
                }
            }
        }
    }
    if self.volumes:
        volumes = sorted(self.volumes.values(), key=lambda x: x.get('name'))
        output['spec']['template']['spec']['volumes'] = volumes

    # Use a local subclass instead of monkey-patching: the previous code
    # assigned ``ignore_aliases`` directly on ``yaml.dumper.SafeDumper``,
    # mutating the shared class and disabling anchors/aliases for every
    # other SafeDumper user in the process.
    class NoAliasDumper(yaml.dumper.SafeDumper):
        def ignore_aliases(self, data):
            return True

    return yaml.dump(
        output,
        default_flow_style=False,
        Dumper=NoAliasDumper
    )
constant[
Emits the applications and sorts containers by name
:param containers: List of the container definitions
:type containers: list of dict
:param verbose: Print out newlines and indented JSON
:type verbose: bool
:returns: The text output
:rtype: str
]
variable[containers] assign[=] call[name[sorted], parameter[name[containers]]]
variable[output] assign[=] dictionary[[<ast.Constant object at 0x7da20c991120>, <ast.Constant object at 0x7da20c993d30>, <ast.Constant object at 0x7da20c990250>, <ast.Constant object at 0x7da20c9912d0>], [<ast.Constant object at 0x7da20c9917b0>, <ast.Constant object at 0x7da20c992590>, <ast.Dict object at 0x7da20c9939d0>, <ast.Dict object at 0x7da20c990310>]]
if name[self].volumes begin[:]
variable[volumes] assign[=] call[name[sorted], parameter[call[name[self].volumes.values, parameter[]]]]
call[call[call[call[name[output]][constant[spec]]][constant[template]]][constant[spec]]][constant[volumes]] assign[=] name[volumes]
variable[noalias_dumper] assign[=] name[yaml].dumper.SafeDumper
name[noalias_dumper].ignore_aliases assign[=] <ast.Lambda object at 0x7da2044c0790>
return[call[name[yaml].dump, parameter[name[output]]]] | keyword[def] identifier[emit_containers] ( identifier[self] , identifier[containers] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[containers] = identifier[sorted] ( identifier[containers] , identifier[key] = keyword[lambda] identifier[c] : identifier[c] . identifier[get] ( literal[string] ))
identifier[output] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] : keyword[None] ,
literal[string] : literal[string] ,
literal[string] :{
literal[string] : keyword[None] ,
literal[string] : literal[string] ,
},
},
literal[string] :{
literal[string] : literal[int] ,
literal[string] :{
literal[string] :{
literal[string] : keyword[None] ,
literal[string] : literal[string]
}
},
literal[string] :{
literal[string] :{
literal[string] :{
literal[string] : keyword[None] ,
literal[string] : literal[string]
}
},
literal[string] :{
literal[string] : identifier[json] . identifier[loads] ( identifier[json] . identifier[dumps] ( identifier[containers] ))
}
}
}
}
keyword[if] identifier[self] . identifier[volumes] :
identifier[volumes] = identifier[sorted] ( identifier[self] . identifier[volumes] . identifier[values] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[get] ( literal[string] ))
identifier[output] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]= identifier[volumes]
identifier[noalias_dumper] = identifier[yaml] . identifier[dumper] . identifier[SafeDumper]
identifier[noalias_dumper] . identifier[ignore_aliases] = keyword[lambda] identifier[self] , identifier[data] : keyword[True]
keyword[return] identifier[yaml] . identifier[dump] (
identifier[output] ,
identifier[default_flow_style] = keyword[False] ,
identifier[Dumper] = identifier[noalias_dumper]
) | def emit_containers(self, containers, verbose=True):
"""
Emits the applications and sorts containers by name
:param containers: List of the container definitions
:type containers: list of dict
:param verbose: Print out newlines and indented JSON
:type verbose: bool
:returns: The text output
:rtype: str
"""
containers = sorted(containers, key=lambda c: c.get('name'))
output = {'kind': 'Deployment', 'apiVersion': 'extensions/v1beta1', 'metadata': {'name': None, 'namespace': 'default', 'labels': {'app': None, 'version': 'latest'}}, 'spec': {'replicas': 1, 'selector': {'matchLabels': {'app': None, 'version': 'latest'}}, 'template': {'metadata': {'labels': {'app': None, 'version': 'latest'}}, 'spec': {'containers': json.loads(json.dumps(containers))}}}}
if self.volumes:
volumes = sorted(self.volumes.values(), key=lambda x: x.get('name'))
output['spec']['template']['spec']['volumes'] = volumes # depends on [control=['if'], data=[]]
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
return yaml.dump(output, default_flow_style=False, Dumper=noalias_dumper) |
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes | def function[_superclasses_for_subject, parameter[self, graph, typeof]]:
constant[helper, returns a list of all superclasses of a given class]
variable[classes] assign[=] list[[]]
variable[superclass] assign[=] name[typeof]
while constant[True] begin[:]
variable[found] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b25ec580>, <ast.Name object at 0x7da1b24fc3a0>]]] in starred[call[name[self].schema_def.ontology][name[superclass]]] begin[:]
if compare[call[name[self].schema_def.lexicon][constant[subclass]] equal[==] call[name[str], parameter[name[p]]]] begin[:]
variable[found] assign[=] constant[True]
call[name[classes].append, parameter[name[o]]]
variable[superclass] assign[=] name[o]
if <ast.UnaryOp object at 0x7da1b25ecd60> begin[:]
break
return[name[classes]] | keyword[def] identifier[_superclasses_for_subject] ( identifier[self] , identifier[graph] , identifier[typeof] ):
literal[string]
identifier[classes] =[]
identifier[superclass] = identifier[typeof]
keyword[while] keyword[True] :
identifier[found] = keyword[False]
keyword[for] identifier[p] , identifier[o] keyword[in] identifier[self] . identifier[schema_def] . identifier[ontology] [ identifier[superclass] ]:
keyword[if] identifier[self] . identifier[schema_def] . identifier[lexicon] [ literal[string] ]== identifier[str] ( identifier[p] ):
identifier[found] = keyword[True]
identifier[classes] . identifier[append] ( identifier[o] )
identifier[superclass] = identifier[o]
keyword[if] keyword[not] identifier[found] :
keyword[break]
keyword[return] identifier[classes] | def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for (p, o) in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not found:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return classes |
def get_bibtex(isbn_identifier):
    """
    Get a BibTeX string for the given ISBN.
    :param isbn_identifier: ISBN to fetch BibTeX entry for.
    :returns: A BibTeX string or ``None`` if could not fetch it.
    >>> get_bibtex('9783161484100')
    '@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
    """
    # Preferred route: resolve the ISBN to a DOI and fetch BibTeX from it.
    bibtex = doi.get_bibtex(to_doi(isbn_identifier))
    if bibtex is not None:
        return bibtex
    # Some ISBNs have no DOI; fall back to fetching metadata directly from
    # the ISBN (Google Books / worldcat.org via isbnlib) and format it.
    metadata = isbnlib.meta(isbn_identifier, 'default')
    return isbnlib.registry.bibformatters['bibtex'](metadata)
constant[
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\n author = {Peekaboo Maps},\n isbn = {9783161484100},\n year = {2009},\n publisher = {Peek A Boo Maps}\n}'
]
variable[bibtex] assign[=] call[name[doi].get_bibtex, parameter[call[name[to_doi], parameter[name[isbn_identifier]]]]]
if compare[name[bibtex] is constant[None]] begin[:]
variable[bibtex] assign[=] call[call[name[isbnlib].registry.bibformatters][constant[bibtex]], parameter[call[name[isbnlib].meta, parameter[name[isbn_identifier], constant[default]]]]]
return[name[bibtex]] | keyword[def] identifier[get_bibtex] ( identifier[isbn_identifier] ):
literal[string]
identifier[bibtex] = identifier[doi] . identifier[get_bibtex] ( identifier[to_doi] ( identifier[isbn_identifier] ))
keyword[if] identifier[bibtex] keyword[is] keyword[None] :
identifier[bibtex] = identifier[isbnlib] . identifier[registry] . identifier[bibformatters] [ literal[string] ](
identifier[isbnlib] . identifier[meta] ( identifier[isbn_identifier] , literal[string] ))
keyword[return] identifier[bibtex] | def get_bibtex(isbn_identifier):
"""
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
"""
# Try to find the BibTeX using associated DOIs
bibtex = doi.get_bibtex(to_doi(isbn_identifier))
if bibtex is None:
# In some cases, there are no DOIs for a given ISBN. In this case, try
# to fetch bibtex directly from the ISBN, using a combination of
# Google Books and worldcat.org results.
bibtex = isbnlib.registry.bibformatters['bibtex'](isbnlib.meta(isbn_identifier, 'default')) # depends on [control=['if'], data=['bibtex']]
return bibtex |
def solve(self):
    """Run the ACE calculational loop."""
    self._initialize()
    # Keep iterating while the outer error improves, capped at MAX_OUTERS.
    while self._outer_error_is_decreasing() and self._outer_iters < MAX_OUTERS:
        status = ('* Starting outer iteration {0:03d}. '
                  'Current err = {1:12.5E}').format(self._outer_iters,
                                                    self._last_outer_error)
        print(status)
        self._iterate_to_update_x_transforms()
        self._update_y_transform()
        self._outer_iters += 1
constant[Run the ACE calculational loop.]
call[name[self]._initialize, parameter[]]
while <ast.BoolOp object at 0x7da1b19b59c0> begin[:]
call[name[print], parameter[call[constant[* Starting outer iteration {0:03d}. Current err = {1:12.5E}].format, parameter[name[self]._outer_iters, name[self]._last_outer_error]]]]
call[name[self]._iterate_to_update_x_transforms, parameter[]]
call[name[self]._update_y_transform, parameter[]]
<ast.AugAssign object at 0x7da1b1a1baf0> | keyword[def] identifier[solve] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_initialize] ()
keyword[while] identifier[self] . identifier[_outer_error_is_decreasing] () keyword[and] identifier[self] . identifier[_outer_iters] < identifier[MAX_OUTERS] :
identifier[print] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[_outer_iters] , identifier[self] . identifier[_last_outer_error] ))
identifier[self] . identifier[_iterate_to_update_x_transforms] ()
identifier[self] . identifier[_update_y_transform] ()
identifier[self] . identifier[_outer_iters] += literal[int] | def solve(self):
"""Run the ACE calculational loop."""
self._initialize()
while self._outer_error_is_decreasing() and self._outer_iters < MAX_OUTERS:
print('* Starting outer iteration {0:03d}. Current err = {1:12.5E}'.format(self._outer_iters, self._last_outer_error))
self._iterate_to_update_x_transforms()
self._update_y_transform()
self._outer_iters += 1 # depends on [control=['while'], data=[]] |
def wrap_arguments(args=None):
    """Serialise (name, value) pairs as XML tags for a SOAP request body.

    Args:
        args (list): ``(name, value)`` tuples giving each argument's name
            and value, e.g. ``[('InstanceID', 0), ('Speed', 1)]``. Values
            may be anything with a string representation; each value is
            XML-escaped (double quotes included) and wrapped in
            ``<name>...</name>`` tags.

    Example:
        >>> from soco import SoCo
        >>> device = SoCo('192.168.1.101')
        >>> s = Service(device)
        >>> print(s.wrap_arguments([('InstanceID', 0), ('Speed', 1)]))
        <InstanceID>0</InstanceID><Speed>1</Speed>'
    """
    pairs = args if args is not None else []
    # "%s" % value coerces to unicode (we use unicode literals) without the
    # Python-2-only ``unicode`` builtin.
    return "".join(
        "<{name}>{value}</{name}>".format(
            name=name, value=escape("%s" % value, {'"': "&quot;"}))
        for name, value in pairs)
constant[Wrap a list of tuples in xml ready to pass into a SOAP request.
Args:
args (list): a list of (name, value) tuples specifying the
name of each argument and its value, eg
``[('InstanceID', 0), ('Speed', 1)]``. The value
can be a string or something with a string representation. The
arguments are escaped and wrapped in <name> and <value> tags.
Example:
>>> from soco import SoCo
>>> device = SoCo('192.168.1.101')
>>> s = Service(device)
>>> print(s.wrap_arguments([('InstanceID', 0), ('Speed', 1)]))
<InstanceID>0</InstanceID><Speed>1</Speed>'
]
if compare[name[args] is constant[None]] begin[:]
variable[args] assign[=] list[[]]
variable[tags] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20e962320>, <ast.Name object at 0x7da20e963d60>]]] in starred[name[args]] begin[:]
variable[tag] assign[=] call[constant[<{name}>{value}</{name}>].format, parameter[]]
call[name[tags].append, parameter[name[tag]]]
variable[xml] assign[=] call[constant[].join, parameter[name[tags]]]
return[name[xml]] | keyword[def] identifier[wrap_arguments] ( identifier[args] = keyword[None] ):
literal[string]
keyword[if] identifier[args] keyword[is] keyword[None] :
identifier[args] =[]
identifier[tags] =[]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[args] :
identifier[tag] = literal[string] . identifier[format] (
identifier[name] = identifier[name] , identifier[value] = identifier[escape] ( literal[string] % identifier[value] ,{ literal[string] : literal[string] }))
identifier[tags] . identifier[append] ( identifier[tag] )
identifier[xml] = literal[string] . identifier[join] ( identifier[tags] )
keyword[return] identifier[xml] | def wrap_arguments(args=None):
"""Wrap a list of tuples in xml ready to pass into a SOAP request.
Args:
args (list): a list of (name, value) tuples specifying the
name of each argument and its value, eg
``[('InstanceID', 0), ('Speed', 1)]``. The value
can be a string or something with a string representation. The
arguments are escaped and wrapped in <name> and <value> tags.
Example:
>>> from soco import SoCo
>>> device = SoCo('192.168.1.101')
>>> s = Service(device)
>>> print(s.wrap_arguments([('InstanceID', 0), ('Speed', 1)]))
<InstanceID>0</InstanceID><Speed>1</Speed>'
"""
if args is None:
args = [] # depends on [control=['if'], data=['args']]
tags = []
for (name, value) in args:
tag = '<{name}>{value}</{name}>'.format(name=name, value=escape('%s' % value, {'"': '"'}))
# % converts to unicode because we are using unicode literals.
# Avoids use of 'unicode' function which does not exist in python 3
tags.append(tag) # depends on [control=['for'], data=[]]
xml = ''.join(tags)
return xml |
def get(args):
    """
    invoke wptools and assemble selected output

    :param args: parsed CLI namespace; single-letter attributes mirror the
        command-line flags (H=HTML, l=lang, n=nowrap, q=query-only,
        s=silent, t=title, v=verbose, w=wiki).
    :returns: encoded page text/HTML, a query string, or "NOT_FOUND".
    """
    html = args.H
    lang = args.l
    nowrap = args.n
    query = args.q
    silent = args.s
    title = args.t
    verbose = args.v
    wiki = args.w
    if query:
        # Query-only mode: build the API query (or a random-page query)
        # without fetching any page content.
        qobj = WPToolsQuery(lang=lang, wiki=wiki)
        if title:
            return qobj.query(title)
        return qobj.random()
    page = wptools.page(title, lang=lang, silent=silent,
                        verbose=verbose, wiki=wiki)
    try:
        page.get_query()
    # NOTE(review): ``StandardError`` exists only on Python 2; under
    # Python 3 this except clause raises NameError if get_query() fails.
    except (StandardError, ValueError, LookupError):
        return "NOT_FOUND"
    if not page.data.get('extext'):
        out = page.cache['query']['query']
    # NOTE(review): the assignment below unconditionally overwrites ``out``,
    # which makes the branch above dead code -- confirm intended fallback.
    out = _page_text(page, nowrap)
    if html:
        out = _page_html(page)
    try:
        return out.encode('utf-8')
    # NOTE(review): KeyError from ``.encode`` is unusual -- presumably this
    # guards some non-string ``out``; verify what _page_html can return.
    except KeyError:
        return out
constant[
invoke wptools and assemble selected output
]
variable[html] assign[=] name[args].H
variable[lang] assign[=] name[args].l
variable[nowrap] assign[=] name[args].n
variable[query] assign[=] name[args].q
variable[silent] assign[=] name[args].s
variable[title] assign[=] name[args].t
variable[verbose] assign[=] name[args].v
variable[wiki] assign[=] name[args].w
if name[query] begin[:]
variable[qobj] assign[=] call[name[WPToolsQuery], parameter[]]
if name[title] begin[:]
return[call[name[qobj].query, parameter[name[title]]]]
return[call[name[qobj].random, parameter[]]]
variable[page] assign[=] call[name[wptools].page, parameter[name[title]]]
<ast.Try object at 0x7da1b1205c30>
if <ast.UnaryOp object at 0x7da1b1204640> begin[:]
variable[out] assign[=] call[call[name[page].cache][constant[query]]][constant[query]]
variable[out] assign[=] call[name[_page_text], parameter[name[page], name[nowrap]]]
if name[html] begin[:]
variable[out] assign[=] call[name[_page_html], parameter[name[page]]]
<ast.Try object at 0x7da1b1204c10> | keyword[def] identifier[get] ( identifier[args] ):
literal[string]
identifier[html] = identifier[args] . identifier[H]
identifier[lang] = identifier[args] . identifier[l]
identifier[nowrap] = identifier[args] . identifier[n]
identifier[query] = identifier[args] . identifier[q]
identifier[silent] = identifier[args] . identifier[s]
identifier[title] = identifier[args] . identifier[t]
identifier[verbose] = identifier[args] . identifier[v]
identifier[wiki] = identifier[args] . identifier[w]
keyword[if] identifier[query] :
identifier[qobj] = identifier[WPToolsQuery] ( identifier[lang] = identifier[lang] , identifier[wiki] = identifier[wiki] )
keyword[if] identifier[title] :
keyword[return] identifier[qobj] . identifier[query] ( identifier[title] )
keyword[return] identifier[qobj] . identifier[random] ()
identifier[page] = identifier[wptools] . identifier[page] ( identifier[title] , identifier[lang] = identifier[lang] , identifier[silent] = identifier[silent] ,
identifier[verbose] = identifier[verbose] , identifier[wiki] = identifier[wiki] )
keyword[try] :
identifier[page] . identifier[get_query] ()
keyword[except] ( identifier[StandardError] , identifier[ValueError] , identifier[LookupError] ):
keyword[return] literal[string]
keyword[if] keyword[not] identifier[page] . identifier[data] . identifier[get] ( literal[string] ):
identifier[out] = identifier[page] . identifier[cache] [ literal[string] ][ literal[string] ]
identifier[out] = identifier[_page_text] ( identifier[page] , identifier[nowrap] )
keyword[if] identifier[html] :
identifier[out] = identifier[_page_html] ( identifier[page] )
keyword[try] :
keyword[return] identifier[out] . identifier[encode] ( literal[string] )
keyword[except] identifier[KeyError] :
keyword[return] identifier[out] | def get(args):
"""
invoke wptools and assemble selected output
"""
html = args.H
lang = args.l
nowrap = args.n
query = args.q
silent = args.s
title = args.t
verbose = args.v
wiki = args.w
if query:
qobj = WPToolsQuery(lang=lang, wiki=wiki)
if title:
return qobj.query(title) # depends on [control=['if'], data=[]]
return qobj.random() # depends on [control=['if'], data=[]]
page = wptools.page(title, lang=lang, silent=silent, verbose=verbose, wiki=wiki)
try:
page.get_query() # depends on [control=['try'], data=[]]
except (StandardError, ValueError, LookupError):
return 'NOT_FOUND' # depends on [control=['except'], data=[]]
if not page.data.get('extext'):
out = page.cache['query']['query'] # depends on [control=['if'], data=[]]
out = _page_text(page, nowrap)
if html:
out = _page_html(page) # depends on [control=['if'], data=[]]
try:
return out.encode('utf-8') # depends on [control=['try'], data=[]]
except KeyError:
return out # depends on [control=['except'], data=[]] |
def get_sub_area(area, xslice, yslice):
    """Apply slices to the area_extent and size of the area."""
    # Extent corners derived from pixel centers: the 0.5 offset shifts from
    # pixel index to pixel edge.
    left = area.pixel_upper_left[0] + (xslice.start - 0.5) * area.pixel_size_x
    bottom = area.pixel_upper_left[1] - (yslice.stop - 0.5) * area.pixel_size_y
    right = area.pixel_upper_left[0] + (xslice.stop - 0.5) * area.pixel_size_x
    top = area.pixel_upper_left[1] - (yslice.start - 0.5) * area.pixel_size_y
    width = xslice.stop - xslice.start
    height = yslice.stop - yslice.start
    return AreaDefinition(area.area_id, area.name, area.proj_id,
                          area.proj_dict, width, height,
                          (left, bottom, right, top))
constant[Apply slices to the area_extent and size of the area.]
variable[new_area_extent] assign[=] tuple[[<ast.BinOp object at 0x7da1b1d6f070>, <ast.BinOp object at 0x7da1b22a4160>, <ast.BinOp object at 0x7da1b22a58a0>, <ast.BinOp object at 0x7da1b22a70d0>]]
return[call[name[AreaDefinition], parameter[name[area].area_id, name[area].name, name[area].proj_id, name[area].proj_dict, binary_operation[name[xslice].stop - name[xslice].start], binary_operation[name[yslice].stop - name[yslice].start], name[new_area_extent]]]] | keyword[def] identifier[get_sub_area] ( identifier[area] , identifier[xslice] , identifier[yslice] ):
literal[string]
identifier[new_area_extent] =(( identifier[area] . identifier[pixel_upper_left] [ literal[int] ]+
( identifier[xslice] . identifier[start] - literal[int] )* identifier[area] . identifier[pixel_size_x] ),
( identifier[area] . identifier[pixel_upper_left] [ literal[int] ]-
( identifier[yslice] . identifier[stop] - literal[int] )* identifier[area] . identifier[pixel_size_y] ),
( identifier[area] . identifier[pixel_upper_left] [ literal[int] ]+
( identifier[xslice] . identifier[stop] - literal[int] )* identifier[area] . identifier[pixel_size_x] ),
( identifier[area] . identifier[pixel_upper_left] [ literal[int] ]-
( identifier[yslice] . identifier[start] - literal[int] )* identifier[area] . identifier[pixel_size_y] ))
keyword[return] identifier[AreaDefinition] ( identifier[area] . identifier[area_id] , identifier[area] . identifier[name] ,
identifier[area] . identifier[proj_id] , identifier[area] . identifier[proj_dict] ,
identifier[xslice] . identifier[stop] - identifier[xslice] . identifier[start] ,
identifier[yslice] . identifier[stop] - identifier[yslice] . identifier[start] ,
identifier[new_area_extent] ) | def get_sub_area(area, xslice, yslice):
"""Apply slices to the area_extent and size of the area."""
new_area_extent = (area.pixel_upper_left[0] + (xslice.start - 0.5) * area.pixel_size_x, area.pixel_upper_left[1] - (yslice.stop - 0.5) * area.pixel_size_y, area.pixel_upper_left[0] + (xslice.stop - 0.5) * area.pixel_size_x, area.pixel_upper_left[1] - (yslice.start - 0.5) * area.pixel_size_y)
return AreaDefinition(area.area_id, area.name, area.proj_id, area.proj_dict, xslice.stop - xslice.start, yslice.stop - yslice.start, new_area_extent) |
def render(cls, obj):
    """
    Using any display hooks that have been registered, render the
    object to a dictionary of MIME types and metadata information.
    """
    class_hierarchy = inspect.getmro(type(obj))
    hooks = []
    for _, type_hooks in cls._display_hooks.items():
        # For each hook group, pick the hook registered for the most
        # specific class in obj's MRO. Use ``klass`` here -- the original
        # loop variable shadowed the ``cls`` receiver parameter.
        for klass in class_hierarchy:
            if klass in type_hooks:
                hooks.append(type_hooks[klass])
                break
    data, metadata = {}, {}
    for hook in hooks:
        ret = hook(obj)
        if ret is None:
            # Hooks may decline to render by returning None.
            continue
        d, md = ret
        data.update(d)
        metadata.update(md)
    return data, metadata
constant[
Using any display hooks that have been registered, render the
object to a dictionary of MIME types and metadata information.
]
variable[class_hierarchy] assign[=] call[name[inspect].getmro, parameter[call[name[type], parameter[name[obj]]]]]
variable[hooks] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2344fd0>, <ast.Name object at 0x7da1b23475e0>]]] in starred[call[name[cls]._display_hooks.items, parameter[]]] begin[:]
for taget[name[cls]] in starred[name[class_hierarchy]] begin[:]
if compare[name[cls] in name[type_hooks]] begin[:]
call[name[hooks].append, parameter[call[name[type_hooks]][name[cls]]]]
break
<ast.Tuple object at 0x7da1b2345720> assign[=] tuple[[<ast.Dict object at 0x7da1b2344160>, <ast.Dict object at 0x7da1b2345c00>]]
for taget[name[hook]] in starred[name[hooks]] begin[:]
variable[ret] assign[=] call[name[hook], parameter[name[obj]]]
if compare[name[ret] is constant[None]] begin[:]
continue
<ast.Tuple object at 0x7da1b2346c50> assign[=] name[ret]
call[name[data].update, parameter[name[d]]]
call[name[metadata].update, parameter[name[md]]]
return[tuple[[<ast.Name object at 0x7da2054a59c0>, <ast.Name object at 0x7da2054a5fc0>]]] | keyword[def] identifier[render] ( identifier[cls] , identifier[obj] ):
literal[string]
identifier[class_hierarchy] = identifier[inspect] . identifier[getmro] ( identifier[type] ( identifier[obj] ))
identifier[hooks] =[]
keyword[for] identifier[_] , identifier[type_hooks] keyword[in] identifier[cls] . identifier[_display_hooks] . identifier[items] ():
keyword[for] identifier[cls] keyword[in] identifier[class_hierarchy] :
keyword[if] identifier[cls] keyword[in] identifier[type_hooks] :
identifier[hooks] . identifier[append] ( identifier[type_hooks] [ identifier[cls] ])
keyword[break]
identifier[data] , identifier[metadata] ={},{}
keyword[for] identifier[hook] keyword[in] identifier[hooks] :
identifier[ret] = identifier[hook] ( identifier[obj] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
keyword[continue]
identifier[d] , identifier[md] = identifier[ret]
identifier[data] . identifier[update] ( identifier[d] )
identifier[metadata] . identifier[update] ( identifier[md] )
keyword[return] identifier[data] , identifier[metadata] | def render(cls, obj):
"""
Using any display hooks that have been registered, render the
object to a dictionary of MIME types and metadata information.
"""
class_hierarchy = inspect.getmro(type(obj))
hooks = []
for (_, type_hooks) in cls._display_hooks.items():
for cls in class_hierarchy:
if cls in type_hooks:
hooks.append(type_hooks[cls])
break # depends on [control=['if'], data=['cls', 'type_hooks']] # depends on [control=['for'], data=['cls']] # depends on [control=['for'], data=[]]
(data, metadata) = ({}, {})
for hook in hooks:
ret = hook(obj)
if ret is None:
continue # depends on [control=['if'], data=[]]
(d, md) = ret
data.update(d)
metadata.update(md) # depends on [control=['for'], data=['hook']]
return (data, metadata) |
def get_verbose(obj, field_name=""):
    """
    Return the verbose name of a model field, or ``""`` if unavailable.

    :param obj: A model instance (any object exposing a Django-style
        ``_meta`` options object).
    :param field_name: The name of the field whose verbose name is wanted.
    :return: The field's ``verbose_name``, or ``""`` when ``obj`` has no
        ``_meta``, the options object has no ``get_field`` method, or the
        field does not exist.
    """
    # Probe for the method we actually call. The previous check tested
    # ``get_field_by_name`` (removed in Django 1.10) while calling
    # ``get_field``, so it always fell through on modern Django.
    if hasattr(obj, "_meta") and hasattr(obj._meta, "get_field"):
        try:
            return obj._meta.get_field(field_name).verbose_name
        except FieldDoesNotExist:
            pass
    return ""
constant[
Returns the verbose name of an object's field.
:param obj: A model instance.
:param field_name: The requested field value in string format.
]
if <ast.BoolOp object at 0x7da1b255a7a0> begin[:]
<ast.Try object at 0x7da1b255a1a0>
return[constant[]] | keyword[def] identifier[get_verbose] ( identifier[obj] , identifier[field_name] = literal[string] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[obj] . identifier[_meta] , literal[string] ):
keyword[try] :
keyword[return] identifier[obj] . identifier[_meta] . identifier[get_field] ( identifier[field_name] ). identifier[verbose_name]
keyword[except] identifier[FieldDoesNotExist] :
keyword[pass]
keyword[return] literal[string] | def get_verbose(obj, field_name=''):
"""
Returns the verbose name of an object's field.
:param obj: A model instance.
:param field_name: The requested field value in string format.
"""
if hasattr(obj, '_meta') and hasattr(obj._meta, 'get_field_by_name'):
try:
return obj._meta.get_field(field_name).verbose_name # depends on [control=['try'], data=[]]
except FieldDoesNotExist:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return '' |
def gather_info_hdu(self, hdulist):
    """Collect a summary dictionary describing an HDU list.

    The result contains the extension count (``n_ext``), the extension
    names (the primary HDU is always labelled ``'PRIMARY'``), and one
    entry per configured metadata header, extracted via the registered
    FITS extractor.
    """
    summary = {"n_ext": len(hdulist)}
    names = ["PRIMARY"]
    for hdu in hdulist[1:]:
        names.append(hdu.header.get("extname", ""))
    summary["name_ext"] = names
    extractor = self.extractor_map["fits"]
    for header_key in self.meta_dinfo_headers:
        summary[header_key] = extractor.extract(header_key, hdulist)
    return summary
constant[Obtain a summary of information about the image.]
variable[values] assign[=] dictionary[[], []]
call[name[values]][constant[n_ext]] assign[=] call[name[len], parameter[name[hdulist]]]
variable[extnames] assign[=] <ast.ListComp object at 0x7da1b26ae9e0>
call[name[values]][constant[name_ext]] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b26ad960>]] + name[extnames]]
variable[fits_extractor] assign[=] call[name[self].extractor_map][constant[fits]]
for taget[name[key]] in starred[name[self].meta_dinfo_headers] begin[:]
call[name[values]][name[key]] assign[=] call[name[fits_extractor].extract, parameter[name[key], name[hdulist]]]
return[name[values]] | keyword[def] identifier[gather_info_hdu] ( identifier[self] , identifier[hdulist] ):
literal[string]
identifier[values] ={}
identifier[values] [ literal[string] ]= identifier[len] ( identifier[hdulist] )
identifier[extnames] =[ identifier[hdu] . identifier[header] . identifier[get] ( literal[string] , literal[string] ) keyword[for] identifier[hdu] keyword[in] identifier[hdulist] [ literal[int] :]]
identifier[values] [ literal[string] ]=[ literal[string] ]+ identifier[extnames]
identifier[fits_extractor] = identifier[self] . identifier[extractor_map] [ literal[string] ]
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[meta_dinfo_headers] :
identifier[values] [ identifier[key] ]= identifier[fits_extractor] . identifier[extract] ( identifier[key] , identifier[hdulist] )
keyword[return] identifier[values] | def gather_info_hdu(self, hdulist):
"""Obtain a summary of information about the image."""
values = {}
values['n_ext'] = len(hdulist)
extnames = [hdu.header.get('extname', '') for hdu in hdulist[1:]]
values['name_ext'] = ['PRIMARY'] + extnames
fits_extractor = self.extractor_map['fits']
for key in self.meta_dinfo_headers:
values[key] = fits_extractor.extract(key, hdulist) # depends on [control=['for'], data=['key']]
return values |
def update(self, data, offset, is_last, buffer_index=0):
    """
    Append a block of frames to one of the buffers.

    Args:
        data (np.ndarray): The frames to append.
        offset (int): Sequence index of the first frame in ``data``.
        is_last (bool): True when this block ends the sequence.
        buffer_index (int): Which buffer to update (< self.num_buffers).

    Raises:
        ValueError: If ``buffer_index`` is out of range, or if ``offset``
            does not directly continue the frames already buffered.
    """
    if not buffer_index < self.num_buffers:
        raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index))
    current = self.buffers[buffer_index]
    if current is not None and current.shape[0] > 0:
        # New data must start exactly where the buffered frames end.
        next_frame = self.current_frame + current.shape[0]
        if next_frame != offset:
            raise ValueError(
                'There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format(
                    next_frame, offset))
        self.buffers[buffer_index] = np.vstack([current, data])
    else:
        self.buffers[buffer_index] = data
    self.buffers_full[buffer_index] = is_last
constant[
Update the buffer at the given index.
Args:
data (np.ndarray): The frames.
offset (int): The index of the first frame in `data` within the sequence.
is_last (bool): Whether this is the last block of frames in the sequence.
buffer_index (int): The index of the buffer to update (< self.num_buffers).
]
if compare[name[buffer_index] greater_or_equal[>=] name[self].num_buffers] begin[:]
<ast.Raise object at 0x7da1b0ed3010>
if <ast.BoolOp object at 0x7da1b0ed1930> begin[:]
variable[expected_next_frame] assign[=] binary_operation[name[self].current_frame + call[call[name[self].buffers][name[buffer_index]].shape][constant[0]]]
if compare[name[expected_next_frame] not_equal[!=] name[offset]] begin[:]
<ast.Raise object at 0x7da1b0e27010>
call[name[self].buffers][name[buffer_index]] assign[=] call[name[np].vstack, parameter[list[[<ast.Subscript object at 0x7da1b0e24490>, <ast.Name object at 0x7da1b0e276d0>]]]]
call[name[self].buffers_full][name[buffer_index]] assign[=] name[is_last] | keyword[def] identifier[update] ( identifier[self] , identifier[data] , identifier[offset] , identifier[is_last] , identifier[buffer_index] = literal[int] ):
literal[string]
keyword[if] identifier[buffer_index] >= identifier[self] . identifier[num_buffers] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[self] . identifier[num_buffers] , identifier[buffer_index] ))
keyword[if] identifier[self] . identifier[buffers] [ identifier[buffer_index] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[buffers] [ identifier[buffer_index] ]. identifier[shape] [ literal[int] ]> literal[int] :
identifier[expected_next_frame] = identifier[self] . identifier[current_frame] + identifier[self] . identifier[buffers] [ identifier[buffer_index] ]. identifier[shape] [ literal[int] ]
keyword[if] identifier[expected_next_frame] != identifier[offset] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[expected_next_frame] , identifier[offset] ))
identifier[self] . identifier[buffers] [ identifier[buffer_index] ]= identifier[np] . identifier[vstack] ([ identifier[self] . identifier[buffers] [ identifier[buffer_index] ], identifier[data] ])
keyword[else] :
identifier[self] . identifier[buffers] [ identifier[buffer_index] ]= identifier[data]
identifier[self] . identifier[buffers_full] [ identifier[buffer_index] ]= identifier[is_last] | def update(self, data, offset, is_last, buffer_index=0):
"""
Update the buffer at the given index.
Args:
data (np.ndarray): The frames.
offset (int): The index of the first frame in `data` within the sequence.
is_last (bool): Whether this is the last block of frames in the sequence.
buffer_index (int): The index of the buffer to update (< self.num_buffers).
"""
if buffer_index >= self.num_buffers:
raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index)) # depends on [control=['if'], data=['buffer_index']]
if self.buffers[buffer_index] is not None and self.buffers[buffer_index].shape[0] > 0:
expected_next_frame = self.current_frame + self.buffers[buffer_index].shape[0]
if expected_next_frame != offset:
raise ValueError('There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format(expected_next_frame, offset)) # depends on [control=['if'], data=['expected_next_frame', 'offset']]
self.buffers[buffer_index] = np.vstack([self.buffers[buffer_index], data]) # depends on [control=['if'], data=[]]
else:
self.buffers[buffer_index] = data
self.buffers_full[buffer_index] = is_last |
def _phiforce(self,R,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2011-10-19 - Written - Bovy (IAS)
"""
if R < self._rb:
return self._mphio*math.sin(self._m*phi-self._mphib)\
*self._rbp*(2.*self._r1p-self._rbp/R**self._p)
else:
return self._mphio*R**self._p*math.sin(self._m*phi-self._mphib) | def function[_phiforce, parameter[self, R, phi, t]]:
constant[
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2011-10-19 - Written - Bovy (IAS)
]
if compare[name[R] less[<] name[self]._rb] begin[:]
return[binary_operation[binary_operation[binary_operation[name[self]._mphio * call[name[math].sin, parameter[binary_operation[binary_operation[name[self]._m * name[phi]] - name[self]._mphib]]]] * name[self]._rbp] * binary_operation[binary_operation[constant[2.0] * name[self]._r1p] - binary_operation[name[self]._rbp / binary_operation[name[R] ** name[self]._p]]]]] | keyword[def] identifier[_phiforce] ( identifier[self] , identifier[R] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
keyword[if] identifier[R] < identifier[self] . identifier[_rb] :
keyword[return] identifier[self] . identifier[_mphio] * identifier[math] . identifier[sin] ( identifier[self] . identifier[_m] * identifier[phi] - identifier[self] . identifier[_mphib] )* identifier[self] . identifier[_rbp] *( literal[int] * identifier[self] . identifier[_r1p] - identifier[self] . identifier[_rbp] / identifier[R] ** identifier[self] . identifier[_p] )
keyword[else] :
keyword[return] identifier[self] . identifier[_mphio] * identifier[R] ** identifier[self] . identifier[_p] * identifier[math] . identifier[sin] ( identifier[self] . identifier[_m] * identifier[phi] - identifier[self] . identifier[_mphib] ) | def _phiforce(self, R, phi=0.0, t=0.0):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2011-10-19 - Written - Bovy (IAS)
"""
if R < self._rb:
return self._mphio * math.sin(self._m * phi - self._mphib) * self._rbp * (2.0 * self._r1p - self._rbp / R ** self._p) # depends on [control=['if'], data=['R']]
else:
return self._mphio * R ** self._p * math.sin(self._m * phi - self._mphib) |
def stop_trial(self, trial):
    """Stops trial.

    Trials may be stopped at any time. If trial is in state PENDING
    or PAUSED, calls `on_trial_remove` for scheduler and
    `on_trial_complete(..., early_terminated=True)` for search_alg.
    Otherwise waits for result for the trial and calls
    `on_trial_complete` for scheduler and search_alg if RUNNING.
    """
    had_error = False
    error_msg = None
    status = trial.status
    if status in (Trial.ERROR, Trial.TERMINATED):
        # Nothing to do for trials that have already finished.
        return
    if status in (Trial.PENDING, Trial.PAUSED):
        # The trial never produced a result; just detach it.
        self._scheduler_alg.on_trial_remove(self, trial)
        self._search_alg.on_trial_complete(
            trial.trial_id, early_terminated=True)
    elif status is Trial.RUNNING:
        try:
            result = self.trial_executor.fetch_result(trial)
            trial.update_last_result(result, terminate=True)
            self._scheduler_alg.on_trial_complete(self, trial, result)
            self._search_alg.on_trial_complete(
                trial.trial_id, result=result)
        except Exception:
            # Fetching/propagating the result failed; record the
            # traceback and report the error to scheduler and searcher.
            error_msg = traceback.format_exc()
            logger.exception("Error processing event.")
            self._scheduler_alg.on_trial_error(self, trial)
            self._search_alg.on_trial_complete(trial.trial_id, error=True)
            had_error = True
    self.trial_executor.stop_trial(trial, error=had_error, error_msg=error_msg)
constant[Stops trial.
Trials may be stopped at any time. If trial is in state PENDING
or PAUSED, calls `on_trial_remove` for scheduler and
`on_trial_complete(..., early_terminated=True) for search_alg.
Otherwise waits for result for the trial and calls
`on_trial_complete` for scheduler and search_alg if RUNNING.
]
variable[error] assign[=] constant[False]
variable[error_msg] assign[=] constant[None]
if compare[name[trial].status in list[[<ast.Attribute object at 0x7da20c7c8370>, <ast.Attribute object at 0x7da20c7c8280>]]] begin[:]
return[None]
call[name[self].trial_executor.stop_trial, parameter[name[trial]]] | keyword[def] identifier[stop_trial] ( identifier[self] , identifier[trial] ):
literal[string]
identifier[error] = keyword[False]
identifier[error_msg] = keyword[None]
keyword[if] identifier[trial] . identifier[status] keyword[in] [ identifier[Trial] . identifier[ERROR] , identifier[Trial] . identifier[TERMINATED] ]:
keyword[return]
keyword[elif] identifier[trial] . identifier[status] keyword[in] [ identifier[Trial] . identifier[PENDING] , identifier[Trial] . identifier[PAUSED] ]:
identifier[self] . identifier[_scheduler_alg] . identifier[on_trial_remove] ( identifier[self] , identifier[trial] )
identifier[self] . identifier[_search_alg] . identifier[on_trial_complete] (
identifier[trial] . identifier[trial_id] , identifier[early_terminated] = keyword[True] )
keyword[elif] identifier[trial] . identifier[status] keyword[is] identifier[Trial] . identifier[RUNNING] :
keyword[try] :
identifier[result] = identifier[self] . identifier[trial_executor] . identifier[fetch_result] ( identifier[trial] )
identifier[trial] . identifier[update_last_result] ( identifier[result] , identifier[terminate] = keyword[True] )
identifier[self] . identifier[_scheduler_alg] . identifier[on_trial_complete] ( identifier[self] , identifier[trial] , identifier[result] )
identifier[self] . identifier[_search_alg] . identifier[on_trial_complete] (
identifier[trial] . identifier[trial_id] , identifier[result] = identifier[result] )
keyword[except] identifier[Exception] :
identifier[error_msg] = identifier[traceback] . identifier[format_exc] ()
identifier[logger] . identifier[exception] ( literal[string] )
identifier[self] . identifier[_scheduler_alg] . identifier[on_trial_error] ( identifier[self] , identifier[trial] )
identifier[self] . identifier[_search_alg] . identifier[on_trial_complete] ( identifier[trial] . identifier[trial_id] , identifier[error] = keyword[True] )
identifier[error] = keyword[True]
identifier[self] . identifier[trial_executor] . identifier[stop_trial] ( identifier[trial] , identifier[error] = identifier[error] , identifier[error_msg] = identifier[error_msg] ) | def stop_trial(self, trial):
"""Stops trial.
Trials may be stopped at any time. If trial is in state PENDING
or PAUSED, calls `on_trial_remove` for scheduler and
`on_trial_complete(..., early_terminated=True) for search_alg.
Otherwise waits for result for the trial and calls
`on_trial_complete` for scheduler and search_alg if RUNNING.
"""
error = False
error_msg = None
if trial.status in [Trial.ERROR, Trial.TERMINATED]:
return # depends on [control=['if'], data=[]]
elif trial.status in [Trial.PENDING, Trial.PAUSED]:
self._scheduler_alg.on_trial_remove(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, early_terminated=True) # depends on [control=['if'], data=[]]
elif trial.status is Trial.RUNNING:
try:
result = self.trial_executor.fetch_result(trial)
trial.update_last_result(result, terminate=True)
self._scheduler_alg.on_trial_complete(self, trial, result)
self._search_alg.on_trial_complete(trial.trial_id, result=result) # depends on [control=['try'], data=[]]
except Exception:
error_msg = traceback.format_exc()
logger.exception('Error processing event.')
self._scheduler_alg.on_trial_error(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, error=True)
error = True # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
self.trial_executor.stop_trial(trial, error=error, error_msg=error_msg) |
def remove_children(self, reset_parent=True):
    """
    Detach every child from this node.

    :param bool reset_parent: when ``True``, also clear the ``parent``
        attribute of each removed child
    """
    if reset_parent:
        for orphan in self.children:
            orphan.parent = None
    self.__children = []
constant[
Remove all the children of this node.
:param bool reset_parent: if ``True``, set to ``None`` the parent attribute
of the children
]
if name[reset_parent] begin[:]
for taget[name[child]] in starred[name[self].children] begin[:]
name[child].parent assign[=] constant[None]
name[self].__children assign[=] list[[]] | keyword[def] identifier[remove_children] ( identifier[self] , identifier[reset_parent] = keyword[True] ):
literal[string]
keyword[if] identifier[reset_parent] :
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] :
identifier[child] . identifier[parent] = keyword[None]
identifier[self] . identifier[__children] =[] | def remove_children(self, reset_parent=True):
"""
Remove all the children of this node.
:param bool reset_parent: if ``True``, set to ``None`` the parent attribute
of the children
"""
if reset_parent:
for child in self.children:
child.parent = None # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
self.__children = [] |
def metadata(sceneid, pmin=2, pmax=98, **kwargs):
    """
    Return band bounds and statistics for a CBERS scene.

    Attributes
    ----------
    sceneid : str
        CBERS sceneid.
    pmin : int, optional, (default: 2)
        Histogram minimum cut.
    pmax : int, optional, (default: 98)
        Histogram maximum cut.
    kwargs : optional
        These are passed to 'rio_tiler.utils.raster_get_stats'
        e.g: histogram_bins=20, dst_crs='epsg:4326'

    Returns
    -------
    out : dict
        Dictionary with bounds and bands statistics.
    """
    scene_params = _cbers_parse_scene_id(sceneid)
    scene_address = "{}/{}".format(CBERS_BUCKET, scene_params["key"])
    bands = scene_params["bands"]
    ref_band = scene_params["reference_band"]

    band_addresses = [
        "{}/{}_BAND{}.tif".format(scene_address, sceneid, band) for band in bands
    ]

    # Pre-bind the per-band statistics call so it can be mapped in parallel.
    stats_for = partial(
        utils.raster_get_stats,
        indexes=[1],
        nodata=0,
        overview_level=2,
        percentiles=(pmin, pmax),
        **kwargs
    )
    with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
        responses = list(executor.map(stats_for, band_addresses))

    info = {"sceneid": sceneid}
    # The scene bounds come from the reference band's response.
    ref_bounds = [
        resp["bounds"] for band, resp in zip(bands, responses) if band == ref_band
    ]
    info["bounds"] = ref_bounds[0]

    statistics = {}
    for band, resp in zip(bands, responses):
        for _name, band_stats in resp["statistics"].items():
            statistics[band] = band_stats
    info["statistics"] = statistics
    return info
constant[
Return band bounds and statistics.
Attributes
----------
sceneid : str
CBERS sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.utils.raster_get_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics.
]
variable[scene_params] assign[=] call[name[_cbers_parse_scene_id], parameter[name[sceneid]]]
variable[cbers_address] assign[=] call[constant[{}/{}].format, parameter[name[CBERS_BUCKET], call[name[scene_params]][constant[key]]]]
variable[bands] assign[=] call[name[scene_params]][constant[bands]]
variable[ref_band] assign[=] call[name[scene_params]][constant[reference_band]]
variable[info] assign[=] dictionary[[<ast.Constant object at 0x7da1b07cc9d0>], [<ast.Name object at 0x7da1b07cc970>]]
variable[addresses] assign[=] <ast.ListComp object at 0x7da1b07cf1f0>
variable[_stats_worker] assign[=] call[name[partial], parameter[name[utils].raster_get_stats]]
with call[name[futures].ThreadPoolExecutor, parameter[]] begin[:]
variable[responses] assign[=] call[name[list], parameter[call[name[executor].map, parameter[name[_stats_worker], name[addresses]]]]]
call[name[info]][constant[bounds]] assign[=] call[<ast.ListComp object at 0x7da1b07cd930>][constant[0]]
call[name[info]][constant[statistics]] assign[=] <ast.DictComp object at 0x7da1b07cc760>
return[name[info]] | keyword[def] identifier[metadata] ( identifier[sceneid] , identifier[pmin] = literal[int] , identifier[pmax] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[scene_params] = identifier[_cbers_parse_scene_id] ( identifier[sceneid] )
identifier[cbers_address] = literal[string] . identifier[format] ( identifier[CBERS_BUCKET] , identifier[scene_params] [ literal[string] ])
identifier[bands] = identifier[scene_params] [ literal[string] ]
identifier[ref_band] = identifier[scene_params] [ literal[string] ]
identifier[info] ={ literal[string] : identifier[sceneid] }
identifier[addresses] =[
literal[string] . identifier[format] ( identifier[cbers_address] , identifier[sceneid] , identifier[band] ) keyword[for] identifier[band] keyword[in] identifier[bands]
]
identifier[_stats_worker] = identifier[partial] (
identifier[utils] . identifier[raster_get_stats] ,
identifier[indexes] =[ literal[int] ],
identifier[nodata] = literal[int] ,
identifier[overview_level] = literal[int] ,
identifier[percentiles] =( identifier[pmin] , identifier[pmax] ),
** identifier[kwargs]
)
keyword[with] identifier[futures] . identifier[ThreadPoolExecutor] ( identifier[max_workers] = identifier[MAX_THREADS] ) keyword[as] identifier[executor] :
identifier[responses] = identifier[list] ( identifier[executor] . identifier[map] ( identifier[_stats_worker] , identifier[addresses] ))
identifier[info] [ literal[string] ]=[ identifier[r] [ literal[string] ] keyword[for] identifier[b] , identifier[r] keyword[in] identifier[zip] ( identifier[bands] , identifier[responses] ) keyword[if] identifier[b] == identifier[ref_band] ][ literal[int] ]
identifier[info] [ literal[string] ]={
identifier[b] : identifier[v] keyword[for] identifier[b] , identifier[d] keyword[in] identifier[zip] ( identifier[bands] , identifier[responses] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] [ literal[string] ]. identifier[items] ()
}
keyword[return] identifier[info] | def metadata(sceneid, pmin=2, pmax=98, **kwargs):
"""
Return band bounds and statistics.
Attributes
----------
sceneid : str
CBERS sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.utils.raster_get_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics.
"""
scene_params = _cbers_parse_scene_id(sceneid)
cbers_address = '{}/{}'.format(CBERS_BUCKET, scene_params['key'])
bands = scene_params['bands']
ref_band = scene_params['reference_band']
info = {'sceneid': sceneid}
addresses = ['{}/{}_BAND{}.tif'.format(cbers_address, sceneid, band) for band in bands]
_stats_worker = partial(utils.raster_get_stats, indexes=[1], nodata=0, overview_level=2, percentiles=(pmin, pmax), **kwargs)
with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
responses = list(executor.map(_stats_worker, addresses)) # depends on [control=['with'], data=['executor']]
info['bounds'] = [r['bounds'] for (b, r) in zip(bands, responses) if b == ref_band][0]
info['statistics'] = {b: v for (b, d) in zip(bands, responses) for (k, v) in d['statistics'].items()}
return info |
def verify(self):
    """Ensure all expected calls were called,
    raise AssertionError otherwise.

    You do not need to use this directly. Use fudge.verify()
    """
    try:
        for expectation in self.get_expected_calls():
            expectation.assert_called()
            expectation.assert_times_called()
        for _fake, order in self.get_expected_call_order().items():
            order.assert_order_met(finalize=True)
    finally:
        # Always reset recorded calls, even when an assertion fails.
        self.clear_calls()
constant[Ensure all expected calls were called,
raise AssertionError otherwise.
You do not need to use this directly. Use fudge.verify()
]
<ast.Try object at 0x7da18bccb160> | keyword[def] identifier[verify] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[for] identifier[exp] keyword[in] identifier[self] . identifier[get_expected_calls] ():
identifier[exp] . identifier[assert_called] ()
identifier[exp] . identifier[assert_times_called] ()
keyword[for] identifier[fake] , identifier[call_order] keyword[in] identifier[self] . identifier[get_expected_call_order] (). identifier[items] ():
identifier[call_order] . identifier[assert_order_met] ( identifier[finalize] = keyword[True] )
keyword[finally] :
identifier[self] . identifier[clear_calls] () | def verify(self):
"""Ensure all expected calls were called,
raise AssertionError otherwise.
You do not need to use this directly. Use fudge.verify()
"""
try:
for exp in self.get_expected_calls():
exp.assert_called()
exp.assert_times_called() # depends on [control=['for'], data=['exp']]
for (fake, call_order) in self.get_expected_call_order().items():
call_order.assert_order_met(finalize=True) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
finally:
self.clear_calls() |
def _get_encrypted_masterpassword(self):
    """ Obtain the encrypted masterkey

    .. note:: The encrypted masterkey is checksummed, so that we can
        figure out that a provided password is correct or not. The
        checksum is only 4 bytes long!

    :raises WalletLocked: if the wallet has not been unlocked
    """
    if not self.unlocked():
        raise WalletLocked
    cipher = AESCipher(self.password)
    checksum = self._derive_checksum(self.masterkey)
    return "{}${}".format(checksum, cipher.encrypt(self.masterkey))
constant[ Obtain the encrypted masterkey
.. note:: The encrypted masterkey is checksummed, so that we can
figure out that a provided password is correct or not. The
checksum is only 4 bytes long!
]
if <ast.UnaryOp object at 0x7da18f09c6d0> begin[:]
<ast.Raise object at 0x7da18f09cac0>
variable[aes] assign[=] call[name[AESCipher], parameter[name[self].password]]
return[call[constant[{}${}].format, parameter[call[name[self]._derive_checksum, parameter[name[self].masterkey]], call[name[aes].encrypt, parameter[name[self].masterkey]]]]] | keyword[def] identifier[_get_encrypted_masterpassword] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[unlocked] ():
keyword[raise] identifier[WalletLocked]
identifier[aes] = identifier[AESCipher] ( identifier[self] . identifier[password] )
keyword[return] literal[string] . identifier[format] (
identifier[self] . identifier[_derive_checksum] ( identifier[self] . identifier[masterkey] ), identifier[aes] . identifier[encrypt] ( identifier[self] . identifier[masterkey] )
) | def _get_encrypted_masterpassword(self):
""" Obtain the encrypted masterkey
.. note:: The encrypted masterkey is checksummed, so that we can
figure out that a provided password is correct or not. The
checksum is only 4 bytes long!
"""
if not self.unlocked():
raise WalletLocked # depends on [control=['if'], data=[]]
aes = AESCipher(self.password)
return '{}${}'.format(self._derive_checksum(self.masterkey), aes.encrypt(self.masterkey)) |
def urls(self):
    """
    A dictionary of the urls to be mocked with this service and the handlers
    that should be called in their place
    """
    mapping = {}
    for base in self._url_module.url_bases:
        for path_template, handler in self._url_module.url_paths.items():
            mapping[path_template.format(base)] = handler
    return mapping
constant[
A dictionary of the urls to be mocked with this service and the handlers
that should be called in their place
]
variable[url_bases] assign[=] name[self]._url_module.url_bases
variable[unformatted_paths] assign[=] name[self]._url_module.url_paths
variable[urls] assign[=] dictionary[[], []]
for taget[name[url_base]] in starred[name[url_bases]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2047e8c70>, <ast.Name object at 0x7da2047e8070>]]] in starred[call[name[unformatted_paths].items, parameter[]]] begin[:]
variable[url] assign[=] call[name[url_path].format, parameter[name[url_base]]]
call[name[urls]][name[url]] assign[=] name[handler]
return[name[urls]] | keyword[def] identifier[urls] ( identifier[self] ):
literal[string]
identifier[url_bases] = identifier[self] . identifier[_url_module] . identifier[url_bases]
identifier[unformatted_paths] = identifier[self] . identifier[_url_module] . identifier[url_paths]
identifier[urls] ={}
keyword[for] identifier[url_base] keyword[in] identifier[url_bases] :
keyword[for] identifier[url_path] , identifier[handler] keyword[in] identifier[unformatted_paths] . identifier[items] ():
identifier[url] = identifier[url_path] . identifier[format] ( identifier[url_base] )
identifier[urls] [ identifier[url] ]= identifier[handler]
keyword[return] identifier[urls] | def urls(self):
"""
A dictionary of the urls to be mocked with this service and the handlers
that should be called in their place
"""
url_bases = self._url_module.url_bases
unformatted_paths = self._url_module.url_paths
urls = {}
for url_base in url_bases:
for (url_path, handler) in unformatted_paths.items():
url = url_path.format(url_base)
urls[url] = handler # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['url_base']]
return urls |
def jdbc(self, url, table, mode=None, properties=None):
    """Saves the content of the :class:`DataFrame` to an external database table via JDBC.

    .. note:: Don't create too many partitions in parallel on a large cluster;
        otherwise Spark might crash your external database systems.

    :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
    :param table: Name of the table in the external database.
    :param mode: specifies the behavior of the save operation when data already exists.

        * ``append``: Append contents of this :class:`DataFrame` to existing data.
        * ``overwrite``: Overwrite existing data.
        * ``ignore``: Silently ignore this operation if data already exists.
        * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
            exists.
    :param properties: a dictionary of JDBC database connection arguments. Normally at
        least properties "user" and "password" with their corresponding values.
        For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
    """
    if properties is None:
        properties = {}
    # Translate the Python dict into a java.util.Properties instance
    # on the JVM side of the py4j gateway.
    gateway_client = self._spark._sc._gateway._gateway_client
    jprop = JavaClass("java.util.Properties", gateway_client)()
    for key, value in properties.items():
        jprop.setProperty(key, value)
    self.mode(mode)._jwrite.jdbc(url, table, jprop)
constant[Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
]
if compare[name[properties] is constant[None]] begin[:]
variable[properties] assign[=] call[name[dict], parameter[]]
variable[jprop] assign[=] call[call[name[JavaClass], parameter[constant[java.util.Properties], name[self]._spark._sc._gateway._gateway_client]], parameter[]]
for taget[name[k]] in starred[name[properties]] begin[:]
call[name[jprop].setProperty, parameter[name[k], call[name[properties]][name[k]]]]
call[call[name[self].mode, parameter[name[mode]]]._jwrite.jdbc, parameter[name[url], name[table], name[jprop]]] | keyword[def] identifier[jdbc] ( identifier[self] , identifier[url] , identifier[table] , identifier[mode] = keyword[None] , identifier[properties] = keyword[None] ):
literal[string]
keyword[if] identifier[properties] keyword[is] keyword[None] :
identifier[properties] = identifier[dict] ()
identifier[jprop] = identifier[JavaClass] ( literal[string] , identifier[self] . identifier[_spark] . identifier[_sc] . identifier[_gateway] . identifier[_gateway_client] )()
keyword[for] identifier[k] keyword[in] identifier[properties] :
identifier[jprop] . identifier[setProperty] ( identifier[k] , identifier[properties] [ identifier[k] ])
identifier[self] . identifier[mode] ( identifier[mode] ). identifier[_jwrite] . identifier[jdbc] ( identifier[url] , identifier[table] , identifier[jprop] ) | def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict() # depends on [control=['if'], data=['properties']]
jprop = JavaClass('java.util.Properties', self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k]) # depends on [control=['for'], data=['k']]
self.mode(mode)._jwrite.jdbc(url, table, jprop) |
def on_key_release(self, symbol, modifiers):
    """
    Pyglet-specific key release callback.

    Translates the pyglet release event into the generic
    :py:func:`keyboard_event` callback with ``ACTION_RELEASE``.
    """
    action = self.keys.ACTION_RELEASE
    self.keyboard_event(symbol, action, modifiers)
constant[
Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event`
]
call[name[self].keyboard_event, parameter[name[symbol], name[self].keys.ACTION_RELEASE, name[modifiers]]] | keyword[def] identifier[on_key_release] ( identifier[self] , identifier[symbol] , identifier[modifiers] ):
literal[string]
identifier[self] . identifier[keyboard_event] ( identifier[symbol] , identifier[self] . identifier[keys] . identifier[ACTION_RELEASE] , identifier[modifiers] ) | def on_key_release(self, symbol, modifiers):
"""
Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event`
"""
self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers) |
def get_zeropoint(expnum, ccd, prefix=None, version='p'):
    """Get the zeropoint for this exposure using the zeropoint.used file created during source planting.

    This command expects that there is a file called #######p##.zeropoint.used
    which contains the zeropoint. Results are cached in the module-level
    ``zmag`` dict; a failed lookup is cached as 0.0 so it is not retried.

    @param expnum: exposure to get zeropoint of
    @param ccd: which ccd (extension - 1) to get zp
    @param prefix: possible string prefixed to exposure number.
    @param version: one of [spo] as in #######p##
    @return: zeropoint
    """
    uri = get_uri(expnum, ccd, version, ext='zeropoint.used', prefix=prefix)
    try:
        # Return the cached value when this exposure was already looked up.
        return zmag[uri]
    except KeyError:
        pass
    try:
        zmag[uri] = float(open_vos_or_local(uri).read())
    except Exception:
        # The original bare ``except:`` also swallowed KeyboardInterrupt /
        # SystemExit; catch only real errors. On failure fall back to 0.0
        # and cache it so we do not retry on every call.
        zmag[uri] = 0.0
    return zmag[uri]
constant[Get the zeropoint for this exposure using the zeropoint.used file created during source planting..
This command expects that there is a file called #######p##.zeropoint.used which contains the zeropoint.
@param expnum: exposure to get zeropoint of
@param ccd: which ccd (extension - 1) to get zp
@param prefix: possible string prefixed to expsoure number.
@param version: one of [spo] as in #######p##
@return: zeropoint
]
variable[uri] assign[=] call[name[get_uri], parameter[name[expnum], name[ccd], name[version]]]
<ast.Try object at 0x7da1b1969fc0>
<ast.Try object at 0x7da1b1969e10>
call[name[zmag]][name[uri]] assign[=] constant[0.0]
return[call[name[zmag]][name[uri]]] | keyword[def] identifier[get_zeropoint] ( identifier[expnum] , identifier[ccd] , identifier[prefix] = keyword[None] , identifier[version] = literal[string] ):
literal[string]
identifier[uri] = identifier[get_uri] ( identifier[expnum] , identifier[ccd] , identifier[version] , identifier[ext] = literal[string] , identifier[prefix] = identifier[prefix] )
keyword[try] :
keyword[return] identifier[zmag] [ identifier[uri] ]
keyword[except] :
keyword[pass]
keyword[try] :
identifier[zmag] [ identifier[uri] ]= identifier[float] ( identifier[open_vos_or_local] ( identifier[uri] ). identifier[read] ())
keyword[return] identifier[zmag] [ identifier[uri] ]
keyword[except] :
keyword[pass]
identifier[zmag] [ identifier[uri] ]= literal[int]
keyword[return] identifier[zmag] [ identifier[uri] ] | def get_zeropoint(expnum, ccd, prefix=None, version='p'):
"""Get the zeropoint for this exposure using the zeropoint.used file created during source planting..
This command expects that there is a file called #######p##.zeropoint.used which contains the zeropoint.
@param expnum: exposure to get zeropoint of
@param ccd: which ccd (extension - 1) to get zp
@param prefix: possible string prefixed to expsoure number.
@param version: one of [spo] as in #######p##
@return: zeropoint
"""
uri = get_uri(expnum, ccd, version, ext='zeropoint.used', prefix=prefix)
try:
return zmag[uri] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
try:
zmag[uri] = float(open_vos_or_local(uri).read())
return zmag[uri] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
zmag[uri] = 0.0
return zmag[uri] |
def transform_sources(self, sources, with_string=False):
    """Rewrite every source body and collect the referenced modules.

    Each entry in ``sources`` is run through ``update_func_body`` with a
    ``replace_source`` updater (string prefix ``string_``), then prefixed
    with ``EXTERN_AND_SEG``. Returns the mutated ``sources`` mapping
    together with the function definitions built from the modules
    collected during replacement.
    """
    modules = {}
    updater = partial(
        self.replace_source, modules=modules, prefix='string_')
    for name, body in list(sources.items()):
        sources[name] = EXTERN_AND_SEG + update_func_body(body, updater)
    logging.debug('modules: %s', modules)
    return sources, self.build_funcs(modules)
constant[Get the defintions of needed strings and functions
after replacement.
]
variable[modules] assign[=] dictionary[[], []]
variable[updater] assign[=] call[name[partial], parameter[name[self].replace_source]]
for taget[name[filename]] in starred[name[sources]] begin[:]
variable[updated] assign[=] call[name[update_func_body], parameter[call[name[sources]][name[filename]], name[updater]]]
call[name[sources]][name[filename]] assign[=] binary_operation[name[EXTERN_AND_SEG] + name[updated]]
call[name[logging].debug, parameter[constant[modules: %s], name[modules]]]
return[tuple[[<ast.Name object at 0x7da18bcc9900>, <ast.Call object at 0x7da18bccba00>]]] | keyword[def] identifier[transform_sources] ( identifier[self] , identifier[sources] , identifier[with_string] = keyword[False] ):
literal[string]
identifier[modules] ={}
identifier[updater] = identifier[partial] (
identifier[self] . identifier[replace_source] , identifier[modules] = identifier[modules] , identifier[prefix] = literal[string] )
keyword[for] identifier[filename] keyword[in] identifier[sources] :
identifier[updated] = identifier[update_func_body] ( identifier[sources] [ identifier[filename] ], identifier[updater] )
identifier[sources] [ identifier[filename] ]= identifier[EXTERN_AND_SEG] + identifier[updated]
identifier[logging] . identifier[debug] ( literal[string] , identifier[modules] )
keyword[return] identifier[sources] , identifier[self] . identifier[build_funcs] ( identifier[modules] ) | def transform_sources(self, sources, with_string=False):
"""Get the defintions of needed strings and functions
after replacement.
"""
modules = {}
updater = partial(self.replace_source, modules=modules, prefix='string_')
for filename in sources:
updated = update_func_body(sources[filename], updater)
sources[filename] = EXTERN_AND_SEG + updated # depends on [control=['for'], data=['filename']]
logging.debug('modules: %s', modules)
return (sources, self.build_funcs(modules)) |
def timezone(zone):
    r'''Return a datetime.tzinfo implementation for the given timezone name.

    ``'UTC'`` (case-insensitive) short-circuits to the module-level ``utc``
    singleton. Other names must be ASCII; a non-ASCII name or a name with no
    matching zoneinfo resource raises UnknownTimeZoneError. Built tzinfo
    objects are cached in ``_tzinfo_cache``, so repeated lookups of the same
    zone return the same instance.
    '''
    if zone.upper() == 'UTC':
        return utc
    try:
        # All valid timezone names are plain ASCII.
        zone = zone.encode('US-ASCII')
    except UnicodeEncodeError:
        raise UnknownTimeZoneError(zone)
    zone = _unmunge_zone(zone)
    if zone in _tzinfo_cache:
        return _tzinfo_cache[zone]
    if not resource_exists(zone):
        raise UnknownTimeZoneError(zone)
    _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
    return _tzinfo_cache[zone]
constant[ Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(u'US/Eastern') is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> timezone('Asia/Shangri-La')
Traceback (most recent call last):
...
UnknownTimeZoneError: 'Asia/Shangri-La'
>>> timezone(u'\N{TRADE MARK SIGN}')
Traceback (most recent call last):
...
UnknownTimeZoneError: u'\u2122'
]
if compare[call[name[zone].upper, parameter[]] equal[==] constant[UTC]] begin[:]
return[name[utc]]
<ast.Try object at 0x7da18bc73040>
variable[zone] assign[=] call[name[_unmunge_zone], parameter[name[zone]]]
if compare[name[zone] <ast.NotIn object at 0x7da2590d7190> name[_tzinfo_cache]] begin[:]
if call[name[resource_exists], parameter[name[zone]]] begin[:]
call[name[_tzinfo_cache]][name[zone]] assign[=] call[name[build_tzinfo], parameter[name[zone], call[name[open_resource], parameter[name[zone]]]]]
return[call[name[_tzinfo_cache]][name[zone]]] | keyword[def] identifier[timezone] ( identifier[zone] ):
literal[string]
keyword[if] identifier[zone] . identifier[upper] ()== literal[string] :
keyword[return] identifier[utc]
keyword[try] :
identifier[zone] = identifier[zone] . identifier[encode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
keyword[raise] identifier[UnknownTimeZoneError] ( identifier[zone] )
identifier[zone] = identifier[_unmunge_zone] ( identifier[zone] )
keyword[if] identifier[zone] keyword[not] keyword[in] identifier[_tzinfo_cache] :
keyword[if] identifier[resource_exists] ( identifier[zone] ):
identifier[_tzinfo_cache] [ identifier[zone] ]= identifier[build_tzinfo] ( identifier[zone] , identifier[open_resource] ( identifier[zone] ))
keyword[else] :
keyword[raise] identifier[UnknownTimeZoneError] ( identifier[zone] )
keyword[return] identifier[_tzinfo_cache] [ identifier[zone] ] | def timezone(zone):
""" Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(u'US/Eastern') is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> timezone('Asia/Shangri-La')
Traceback (most recent call last):
...
UnknownTimeZoneError: 'Asia/Shangri-La'
>>> timezone(u'\\N{TRADE MARK SIGN}')
Traceback (most recent call last):
...
UnknownTimeZoneError: u'\\u2122'
"""
if zone.upper() == 'UTC':
return utc # depends on [control=['if'], data=[]]
try:
zone = zone.encode('US-ASCII') # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone) # depends on [control=['except'], data=[]]
zone = _unmunge_zone(zone)
if zone not in _tzinfo_cache:
if resource_exists(zone):
_tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone)) # depends on [control=['if'], data=[]]
else:
raise UnknownTimeZoneError(zone) # depends on [control=['if'], data=['zone', '_tzinfo_cache']]
return _tzinfo_cache[zone] |
def react(self, **kwargs):
    """
    Dispatch a bathymetry interaction by type.

    The time of reaction is ignored here and should be handled by
    whatever called this function. The type defaults to ``self._type``
    when not supplied in ``kwargs``.
    """
    interaction = kwargs.get("type", self._type)
    if interaction == 'hover':
        return self.__hover(**kwargs)
    if interaction == 'stick':
        return None
    if interaction == 'reverse':
        return self.__reverse(**kwargs)
    raise ValueError("Bathymetry interaction type not supported")
constant[
The time of recation is ignored hereTime is ignored here
and should be handled by whatever called this function.
]
variable[react_type] assign[=] call[name[kwargs].get, parameter[constant[type], name[self]._type]]
if compare[name[react_type] equal[==] constant[hover]] begin[:]
return[call[name[self].__hover, parameter[]]] | keyword[def] identifier[react] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[react_type] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[_type] )
keyword[if] identifier[react_type] == literal[string] :
keyword[return] identifier[self] . identifier[__hover] (** identifier[kwargs] )
keyword[elif] identifier[react_type] == literal[string] :
keyword[pass]
keyword[elif] identifier[react_type] == literal[string] :
keyword[return] identifier[self] . identifier[__reverse] (** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def react(self, **kwargs):
"""
The time of recation is ignored hereTime is ignored here
and should be handled by whatever called this function.
"""
react_type = kwargs.get('type', self._type)
if react_type == 'hover':
return self.__hover(**kwargs) # depends on [control=['if'], data=[]]
elif react_type == 'stick':
pass # depends on [control=['if'], data=[]]
elif react_type == 'reverse':
return self.__reverse(**kwargs) # depends on [control=['if'], data=[]]
else:
raise ValueError('Bathymetry interaction type not supported') |
def lines_from_points(points):
    """
    Generates line from points. Assumes points are ordered as line segments.

    Parameters
    ----------
    points : np.ndarray
        Points representing line segments. For example, two line segments
        would be represented as:
        np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])

    Returns
    -------
    lines : vtki.PolyData
        PolyData with lines and cells.

    Examples
    --------
    This example plots two line segments at right angles to each other line.

    >>> import vtki
    >>> import numpy as np
    >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = vtki.lines_from_points(points)
    >>> lines.plot() # doctest:+SKIP
    """
    # One line cell per consecutive pair of points.
    npoints = points.shape[0] - 1
    # VTK cell layout: [n_points_in_cell, id0, id1] repeated per cell.
    # np.int_ replaces the np.int alias, which was deprecated in NumPy 1.20
    # and removed in 1.24 — the old code crashes on modern NumPy.
    lines = np.vstack((np.full(npoints, 2, dtype=np.int_),
                       np.arange(npoints),
                       np.arange(1, npoints + 1))).T.ravel()
    return vtki.PolyData(points, lines)
constant[
Generates line from points. Assumes points are ordered as line segments.
Parameters
----------
points : np.ndarray
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
Returns
-------
lines : vtki.PolyData
PolyData with lines and cells.
Examples
--------
This example plots two line segments at right angles to each other line.
>>> import vtki
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> lines = vtki.lines_from_points(points)
>>> lines.plot() # doctest:+SKIP
]
variable[npoints] assign[=] binary_operation[call[name[points].shape][constant[0]] - constant[1]]
variable[lines] assign[=] call[call[name[np].vstack, parameter[tuple[[<ast.BinOp object at 0x7da18f810a00>, <ast.Call object at 0x7da18f8125f0>, <ast.Call object at 0x7da18f813100>]]]].T.ravel, parameter[]]
return[call[name[vtki].PolyData, parameter[name[points], name[lines]]]] | keyword[def] identifier[lines_from_points] ( identifier[points] ):
literal[string]
identifier[npoints] = identifier[points] . identifier[shape] [ literal[int] ]- literal[int]
identifier[lines] = identifier[np] . identifier[vstack] (( literal[int] * identifier[np] . identifier[ones] ( identifier[npoints] , identifier[np] . identifier[int] ),
identifier[np] . identifier[arange] ( identifier[npoints] ),
identifier[np] . identifier[arange] ( literal[int] , identifier[npoints] + literal[int] ))). identifier[T] . identifier[ravel] ()
keyword[return] identifier[vtki] . identifier[PolyData] ( identifier[points] , identifier[lines] ) | def lines_from_points(points):
"""
Generates line from points. Assumes points are ordered as line segments.
Parameters
----------
points : np.ndarray
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
Returns
-------
lines : vtki.PolyData
PolyData with lines and cells.
Examples
--------
This example plots two line segments at right angles to each other line.
>>> import vtki
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> lines = vtki.lines_from_points(points)
>>> lines.plot() # doctest:+SKIP
"""
# Assuming ordered points, create array defining line order
npoints = points.shape[0] - 1
lines = np.vstack((2 * np.ones(npoints, np.int), np.arange(npoints), np.arange(1, npoints + 1))).T.ravel()
return vtki.PolyData(points, lines) |
def point(cls):  # No range because it's unknown at this time.
    """Create an ArgumentType that is represented by a point.Point."""
    def factory(i, name):
        def build(a):
            # Convert the raw coordinate pair into a floored point.Point.
            return point.Point(*a).floor()
        return cls(i, name, (0, 0), build, None)
    return factory
constant[Create an ArgumentType that is represented by a point.Point.]
def function[factory, parameter[i, name]]:
return[call[name[cls], parameter[name[i], name[name], tuple[[<ast.Constant object at 0x7da18f813430>, <ast.Constant object at 0x7da18f813cd0>]], <ast.Lambda object at 0x7da18f813fa0>, constant[None]]]]
return[name[factory]] | keyword[def] identifier[point] ( identifier[cls] ):
literal[string]
keyword[def] identifier[factory] ( identifier[i] , identifier[name] ):
keyword[return] identifier[cls] ( identifier[i] , identifier[name] ,( literal[int] , literal[int] ), keyword[lambda] identifier[a] : identifier[point] . identifier[Point] (* identifier[a] ). identifier[floor] (), keyword[None] )
keyword[return] identifier[factory] | def point(cls): # No range because it's unknown at this time.
'Create an ArgumentType that is represented by a point.Point.'
def factory(i, name):
return cls(i, name, (0, 0), lambda a: point.Point(*a).floor(), None)
return factory |
def set_mute(self, enable):
    """Mute or unmute the TV via the rendering-control SOAP endpoint."""
    if enable:
        desired = '1'
    else:
        desired = '0'
    body = ('<InstanceID>0</InstanceID><Channel>Master</Channel>'
            '<DesiredMute>{}</DesiredMute>').format(desired)
    self.soap_request(URL_CONTROL_DMR, URN_RENDERING_CONTROL,
                      'SetMute', body)
constant[Mute or unmute the TV.]
variable[data] assign[=] <ast.IfExp object at 0x7da204621300>
variable[params] assign[=] call[constant[<InstanceID>0</InstanceID><Channel>Master</Channel><DesiredMute>{}</DesiredMute>].format, parameter[name[data]]]
call[name[self].soap_request, parameter[name[URL_CONTROL_DMR], name[URN_RENDERING_CONTROL], constant[SetMute], name[params]]] | keyword[def] identifier[set_mute] ( identifier[self] , identifier[enable] ):
literal[string]
identifier[data] = literal[string] keyword[if] identifier[enable] keyword[else] literal[string]
identifier[params] =( literal[string]
literal[string] ). identifier[format] ( identifier[data] )
identifier[self] . identifier[soap_request] ( identifier[URL_CONTROL_DMR] , identifier[URN_RENDERING_CONTROL] ,
literal[string] , identifier[params] ) | def set_mute(self, enable):
"""Mute or unmute the TV."""
data = '1' if enable else '0'
params = '<InstanceID>0</InstanceID><Channel>Master</Channel><DesiredMute>{}</DesiredMute>'.format(data)
self.soap_request(URL_CONTROL_DMR, URN_RENDERING_CONTROL, 'SetMute', params) |
def _preprocess_yml(path):
"""Dynamically create PY3 version of the file by re-writing 'unicode' to 'str'."""
with open(path) as f:
tmp_yaml = f.read()
return re.sub(r"unicode", "str", tmp_yaml) | def function[_preprocess_yml, parameter[path]]:
constant[Dynamically create PY3 version of the file by re-writing 'unicode' to 'str'.]
with call[name[open], parameter[name[path]]] begin[:]
variable[tmp_yaml] assign[=] call[name[f].read, parameter[]]
return[call[name[re].sub, parameter[constant[unicode], constant[str], name[tmp_yaml]]]] | keyword[def] identifier[_preprocess_yml] ( identifier[path] ):
literal[string]
keyword[with] identifier[open] ( identifier[path] ) keyword[as] identifier[f] :
identifier[tmp_yaml] = identifier[f] . identifier[read] ()
keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[tmp_yaml] ) | def _preprocess_yml(path):
"""Dynamically create PY3 version of the file by re-writing 'unicode' to 'str'."""
with open(path) as f:
tmp_yaml = f.read() # depends on [control=['with'], data=['f']]
return re.sub('unicode', 'str', tmp_yaml) |
def addresses(self):
    """
    Return a new raw REST interface to address resources.

    :rtype: :py:class:`ns1.rest.ipam.Addresses`
    """
    # Imported lazily so the ipam module is only loaded when used.
    from ns1.rest.ipam import Addresses
    return Addresses(self.config)
constant[
Return a new raw REST interface to address resources
:rtype: :py:class:`ns1.rest.ipam.Adresses`
]
import module[ns1.rest.ipam]
return[call[name[ns1].rest.ipam.Addresses, parameter[name[self].config]]] | keyword[def] identifier[addresses] ( identifier[self] ):
literal[string]
keyword[import] identifier[ns1] . identifier[rest] . identifier[ipam]
keyword[return] identifier[ns1] . identifier[rest] . identifier[ipam] . identifier[Addresses] ( identifier[self] . identifier[config] ) | def addresses(self):
"""
Return a new raw REST interface to address resources
:rtype: :py:class:`ns1.rest.ipam.Adresses`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Addresses(self.config) |
def _compile_pattern(self, rule):
''' Return a regular expression with named groups for each wildcard. '''
out = ''
for i, part in enumerate(self.syntax.split(rule)):
if i%3 == 0: out += re.escape(part.replace('\\:',':'))
elif i%3 == 1: out += '(?P<%s>' % part if part else '(?:'
else: out += '%s)' % (part or '[^/]+')
return re.compile('^%s$'%out) | def function[_compile_pattern, parameter[self, rule]]:
constant[ Return a regular expression with named groups for each wildcard. ]
variable[out] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b170e200>, <ast.Name object at 0x7da1b170f400>]]] in starred[call[name[enumerate], parameter[call[name[self].syntax.split, parameter[name[rule]]]]]] begin[:]
if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[3]] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b170dde0>
return[call[name[re].compile, parameter[binary_operation[constant[^%s$] <ast.Mod object at 0x7da2590d6920> name[out]]]]] | keyword[def] identifier[_compile_pattern] ( identifier[self] , identifier[rule] ):
literal[string]
identifier[out] = literal[string]
keyword[for] identifier[i] , identifier[part] keyword[in] identifier[enumerate] ( identifier[self] . identifier[syntax] . identifier[split] ( identifier[rule] )):
keyword[if] identifier[i] % literal[int] == literal[int] : identifier[out] += identifier[re] . identifier[escape] ( identifier[part] . identifier[replace] ( literal[string] , literal[string] ))
keyword[elif] identifier[i] % literal[int] == literal[int] : identifier[out] += literal[string] % identifier[part] keyword[if] identifier[part] keyword[else] literal[string]
keyword[else] : identifier[out] += literal[string] %( identifier[part] keyword[or] literal[string] )
keyword[return] identifier[re] . identifier[compile] ( literal[string] % identifier[out] ) | def _compile_pattern(self, rule):
""" Return a regular expression with named groups for each wildcard. """
out = ''
for (i, part) in enumerate(self.syntax.split(rule)):
if i % 3 == 0:
out += re.escape(part.replace('\\:', ':')) # depends on [control=['if'], data=[]]
elif i % 3 == 1:
out += '(?P<%s>' % part if part else '(?:' # depends on [control=['if'], data=[]]
else:
out += '%s)' % (part or '[^/]+') # depends on [control=['for'], data=[]]
return re.compile('^%s$' % out) |
def switch_on(self):
    """Turn the switch on, mirroring the new state locally on success."""
    turned_on = self.set_status(CONST.STATUS_ON_INT)
    if not turned_on:
        return turned_on
    self._json_state['status'] = CONST.STATUS_ON
    return turned_on
constant[Turn the switch on.]
variable[success] assign[=] call[name[self].set_status, parameter[name[CONST].STATUS_ON_INT]]
if name[success] begin[:]
call[name[self]._json_state][constant[status]] assign[=] name[CONST].STATUS_ON
return[name[success]] | keyword[def] identifier[switch_on] ( identifier[self] ):
literal[string]
identifier[success] = identifier[self] . identifier[set_status] ( identifier[CONST] . identifier[STATUS_ON_INT] )
keyword[if] identifier[success] :
identifier[self] . identifier[_json_state] [ literal[string] ]= identifier[CONST] . identifier[STATUS_ON]
keyword[return] identifier[success] | def switch_on(self):
"""Turn the switch on."""
success = self.set_status(CONST.STATUS_ON_INT)
if success:
self._json_state['status'] = CONST.STATUS_ON # depends on [control=['if'], data=[]]
return success |
def get_status(self):
    """
    Gets the Status of the LogoutResponse.

    :return: the ``Value`` attribute of the first StatusCode node, or
        None when the response carries no status code.
    :rtype: string
    """
    nodes = self.__query('/samlp:LogoutResponse/samlp:Status/samlp:StatusCode')
    if not nodes:
        return None
    return nodes[0].attrib['Value']
return status | def function[get_status, parameter[self]]:
constant[
Gets the Status
:return: The Status
:rtype: string
]
variable[entries] assign[=] call[name[self].__query, parameter[constant[/samlp:LogoutResponse/samlp:Status/samlp:StatusCode]]]
if compare[call[name[len], parameter[name[entries]]] equal[==] constant[0]] begin[:]
return[constant[None]]
variable[status] assign[=] call[call[name[entries]][constant[0]].attrib][constant[Value]]
return[name[status]] | keyword[def] identifier[get_status] ( identifier[self] ):
literal[string]
identifier[entries] = identifier[self] . identifier[__query] ( literal[string] )
keyword[if] identifier[len] ( identifier[entries] )== literal[int] :
keyword[return] keyword[None]
identifier[status] = identifier[entries] [ literal[int] ]. identifier[attrib] [ literal[string] ]
keyword[return] identifier[status] | def get_status(self):
"""
Gets the Status
:return: The Status
:rtype: string
"""
entries = self.__query('/samlp:LogoutResponse/samlp:Status/samlp:StatusCode')
if len(entries) == 0:
return None # depends on [control=['if'], data=[]]
status = entries[0].attrib['Value']
return status |
def __get_empty_config(self):
    """
    Return the generated config file contents as a string.

    The config file is generated on disk, read back, and deleted. The
    delete now runs in a ``finally`` so the generated file is removed
    even when reading raises (previously it leaked on that path).
    """
    self._generate_config()
    path = self._get_config_path()
    try:
        with open(path, 'r') as readable:
            contents = readable.read()
    finally:
        # Always clean up the generated file, even on a failed read.
        os.remove(path)
    return contents
constant[
Returns the config file contents as a string. The config file is generated and then deleted.
]
call[name[self]._generate_config, parameter[]]
variable[path] assign[=] call[name[self]._get_config_path, parameter[]]
with call[name[open], parameter[name[path], constant[r]]] begin[:]
variable[contents] assign[=] call[name[readable].read, parameter[]]
call[name[os].remove, parameter[name[path]]]
return[name[contents]] | keyword[def] identifier[__get_empty_config] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_generate_config] ()
identifier[path] = identifier[self] . identifier[_get_config_path] ()
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[readable] :
identifier[contents] = identifier[readable] . identifier[read] ()
identifier[os] . identifier[remove] ( identifier[path] )
keyword[return] identifier[contents] | def __get_empty_config(self):
"""
Returns the config file contents as a string. The config file is generated and then deleted.
"""
self._generate_config()
path = self._get_config_path()
with open(path, 'r') as readable:
contents = readable.read() # depends on [control=['with'], data=['readable']]
os.remove(path)
return contents |
def coarsen(self, dim: Optional[Mapping[Hashable, int]] = None,
            boundary: str = 'exact',
            side: Union[str, Mapping[Hashable, str]] = 'left',
            coord_func: str = 'mean',
            **dim_kwargs: int):
    """
    Coarsen this object by aggregating over fixed-size windows.

    Parameters
    ----------
    dim : dict, optional
        Mapping from dimension name to the window size along it. Window
        sizes may instead be supplied as keyword arguments
        (e.g. ``time=3``); ``dim`` and keywords are mutually exclusive.
    boundary : 'exact' | 'trim' | 'pad'
        If 'exact', a ValueError will be raised if dimension size is not a
        multiple of the window size. If 'trim', the excess entries are
        dropped. If 'pad', NA will be padded.
    side : 'left' or 'right' or mapping from dimension to 'left' or 'right'
    coord_func : function (name) that is applied to the coordinates,
        or a mapping from coordinate name to function (name).

    Returns
    -------
    Coarsen object (core.rolling.DataArrayCoarsen for DataArray,
    core.rolling.DatasetCoarsen for Dataset.)

    See Also
    --------
    core.rolling.DataArrayCoarsen
    core.rolling.DatasetCoarsen
    """
    window_sizes = either_dict_or_kwargs(dim, dim_kwargs, 'coarsen')
    return self._coarsen_cls(
        self, window_sizes, boundary=boundary,
        side=side, coord_func=coord_func)
constant[
Coarsen object.
Parameters
----------
dim: dict, optional
Mapping from the dimension name to the window size.
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
boundary : 'exact' | 'trim' | 'pad'
If 'exact', a ValueError will be raised if dimension size is not a
multiple of the window size. If 'trim', the excess entries are
dropped. If 'pad', NA will be padded.
side : 'left' or 'right' or mapping from dimension to 'left' or 'right'
coord_func: function (name) that is applied to the coordintes,
or a mapping from coordinate name to function (name).
Returns
-------
Coarsen object (core.rolling.DataArrayCoarsen for DataArray,
core.rolling.DatasetCoarsen for Dataset.)
Examples
--------
Coarsen the long time series by averaging over every four days.
>>> da = xr.DataArray(np.linspace(0, 364, num=364),
... dims='time',
... coords={'time': pd.date_range(
... '15/12/1999', periods=364)})
>>> da
<xarray.DataArray (time: 364)>
array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,
364. ])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12
>>>
>>> da.coarsen(time=3, boundary='trim').mean()
<xarray.DataArray (time: 121)>
array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,
361.99449 ])
Coordinates:
* time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10
>>>
See Also
--------
core.rolling.DataArrayCoarsen
core.rolling.DatasetCoarsen
]
variable[dim] assign[=] call[name[either_dict_or_kwargs], parameter[name[dim], name[dim_kwargs], constant[coarsen]]]
return[call[name[self]._coarsen_cls, parameter[name[self], name[dim]]]] | keyword[def] identifier[coarsen] ( identifier[self] , identifier[dim] : identifier[Optional] [ identifier[Mapping] [ identifier[Hashable] , identifier[int] ]]= keyword[None] ,
identifier[boundary] : identifier[str] = literal[string] ,
identifier[side] : identifier[Union] [ identifier[str] , identifier[Mapping] [ identifier[Hashable] , identifier[str] ]]= literal[string] ,
identifier[coord_func] : identifier[str] = literal[string] ,
** identifier[dim_kwargs] : identifier[int] ):
literal[string]
identifier[dim] = identifier[either_dict_or_kwargs] ( identifier[dim] , identifier[dim_kwargs] , literal[string] )
keyword[return] identifier[self] . identifier[_coarsen_cls] (
identifier[self] , identifier[dim] , identifier[boundary] = identifier[boundary] , identifier[side] = identifier[side] ,
identifier[coord_func] = identifier[coord_func] ) | def coarsen(self, dim: Optional[Mapping[Hashable, int]]=None, boundary: str='exact', side: Union[str, Mapping[Hashable, str]]='left', coord_func: str='mean', **dim_kwargs: int):
"""
Coarsen object.
Parameters
----------
dim: dict, optional
Mapping from the dimension name to the window size.
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
boundary : 'exact' | 'trim' | 'pad'
If 'exact', a ValueError will be raised if dimension size is not a
multiple of the window size. If 'trim', the excess entries are
dropped. If 'pad', NA will be padded.
side : 'left' or 'right' or mapping from dimension to 'left' or 'right'
coord_func: function (name) that is applied to the coordintes,
or a mapping from coordinate name to function (name).
Returns
-------
Coarsen object (core.rolling.DataArrayCoarsen for DataArray,
core.rolling.DatasetCoarsen for Dataset.)
Examples
--------
Coarsen the long time series by averaging over every four days.
>>> da = xr.DataArray(np.linspace(0, 364, num=364),
... dims='time',
... coords={'time': pd.date_range(
... '15/12/1999', periods=364)})
>>> da
<xarray.DataArray (time: 364)>
array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,
364. ])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12
>>>
>>> da.coarsen(time=3, boundary='trim').mean()
<xarray.DataArray (time: 121)>
array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,
361.99449 ])
Coordinates:
* time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10
>>>
See Also
--------
core.rolling.DataArrayCoarsen
core.rolling.DatasetCoarsen
"""
dim = either_dict_or_kwargs(dim, dim_kwargs, 'coarsen')
return self._coarsen_cls(self, dim, boundary=boundary, side=side, coord_func=coord_func) |
async def ListFilesystems(self, filters):
    """Invoke the Storage facade's ListFilesystems RPC.

    filters : typing.Sequence[~FilesystemFilter]
    Returns -> typing.Sequence[~FilesystemDetailsListResult]
    """
    # Build the request envelope expected by the RPC layer, then
    # dispatch it and hand the reply straight back to the caller.
    request = {
        'type': 'Storage',
        'request': 'ListFilesystems',
        'version': 4,
        'params': {'filters': filters},
    }
    return await self.rpc(request)
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[filters]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply] | async def ListFilesystems(self, filters):
"""
filters : typing.Sequence[~FilesystemFilter]
Returns -> typing.Sequence[~FilesystemDetailsListResult]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='Storage', request='ListFilesystems', version=4, params=_params)
_params['filters'] = filters
reply = await self.rpc(msg)
return reply |
def connect(self, **kwargs):
    """Connect to Google Compute Engine.

    :param kwargs: extra keyword arguments forwarded to the libcloud
        GCE driver constructor.
    :raises ComputeEngineManagerException: if the driver cannot be
        instantiated or authentication fails.
    """
    try:
        self.gce = get_driver(Provider.GCE)(
            self.user_id,
            self.key,
            project=self.project,
            **kwargs)
    except Exception as exc:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; chain the original error so the real cause of
        # the connection failure stays diagnosable.
        raise ComputeEngineManagerException(
            "Unable to connect to Google Compute Engine.") from exc
constant[Connect to Google Compute Engine.
]
<ast.Try object at 0x7da1b1233df0> | keyword[def] identifier[connect] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[self] . identifier[gce] = identifier[get_driver] ( identifier[Provider] . identifier[GCE] )(
identifier[self] . identifier[user_id] ,
identifier[self] . identifier[key] ,
identifier[project] = identifier[self] . identifier[project] ,
** identifier[kwargs] )
keyword[except] :
keyword[raise] identifier[ComputeEngineManagerException] ( literal[string] ) | def connect(self, **kwargs):
"""Connect to Google Compute Engine.
"""
try:
self.gce = get_driver(Provider.GCE)(self.user_id, self.key, project=self.project, **kwargs) # depends on [control=['try'], data=[]]
except:
raise ComputeEngineManagerException('Unable to connect to Google Compute Engine.') # depends on [control=['except'], data=[]] |
def load_excel(self, filepath, **kwargs):
    """Set the main dataframe with the content of an Excel file

    :param filepath: url of the csv file to load,
        can be absolute if it starts with ``/``
        or relative if it starts with ``./``
    :type filepath: str
    :param kwargs: keyword arguments to pass to
        Pandas ``read_excel`` function

    :example: ``ds.load_excel("./myfile.xlsx")``
    """
    try:
        frame = pd.read_excel(filepath, **kwargs)
        # Refuse to replace the current dataframe with an empty one.
        if len(frame.index) == 0:
            self.warning("Empty Excel file. Can not set the dataframe.")
            return
        self.df = frame
    except Exception as error:
        # Report through the instance's error channel instead of raising.
        self.err(error, "Can not load Excel file")
constant[Set the main dataframe with the content of an Excel file
:param filepath: url of the csv file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type filepath: str
:param kwargs: keyword arguments to pass to
Pandas ``read_excel`` function
:example: ``ds.load_excel("./myfile.xlsx")``
]
<ast.Try object at 0x7da20c76e020> | keyword[def] identifier[load_excel] ( identifier[self] , identifier[filepath] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[df] = identifier[pd] . identifier[read_excel] ( identifier[filepath] ,** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[df] . identifier[index] )== literal[int] :
identifier[self] . identifier[warning] ( literal[string] )
keyword[return]
identifier[self] . identifier[df] = identifier[df]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , literal[string] ) | def load_excel(self, filepath, **kwargs):
"""Set the main dataframe with the content of an Excel file
:param filepath: url of the csv file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type filepath: str
:param kwargs: keyword arguments to pass to
Pandas ``read_excel`` function
:example: ``ds.load_excel("./myfile.xlsx")``
"""
try:
df = pd.read_excel(filepath, **kwargs)
if len(df.index) == 0:
self.warning('Empty Excel file. Can not set the dataframe.')
return # depends on [control=['if'], data=[]]
self.df = df # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, 'Can not load Excel file') # depends on [control=['except'], data=['e']] |
def alerts(self, alert_level='High'):
    """Return the alerts at *alert_level* severity or above, most severe first."""
    threshold = self.alert_levels[alert_level]
    # Keep only alerts whose risk meets the threshold, then order by
    # descending severity (stable, so ties keep their original order).
    relevant = [alert for alert in self.zap.core.alerts()
                if self.alert_levels[alert['risk']] >= threshold]
    relevant.sort(key=lambda alert: self.alert_levels[alert['risk']], reverse=True)
    return relevant
constant[Get a filtered list of alerts at the given alert level, and sorted by alert level.]
variable[alerts] assign[=] call[name[self].zap.core.alerts, parameter[]]
variable[alert_level_value] assign[=] call[name[self].alert_levels][name[alert_level]]
variable[alerts] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b08e79a0>]]
return[name[alerts]] | keyword[def] identifier[alerts] ( identifier[self] , identifier[alert_level] = literal[string] ):
literal[string]
identifier[alerts] = identifier[self] . identifier[zap] . identifier[core] . identifier[alerts] ()
identifier[alert_level_value] = identifier[self] . identifier[alert_levels] [ identifier[alert_level] ]
identifier[alerts] = identifier[sorted] (( identifier[a] keyword[for] identifier[a] keyword[in] identifier[alerts] keyword[if] identifier[self] . identifier[alert_levels] [ identifier[a] [ literal[string] ]]>= identifier[alert_level_value] ),
identifier[key] = keyword[lambda] identifier[k] : identifier[self] . identifier[alert_levels] [ identifier[k] [ literal[string] ]], identifier[reverse] = keyword[True] )
keyword[return] identifier[alerts] | def alerts(self, alert_level='High'):
"""Get a filtered list of alerts at the given alert level, and sorted by alert level."""
alerts = self.zap.core.alerts()
alert_level_value = self.alert_levels[alert_level]
alerts = sorted((a for a in alerts if self.alert_levels[a['risk']] >= alert_level_value), key=lambda k: self.alert_levels[k['risk']], reverse=True)
return alerts |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.