code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _make_persistent(self, model_name, pkg_name):
"""Monkey-patch object persistence (ex: to/from database) into a
bravado-core model class"""
#
# WARNING: ugly piece of monkey-patching below. Hopefully will replace
# with native bravado-core code in the future...
#
# Load class at path pkg_name
c = get_function(pkg_name)
for name in ('load_from_db', 'save_to_db'):
if not hasattr(c, name):
raise PyMacaronCoreException("Class %s has no static method '%s'" % (pkg_name, name))
log.info("Making %s persistent via %s" % (model_name, pkg_name))
# Replace model generator with one that adds 'save_to_db' to every instance
model = getattr(self.model, model_name)
n = self._wrap_bravado_model_generator(model, c.save_to_db, pkg_name)
setattr(self.model, model_name, n)
# Add class method load_from_db to model generator
model = getattr(self.model, model_name)
setattr(model, 'load_from_db', c.load_from_db) | def function[_make_persistent, parameter[self, model_name, pkg_name]]:
constant[Monkey-patch object persistence (ex: to/from database) into a
bravado-core model class]
variable[c] assign[=] call[name[get_function], parameter[name[pkg_name]]]
for taget[name[name]] in starred[tuple[[<ast.Constant object at 0x7da1b11c5210>, <ast.Constant object at 0x7da1b11c4af0>]]] begin[:]
if <ast.UnaryOp object at 0x7da1b11c5780> begin[:]
<ast.Raise object at 0x7da1b11c6a10>
call[name[log].info, parameter[binary_operation[constant[Making %s persistent via %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b11c5570>, <ast.Name object at 0x7da1b11c68c0>]]]]]
variable[model] assign[=] call[name[getattr], parameter[name[self].model, name[model_name]]]
variable[n] assign[=] call[name[self]._wrap_bravado_model_generator, parameter[name[model], name[c].save_to_db, name[pkg_name]]]
call[name[setattr], parameter[name[self].model, name[model_name], name[n]]]
variable[model] assign[=] call[name[getattr], parameter[name[self].model, name[model_name]]]
call[name[setattr], parameter[name[model], constant[load_from_db], name[c].load_from_db]] | keyword[def] identifier[_make_persistent] ( identifier[self] , identifier[model_name] , identifier[pkg_name] ):
literal[string]
identifier[c] = identifier[get_function] ( identifier[pkg_name] )
keyword[for] identifier[name] keyword[in] ( literal[string] , literal[string] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[c] , identifier[name] ):
keyword[raise] identifier[PyMacaronCoreException] ( literal[string] %( identifier[pkg_name] , identifier[name] ))
identifier[log] . identifier[info] ( literal[string] %( identifier[model_name] , identifier[pkg_name] ))
identifier[model] = identifier[getattr] ( identifier[self] . identifier[model] , identifier[model_name] )
identifier[n] = identifier[self] . identifier[_wrap_bravado_model_generator] ( identifier[model] , identifier[c] . identifier[save_to_db] , identifier[pkg_name] )
identifier[setattr] ( identifier[self] . identifier[model] , identifier[model_name] , identifier[n] )
identifier[model] = identifier[getattr] ( identifier[self] . identifier[model] , identifier[model_name] )
identifier[setattr] ( identifier[model] , literal[string] , identifier[c] . identifier[load_from_db] ) | def _make_persistent(self, model_name, pkg_name):
"""Monkey-patch object persistence (ex: to/from database) into a
bravado-core model class"""
#
# WARNING: ugly piece of monkey-patching below. Hopefully will replace
# with native bravado-core code in the future...
#
# Load class at path pkg_name
c = get_function(pkg_name)
for name in ('load_from_db', 'save_to_db'):
if not hasattr(c, name):
raise PyMacaronCoreException("Class %s has no static method '%s'" % (pkg_name, name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
log.info('Making %s persistent via %s' % (model_name, pkg_name))
# Replace model generator with one that adds 'save_to_db' to every instance
model = getattr(self.model, model_name)
n = self._wrap_bravado_model_generator(model, c.save_to_db, pkg_name)
setattr(self.model, model_name, n)
# Add class method load_from_db to model generator
model = getattr(self.model, model_name)
setattr(model, 'load_from_db', c.load_from_db) |
def _set_transport_service(self, v, load=False):
"""
Setter method for transport_service, mapped from YANG variable /interface_vlan/interface/vlan/transport_service (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_transport_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_transport_service() directly.
YANG Description: Transparent vlan
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..1000']}), is_leaf=True, yang_name="transport-service", rest_name="transport-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Set tlsid for Transparent vlan ', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_L2_TRANSPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint16', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """transport_service must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..1000']}), is_leaf=True, yang_name="transport-service", rest_name="transport-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Set tlsid for Transparent vlan ', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_L2_TRANSPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint16', is_config=True)""",
})
self.__transport_service = t
if hasattr(self, '_set'):
self._set() | def function[_set_transport_service, parameter[self, v, load]]:
constant[
Setter method for transport_service, mapped from YANG variable /interface_vlan/interface/vlan/transport_service (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_transport_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_transport_service() directly.
YANG Description: Transparent vlan
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f00d780>
name[self].__transport_service assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_transport_service] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[int] , identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}, identifier[int_size] = literal[int] ), identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__transport_service] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_transport_service(self, v, load=False):
"""
Setter method for transport_service, mapped from YANG variable /interface_vlan/interface/vlan/transport_service (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_transport_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_transport_service() directly.
YANG Description: Transparent vlan
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']}, int_size=16), restriction_dict={'range': [u'1..1000']}), is_leaf=True, yang_name='transport-service', rest_name='transport-service', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Set tlsid for Transparent vlan ', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_L2_TRANSPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint16', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'transport_service must be of a type compatible with uint16', 'defined-type': 'uint16', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={\'range\': [\'0..65535\']},int_size=16), restriction_dict={\'range\': [u\'1..1000\']}), is_leaf=True, yang_name="transport-service", rest_name="transport-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\' Set tlsid for Transparent vlan \', u\'sort-priority\': u\'RUNNCFG_INTERFACE_LEVEL_L2_TRANSPORT_CONFIG\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'uint16\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__transport_service = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def _make_cron_re():
"""
Make the regular expression that matches a crontab 'cron' line.
Each field has a set of allowed values, and can then be in a range, and be
listed with dashes. A range can be stepped with the '/' modifier, and
ranges can be in a list. A field can also be '*', or '*' divided in steps.
The best way to do this is to have a template for a single field that
encapsulates the syntax of that field, regardless of what that field
matches. We then fill in the actual template's value with the pattern
that matches that field. Each field is named, so we can pull them out as
a dictionary later.
"""
range_ = r"{val}(?:-{val}(?:/\d+)?)?"
template = r"(?P<{name}>" + "(?:\*(?:/\d+)?|{r}(?:,{r})*)".format(r=range_) + ")\s+"
return (
r'^\s*' +
template.format(name='minute', val=r'(?:\d|[012345]\d)') +
template.format(name='hour', val=r'(?:\d|[01]\d|2[0123])') +
template.format(name='day_of_month', val=r'(?:0?[1-9]|[12]\d|3[01])') +
template.format(name='month', val=r'(?:0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)') +
template.format(name='day_of_week', val=r'(?:[0-7]|mon|tue|wed|thur|fri|sat|sun)') +
r'(?P<command>\S.*)$'
) | def function[_make_cron_re, parameter[]]:
constant[
Make the regular expression that matches a crontab 'cron' line.
Each field has a set of allowed values, and can then be in a range, and be
listed with dashes. A range can be stepped with the '/' modifier, and
ranges can be in a list. A field can also be '*', or '*' divided in steps.
The best way to do this is to have a template for a single field that
encapsulates the syntax of that field, regardless of what that field
matches. We then fill in the actual template's value with the pattern
that matches that field. Each field is named, so we can pull them out as
a dictionary later.
]
variable[range_] assign[=] constant[{val}(?:-{val}(?:/\d+)?)?]
variable[template] assign[=] binary_operation[binary_operation[constant[(?P<{name}>] + call[constant[(?:\*(?:/\d+)?|{r}(?:,{r})*)].format, parameter[]]] + constant[)\s+]]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[^\s*] + call[name[template].format, parameter[]]] + call[name[template].format, parameter[]]] + call[name[template].format, parameter[]]] + call[name[template].format, parameter[]]] + call[name[template].format, parameter[]]] + constant[(?P<command>\S.*)$]]] | keyword[def] identifier[_make_cron_re] ():
literal[string]
identifier[range_] = literal[string]
identifier[template] = literal[string] + literal[string] . identifier[format] ( identifier[r] = identifier[range_] )+ literal[string]
keyword[return] (
literal[string] +
identifier[template] . identifier[format] ( identifier[name] = literal[string] , identifier[val] = literal[string] )+
identifier[template] . identifier[format] ( identifier[name] = literal[string] , identifier[val] = literal[string] )+
identifier[template] . identifier[format] ( identifier[name] = literal[string] , identifier[val] = literal[string] )+
identifier[template] . identifier[format] ( identifier[name] = literal[string] , identifier[val] = literal[string] )+
identifier[template] . identifier[format] ( identifier[name] = literal[string] , identifier[val] = literal[string] )+
literal[string]
) | def _make_cron_re():
"""
Make the regular expression that matches a crontab 'cron' line.
Each field has a set of allowed values, and can then be in a range, and be
listed with dashes. A range can be stepped with the '/' modifier, and
ranges can be in a list. A field can also be '*', or '*' divided in steps.
The best way to do this is to have a template for a single field that
encapsulates the syntax of that field, regardless of what that field
matches. We then fill in the actual template's value with the pattern
that matches that field. Each field is named, so we can pull them out as
a dictionary later.
"""
range_ = '{val}(?:-{val}(?:/\\d+)?)?'
template = '(?P<{name}>' + '(?:\\*(?:/\\d+)?|{r}(?:,{r})*)'.format(r=range_) + ')\\s+'
return '^\\s*' + template.format(name='minute', val='(?:\\d|[012345]\\d)') + template.format(name='hour', val='(?:\\d|[01]\\d|2[0123])') + template.format(name='day_of_month', val='(?:0?[1-9]|[12]\\d|3[01])') + template.format(name='month', val='(?:0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)') + template.format(name='day_of_week', val='(?:[0-7]|mon|tue|wed|thur|fri|sat|sun)') + '(?P<command>\\S.*)$' |
def _create_scheduler(self, save_sensors):
"""Return function to schedule saving sensors."""
@asyncio.coroutine
def schedule_save():
"""Save sensors and schedule a new save."""
yield from self.loop.run_in_executor(None, save_sensors)
callback = partial(self.loop.create_task, schedule_save())
task = self.loop.call_later(10.0, callback)
self._cancel_save = task.cancel
return schedule_save | def function[_create_scheduler, parameter[self, save_sensors]]:
constant[Return function to schedule saving sensors.]
def function[schedule_save, parameter[]]:
constant[Save sensors and schedule a new save.]
<ast.YieldFrom object at 0x7da1b27bb2b0>
variable[callback] assign[=] call[name[partial], parameter[name[self].loop.create_task, call[name[schedule_save], parameter[]]]]
variable[task] assign[=] call[name[self].loop.call_later, parameter[constant[10.0], name[callback]]]
name[self]._cancel_save assign[=] name[task].cancel
return[name[schedule_save]] | keyword[def] identifier[_create_scheduler] ( identifier[self] , identifier[save_sensors] ):
literal[string]
@ identifier[asyncio] . identifier[coroutine]
keyword[def] identifier[schedule_save] ():
literal[string]
keyword[yield] keyword[from] identifier[self] . identifier[loop] . identifier[run_in_executor] ( keyword[None] , identifier[save_sensors] )
identifier[callback] = identifier[partial] ( identifier[self] . identifier[loop] . identifier[create_task] , identifier[schedule_save] ())
identifier[task] = identifier[self] . identifier[loop] . identifier[call_later] ( literal[int] , identifier[callback] )
identifier[self] . identifier[_cancel_save] = identifier[task] . identifier[cancel]
keyword[return] identifier[schedule_save] | def _create_scheduler(self, save_sensors):
"""Return function to schedule saving sensors."""
@asyncio.coroutine
def schedule_save():
"""Save sensors and schedule a new save."""
yield from self.loop.run_in_executor(None, save_sensors)
callback = partial(self.loop.create_task, schedule_save())
task = self.loop.call_later(10.0, callback)
self._cancel_save = task.cancel
return schedule_save |
def delete_association(self, target, api_type=None, api_sub_type=None, unique_id=None):
"""
Deletes a association from a Indicator/Group/Victim
Args:
target:
api_type:
api_sub_type:
unique_id:
Returns:
"""
api_type = api_type or target.api_type
api_sub_type = api_sub_type or target.api_sub_type
unique_id = unique_id or target.unique_id
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if not target.can_update():
self._tcex.handle_error(910, [target.type])
return self.tc_requests.delete_association(
self.api_type,
self.api_sub_type,
self.unique_id,
api_type,
api_sub_type,
unique_id,
owner=self.owner,
) | def function[delete_association, parameter[self, target, api_type, api_sub_type, unique_id]]:
constant[
Deletes a association from a Indicator/Group/Victim
Args:
target:
api_type:
api_sub_type:
unique_id:
Returns:
]
variable[api_type] assign[=] <ast.BoolOp object at 0x7da18dc077c0>
variable[api_sub_type] assign[=] <ast.BoolOp object at 0x7da18dc05ff0>
variable[unique_id] assign[=] <ast.BoolOp object at 0x7da18dc060e0>
if <ast.UnaryOp object at 0x7da18dc05240> begin[:]
call[name[self]._tcex.handle_error, parameter[constant[910], list[[<ast.Attribute object at 0x7da18dc06ce0>]]]]
if <ast.UnaryOp object at 0x7da18dc07610> begin[:]
call[name[self]._tcex.handle_error, parameter[constant[910], list[[<ast.Attribute object at 0x7da18dc04a00>]]]]
return[call[name[self].tc_requests.delete_association, parameter[name[self].api_type, name[self].api_sub_type, name[self].unique_id, name[api_type], name[api_sub_type], name[unique_id]]]] | keyword[def] identifier[delete_association] ( identifier[self] , identifier[target] , identifier[api_type] = keyword[None] , identifier[api_sub_type] = keyword[None] , identifier[unique_id] = keyword[None] ):
literal[string]
identifier[api_type] = identifier[api_type] keyword[or] identifier[target] . identifier[api_type]
identifier[api_sub_type] = identifier[api_sub_type] keyword[or] identifier[target] . identifier[api_sub_type]
identifier[unique_id] = identifier[unique_id] keyword[or] identifier[target] . identifier[unique_id]
keyword[if] keyword[not] identifier[self] . identifier[can_update] ():
identifier[self] . identifier[_tcex] . identifier[handle_error] ( literal[int] ,[ identifier[self] . identifier[type] ])
keyword[if] keyword[not] identifier[target] . identifier[can_update] ():
identifier[self] . identifier[_tcex] . identifier[handle_error] ( literal[int] ,[ identifier[target] . identifier[type] ])
keyword[return] identifier[self] . identifier[tc_requests] . identifier[delete_association] (
identifier[self] . identifier[api_type] ,
identifier[self] . identifier[api_sub_type] ,
identifier[self] . identifier[unique_id] ,
identifier[api_type] ,
identifier[api_sub_type] ,
identifier[unique_id] ,
identifier[owner] = identifier[self] . identifier[owner] ,
) | def delete_association(self, target, api_type=None, api_sub_type=None, unique_id=None):
"""
Deletes a association from a Indicator/Group/Victim
Args:
target:
api_type:
api_sub_type:
unique_id:
Returns:
"""
api_type = api_type or target.api_type
api_sub_type = api_sub_type or target.api_sub_type
unique_id = unique_id or target.unique_id
if not self.can_update():
self._tcex.handle_error(910, [self.type]) # depends on [control=['if'], data=[]]
if not target.can_update():
self._tcex.handle_error(910, [target.type]) # depends on [control=['if'], data=[]]
return self.tc_requests.delete_association(self.api_type, self.api_sub_type, self.unique_id, api_type, api_sub_type, unique_id, owner=self.owner) |
def parse(self, file, skipheader=False, outfile=None):
"""Parse a line-oriented association file into a list of association dict objects
Note the returned list is of dict objects. TODO: These will
later be specified using marshmallow and it should be possible
to generate objects
Arguments
---------
file : file or string
The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file
outfile : file
Optional output file in which processed lines are written. This a file or `file-like-object`
Return
------
list
Associations generated from the file
"""
associations = self.association_generator(file, skipheader=skipheader, outfile=outfile)
a = list(associations)
return a | def function[parse, parameter[self, file, skipheader, outfile]]:
constant[Parse a line-oriented association file into a list of association dict objects
Note the returned list is of dict objects. TODO: These will
later be specified using marshmallow and it should be possible
to generate objects
Arguments
---------
file : file or string
The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file
outfile : file
Optional output file in which processed lines are written. This a file or `file-like-object`
Return
------
list
Associations generated from the file
]
variable[associations] assign[=] call[name[self].association_generator, parameter[name[file]]]
variable[a] assign[=] call[name[list], parameter[name[associations]]]
return[name[a]] | keyword[def] identifier[parse] ( identifier[self] , identifier[file] , identifier[skipheader] = keyword[False] , identifier[outfile] = keyword[None] ):
literal[string]
identifier[associations] = identifier[self] . identifier[association_generator] ( identifier[file] , identifier[skipheader] = identifier[skipheader] , identifier[outfile] = identifier[outfile] )
identifier[a] = identifier[list] ( identifier[associations] )
keyword[return] identifier[a] | def parse(self, file, skipheader=False, outfile=None):
"""Parse a line-oriented association file into a list of association dict objects
Note the returned list is of dict objects. TODO: These will
later be specified using marshmallow and it should be possible
to generate objects
Arguments
---------
file : file or string
The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file
outfile : file
Optional output file in which processed lines are written. This a file or `file-like-object`
Return
------
list
Associations generated from the file
"""
associations = self.association_generator(file, skipheader=skipheader, outfile=outfile)
a = list(associations)
return a |
def go_to_line(self, line=None):
"""Open 'go to line' dialog"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.go_to_line(line) | def function[go_to_line, parameter[self, line]]:
constant[Open 'go to line' dialog]
variable[editorstack] assign[=] call[name[self].get_current_editorstack, parameter[]]
if compare[name[editorstack] is_not constant[None]] begin[:]
call[name[editorstack].go_to_line, parameter[name[line]]] | keyword[def] identifier[go_to_line] ( identifier[self] , identifier[line] = keyword[None] ):
literal[string]
identifier[editorstack] = identifier[self] . identifier[get_current_editorstack] ()
keyword[if] identifier[editorstack] keyword[is] keyword[not] keyword[None] :
identifier[editorstack] . identifier[go_to_line] ( identifier[line] ) | def go_to_line(self, line=None):
"""Open 'go to line' dialog"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.go_to_line(line) # depends on [control=['if'], data=['editorstack']] |
def apply_sdb(opts, sdb_opts=None):
'''
Recurse for sdb:// links for opts
'''
# Late load of SDB to keep CLI light
import salt.utils.sdb
if sdb_opts is None:
sdb_opts = opts
if isinstance(sdb_opts, six.string_types) and sdb_opts.startswith('sdb://'):
return salt.utils.sdb.sdb_get(sdb_opts, opts)
elif isinstance(sdb_opts, dict):
for key, value in six.iteritems(sdb_opts):
if value is None:
continue
sdb_opts[key] = apply_sdb(opts, value)
elif isinstance(sdb_opts, list):
for key, value in enumerate(sdb_opts):
if value is None:
continue
sdb_opts[key] = apply_sdb(opts, value)
return sdb_opts | def function[apply_sdb, parameter[opts, sdb_opts]]:
constant[
Recurse for sdb:// links for opts
]
import module[salt.utils.sdb]
if compare[name[sdb_opts] is constant[None]] begin[:]
variable[sdb_opts] assign[=] name[opts]
if <ast.BoolOp object at 0x7da1b1fa21d0> begin[:]
return[call[name[salt].utils.sdb.sdb_get, parameter[name[sdb_opts], name[opts]]]]
return[name[sdb_opts]] | keyword[def] identifier[apply_sdb] ( identifier[opts] , identifier[sdb_opts] = keyword[None] ):
literal[string]
keyword[import] identifier[salt] . identifier[utils] . identifier[sdb]
keyword[if] identifier[sdb_opts] keyword[is] keyword[None] :
identifier[sdb_opts] = identifier[opts]
keyword[if] identifier[isinstance] ( identifier[sdb_opts] , identifier[six] . identifier[string_types] ) keyword[and] identifier[sdb_opts] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[salt] . identifier[utils] . identifier[sdb] . identifier[sdb_get] ( identifier[sdb_opts] , identifier[opts] )
keyword[elif] identifier[isinstance] ( identifier[sdb_opts] , identifier[dict] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[sdb_opts] ):
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[continue]
identifier[sdb_opts] [ identifier[key] ]= identifier[apply_sdb] ( identifier[opts] , identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[sdb_opts] , identifier[list] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[enumerate] ( identifier[sdb_opts] ):
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[continue]
identifier[sdb_opts] [ identifier[key] ]= identifier[apply_sdb] ( identifier[opts] , identifier[value] )
keyword[return] identifier[sdb_opts] | def apply_sdb(opts, sdb_opts=None):
"""
Recurse for sdb:// links for opts
"""
# Late load of SDB to keep CLI light
import salt.utils.sdb
if sdb_opts is None:
sdb_opts = opts # depends on [control=['if'], data=['sdb_opts']]
if isinstance(sdb_opts, six.string_types) and sdb_opts.startswith('sdb://'):
return salt.utils.sdb.sdb_get(sdb_opts, opts) # depends on [control=['if'], data=[]]
elif isinstance(sdb_opts, dict):
for (key, value) in six.iteritems(sdb_opts):
if value is None:
continue # depends on [control=['if'], data=[]]
sdb_opts[key] = apply_sdb(opts, value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(sdb_opts, list):
for (key, value) in enumerate(sdb_opts):
if value is None:
continue # depends on [control=['if'], data=[]]
sdb_opts[key] = apply_sdb(opts, value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return sdb_opts |
def roll_data(self, data):
"""Append new data to the right side of every line strip and remove
as much data from the left.
Parameters
----------
data : array-like
A data array to append.
"""
data = data.astype('float32')[..., np.newaxis]
s1 = self._data_shape[1] - self._offset
if data.shape[1] > s1:
self._pos_tex[:, self._offset:] = data[:, :s1]
self._pos_tex[:, :data.shape[1] - s1] = data[:, s1:]
self._offset = (self._offset + data.shape[1]) % self._data_shape[1]
else:
self._pos_tex[:, self._offset:self._offset+data.shape[1]] = data
self._offset += data.shape[1]
self.shared_program['offset'] = self._offset
self.update() | def function[roll_data, parameter[self, data]]:
constant[Append new data to the right side of every line strip and remove
as much data from the left.
Parameters
----------
data : array-like
A data array to append.
]
variable[data] assign[=] call[call[name[data].astype, parameter[constant[float32]]]][tuple[[<ast.Constant object at 0x7da1b0feea70>, <ast.Attribute object at 0x7da1b0fecb20>]]]
variable[s1] assign[=] binary_operation[call[name[self]._data_shape][constant[1]] - name[self]._offset]
if compare[call[name[data].shape][constant[1]] greater[>] name[s1]] begin[:]
call[name[self]._pos_tex][tuple[[<ast.Slice object at 0x7da1b0fede70>, <ast.Slice object at 0x7da1b0fed450>]]] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b0fef8b0>, <ast.Slice object at 0x7da1b0fef010>]]]
call[name[self]._pos_tex][tuple[[<ast.Slice object at 0x7da1b0fedcf0>, <ast.Slice object at 0x7da1b0fecbe0>]]] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b0fee6b0>, <ast.Slice object at 0x7da1b0fec970>]]]
name[self]._offset assign[=] binary_operation[binary_operation[name[self]._offset + call[name[data].shape][constant[1]]] <ast.Mod object at 0x7da2590d6920> call[name[self]._data_shape][constant[1]]]
call[name[self].shared_program][constant[offset]] assign[=] name[self]._offset
call[name[self].update, parameter[]] | keyword[def] identifier[roll_data] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[astype] ( literal[string] )[..., identifier[np] . identifier[newaxis] ]
identifier[s1] = identifier[self] . identifier[_data_shape] [ literal[int] ]- identifier[self] . identifier[_offset]
keyword[if] identifier[data] . identifier[shape] [ literal[int] ]> identifier[s1] :
identifier[self] . identifier[_pos_tex] [:, identifier[self] . identifier[_offset] :]= identifier[data] [:,: identifier[s1] ]
identifier[self] . identifier[_pos_tex] [:,: identifier[data] . identifier[shape] [ literal[int] ]- identifier[s1] ]= identifier[data] [:, identifier[s1] :]
identifier[self] . identifier[_offset] =( identifier[self] . identifier[_offset] + identifier[data] . identifier[shape] [ literal[int] ])% identifier[self] . identifier[_data_shape] [ literal[int] ]
keyword[else] :
identifier[self] . identifier[_pos_tex] [:, identifier[self] . identifier[_offset] : identifier[self] . identifier[_offset] + identifier[data] . identifier[shape] [ literal[int] ]]= identifier[data]
identifier[self] . identifier[_offset] += identifier[data] . identifier[shape] [ literal[int] ]
identifier[self] . identifier[shared_program] [ literal[string] ]= identifier[self] . identifier[_offset]
identifier[self] . identifier[update] () | def roll_data(self, data):
"""Append new data to the right side of every line strip and remove
as much data from the left.
Parameters
----------
data : array-like
A data array to append.
"""
data = data.astype('float32')[..., np.newaxis]
s1 = self._data_shape[1] - self._offset
if data.shape[1] > s1:
self._pos_tex[:, self._offset:] = data[:, :s1]
self._pos_tex[:, :data.shape[1] - s1] = data[:, s1:]
self._offset = (self._offset + data.shape[1]) % self._data_shape[1] # depends on [control=['if'], data=['s1']]
else:
self._pos_tex[:, self._offset:self._offset + data.shape[1]] = data
self._offset += data.shape[1]
self.shared_program['offset'] = self._offset
self.update() |
def classinstances(cls):
    """Return all instances whose concrete type is exactly this class.

    Unlike ``allinstances``, which also yields instances of subclasses,
    this filters the pool down to objects created from ``cls`` itself.
    A subclass calling this therefore only gets its own direct
    instances, never instances of further subclasses.

    :returns: all instances of the current class
    :rtype: list
    :raises: None
    """
    # ``type(inst) is cls`` is an identity check, deliberately stricter
    # than isinstance(): subclass instances are excluded.
    return [inst for inst in cls.allinstances() if type(inst) is cls]
constant[Return all instances of the current class
JB_Gui will not return the instances of subclasses
A subclass will only return the instances that have the same
type as the subclass. So it won't return instances of further subclasses.
:returns: all instnaces of the current class
:rtype: list
:raises: None
]
variable[l] assign[=] <ast.ListComp object at 0x7da18c4cd420>
return[name[l]] | keyword[def] identifier[classinstances] ( identifier[cls] ):
literal[string]
identifier[l] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[cls] . identifier[allinstances] () keyword[if] identifier[type] ( identifier[i] )== identifier[cls] ]
keyword[return] identifier[l] | def classinstances(cls):
"""Return all instances of the current class
JB_Gui will not return the instances of subclasses
A subclass will only return the instances that have the same
type as the subclass. So it won't return instances of further subclasses.
:returns: all instnaces of the current class
:rtype: list
:raises: None
"""
l = [i for i in cls.allinstances() if type(i) == cls]
return l |
def norm_l0(x, axis=None, eps=0.0):
    r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)

    .. math::
     \| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
     0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
     \end{array} \right.

    i.e. count the entries of `x` whose absolute value exceeds `eps`.

    Parameters
    ----------
    x : array_like
      Input array :math:`\mathbf{x}`
    axis : `None` or int or tuple of ints, optional (default None)
      Axes of `x` over which to count. If `None`, the entire array is
      treated as a single vector; otherwise the count is taken over the
      specified axes, with distinct values for the remaining axes.
    eps : float, optional (default 0.0)
      Absolute-value threshold below which an entry is considered zero.

    Returns
    -------
    nl0 : float or ndarray
      The count, as a scalar when it has a single element, otherwise as
      an array (the reduced axes are kept as size-1 dimensions).
    """
    support = np.abs(x) > eps
    nl0 = np.sum(support, axis=axis, keepdims=True)
    # Collapse a single-element result to a plain scalar.
    return nl0.ravel()[0] if nl0.size == 1 else nl0
constant[Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be
zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
]
variable[nl0] assign[=] call[name[np].sum, parameter[compare[call[name[np].abs, parameter[name[x]]] greater[>] name[eps]]]]
if compare[name[nl0].size equal[==] constant[1]] begin[:]
variable[nl0] assign[=] call[call[name[nl0].ravel, parameter[]]][constant[0]]
return[name[nl0]] | keyword[def] identifier[norm_l0] ( identifier[x] , identifier[axis] = keyword[None] , identifier[eps] = literal[int] ):
literal[string]
identifier[nl0] = identifier[np] . identifier[sum] ( identifier[np] . identifier[abs] ( identifier[x] )> identifier[eps] , identifier[axis] = identifier[axis] , identifier[keepdims] = keyword[True] )
keyword[if] identifier[nl0] . identifier[size] == literal[int] :
identifier[nl0] = identifier[nl0] . identifier[ravel] ()[ literal[int] ]
keyword[return] identifier[nl0] | def norm_l0(x, axis=None, eps=0.0):
"""Compute the :math:`\\ell_0` "norm" (it is not really a norm)
.. math::
\\| \\mathbf{x} \\|_0 = \\sum_i \\left\\{ \\begin{array}{ccc}
0 & \\text{if} & x_i = 0 \\\\ 1 &\\text{if} & x_i \\neq 0
\\end{array} \\right.
where :math:`x_i` is element :math:`i` of vector :math:`\\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be
zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
"""
nl0 = np.sum(np.abs(x) > eps, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl0.size == 1:
nl0 = nl0.ravel()[0] # depends on [control=['if'], data=[]]
return nl0 |
def advance(self, word_id: int) -> 'ConstrainedHypothesis':
    """
    Return a deep copy of this hypothesis advanced on ``word_id``.

    The subtlety is multi-word (phrasal) constraints: we may have started
    one without finishing it. Unconstrained words are always allowed, so
    when the next word does not continue the phrase we "back out" of the
    current (incomplete) phrase, marking all of its words unmet again.

    :param word_id: The word ID to advance on.
    :return: A deep copy of the object, advanced on word_id.
    """
    # Never mutate the caller's hypothesis; all updates go to the copy.
    hyp = copy.deepcopy(self)
    in_phrase = hyp.last_met != -1 and hyp.is_sequence[hyp.last_met] == 1
    if in_phrase:
        nxt = hyp.last_met + 1
        if word_id == hyp.constraints[nxt]:
            # The word continues the phrase as expected.
            hyp.met[nxt] = True
            hyp.last_met = nxt
        else:
            # Wrong continuation: back out of the incomplete phrase,
            # un-meeting its words from the tail backwards.
            pos = hyp.last_met
            while hyp.is_sequence[pos]:
                hyp.met[pos] = False
                pos -= 1
            hyp.last_met = -1
    else:
        # Candidate tuples: (constraint, is-non-initial-phrase-word, met).
        # We look for an unmet word_id that does not sit mid-phrase.
        candidates = list(zip(hyp.constraints,
                              [False] + hyp.is_sequence[:-1],
                              hyp.met))
        try:
            idx = candidates.index((word_id, False, False))
        except ValueError:
            # Not a (newly met) constraint; return the unchanged copy.
            pass
        else:
            hyp.met[idx] = True
            hyp.last_met = idx
    return hyp
constant[
Updates the constraints object based on advancing on word_id.
There is a complication, in that we may have started but not
yet completed a multi-word constraint. We need to allow constraints
to be added as unconstrained words, so if the next word is
invalid, we must "back out" of the current (incomplete) phrase,
re-setting all of its words as unmet.
:param word_id: The word ID to advance on.
:return: A deep copy of the object, advanced on word_id.
]
variable[obj] assign[=] call[name[copy].deepcopy, parameter[name[self]]]
if <ast.BoolOp object at 0x7da1b1d74dc0> begin[:]
if compare[name[word_id] equal[==] call[name[obj].constraints][binary_operation[name[obj].last_met + constant[1]]]] begin[:]
call[name[obj].met][binary_operation[name[obj].last_met + constant[1]]] assign[=] constant[True]
<ast.AugAssign object at 0x7da1b1d75960>
return[name[obj]] | keyword[def] identifier[advance] ( identifier[self] , identifier[word_id] : identifier[int] )-> literal[string] :
literal[string]
identifier[obj] = identifier[copy] . identifier[deepcopy] ( identifier[self] )
keyword[if] identifier[obj] . identifier[last_met] !=- literal[int] keyword[and] identifier[obj] . identifier[is_sequence] [ identifier[obj] . identifier[last_met] ]== literal[int] :
keyword[if] identifier[word_id] == identifier[obj] . identifier[constraints] [ identifier[obj] . identifier[last_met] + literal[int] ]:
identifier[obj] . identifier[met] [ identifier[obj] . identifier[last_met] + literal[int] ]= keyword[True]
identifier[obj] . identifier[last_met] += literal[int]
keyword[else] :
identifier[index] = identifier[obj] . identifier[last_met]
keyword[while] identifier[obj] . identifier[is_sequence] [ identifier[index] ]:
identifier[obj] . identifier[met] [ identifier[index] ]= keyword[False]
identifier[index] -= literal[int]
identifier[obj] . identifier[last_met] =- literal[int]
keyword[else] :
identifier[constraint_tuples] = identifier[list] ( identifier[zip] ( identifier[obj] . identifier[constraints] ,[ keyword[False] ]+ identifier[obj] . identifier[is_sequence] [:- literal[int] ], identifier[obj] . identifier[met] ))
identifier[query] =( identifier[word_id] , keyword[False] , keyword[False] )
keyword[try] :
identifier[pos] = identifier[constraint_tuples] . identifier[index] ( identifier[query] )
identifier[obj] . identifier[met] [ identifier[pos] ]= keyword[True]
identifier[obj] . identifier[last_met] = identifier[pos]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[obj] | def advance(self, word_id: int) -> 'ConstrainedHypothesis':
"""
Updates the constraints object based on advancing on word_id.
There is a complication, in that we may have started but not
yet completed a multi-word constraint. We need to allow constraints
to be added as unconstrained words, so if the next word is
invalid, we must "back out" of the current (incomplete) phrase,
re-setting all of its words as unmet.
:param word_id: The word ID to advance on.
:return: A deep copy of the object, advanced on word_id.
"""
obj = copy.deepcopy(self)
# First, check if we're updating a sequential constraint.
if obj.last_met != -1 and obj.is_sequence[obj.last_met] == 1:
if word_id == obj.constraints[obj.last_met + 1]:
# Here, the word matches what we expect next in the constraint, so we update everything
obj.met[obj.last_met + 1] = True
obj.last_met += 1 # depends on [control=['if'], data=[]]
else:
# Here, the word is not the expected next word of the constraint, so we back out of the constraint.
index = obj.last_met
while obj.is_sequence[index]:
obj.met[index] = False
index -= 1 # depends on [control=['while'], data=[]]
obj.last_met = -1 # depends on [control=['if'], data=[]]
else:
# If not, check whether we're meeting a single-word constraint
# Build a list from all constraints of tuples of the
# form (constraint, whether it's a non-initial sequential, whether it's been met)
constraint_tuples = list(zip(obj.constraints, [False] + obj.is_sequence[:-1], obj.met))
# We are searching for an unmet constraint (word_id) that is not the middle of a phrase and is not met
query = (word_id, False, False)
try:
pos = constraint_tuples.index(query)
obj.met[pos] = True
obj.last_met = pos # depends on [control=['try'], data=[]]
except ValueError:
# query not found; identical but duplicated object will be returned
pass # depends on [control=['except'], data=[]]
return obj |
async def get_ticket(self, request):
    """Look up the ticket for a request.

    Args:
        request: aiohttp Request object.

    Returns:
        A ticket (string like) object stored under this policy's cookie
        name, or None if no ticket is available for the passed request.
    """
    return (await get_session(request)).get(self.cookie_name)
literal[string]
identifier[session] = keyword[await] identifier[get_session] ( identifier[request] )
keyword[return] identifier[session] . identifier[get] ( identifier[self] . identifier[cookie_name] ) | async def get_ticket(self, request):
"""Called to return the ticket for a request.
Args:
request: aiohttp Request object.
Returns:
A ticket (string like) object, or None if no ticket is available
for the passed request.
"""
session = await get_session(request)
return session.get(self.cookie_name) |
def get(self, key, default=None):
    """Return the value stored under ``key``, or ``default`` if absent.

    :param str key: Key to look up
    :param str default: Value returned when ``key`` is not present
    """
    return self.__getitem__(key) if key in self else default
constant[ Return the key if exists or a default value
:param str value: Value
:param str default: Default value if key not present
]
if compare[name[key] in name[self]] begin[:]
return[call[name[self].__getitem__, parameter[name[key]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[key] keyword[in] identifier[self] :
keyword[return] identifier[self] . identifier[__getitem__] ( identifier[key] )
keyword[else] :
keyword[return] identifier[default] | def get(self, key, default=None):
""" Return the key if exists or a default value
:param str value: Value
:param str default: Default value if key not present
"""
if key in self:
return self.__getitem__(key) # depends on [control=['if'], data=['key', 'self']]
else:
return default |
def error(self, text):
    """
    Log *text* at ERROR level for both the file and console handlers.

    The message is wrapped in a LogMessageCommand, serialized with dill
    and pushed onto the logger queue, so its timestamp is captured now
    even though the console redraws on its own schedule (to avoid
    flickering). Call 'flush' to force an immediate redraw, which may
    produce flickering.

    :param text: The text to log into file and console.
    """
    command = LogMessageCommand(text=text, level=logging.ERROR)
    self.queue.put(dill.dumps(command))
constant[
Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
]
call[name[self].queue.put, parameter[call[name[dill].dumps, parameter[call[name[LogMessageCommand], parameter[]]]]]] | keyword[def] identifier[error] ( identifier[self] , identifier[text] ):
literal[string]
identifier[self] . identifier[queue] . identifier[put] ( identifier[dill] . identifier[dumps] ( identifier[LogMessageCommand] ( identifier[text] = identifier[text] , identifier[level] = identifier[logging] . identifier[ERROR] ))) | def error(self, text):
"""
Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.ERROR))) |
def flag_message(current):
    """
    Flag inappropriate messages.

    .. code-block:: python

        # request:
        {
            'view': '_zops_flag_message',
            'message_key': key,
        }

        # response:
        {
            'status': 'Created',
            'code': 201,
        }
    """
    current.output = {'status': 'Created', 'code': 201}
    # NOTE(review): the request example documents 'message_key', but the
    # handler reads current.input['key'] -- confirm which name clients send.
    FlaggedMessage.objects.get_or_create(
        user_id=current.user_id,
        message_id=current.input['key'],
    )
constant[
Flag inappropriate messages
.. code-block:: python
# request:
{
'view':'_zops_flag_message',
'message_key': key,
}
# response:
{
'
'status': 'Created',
'code': 201,
}
]
name[current].output assign[=] dictionary[[<ast.Constant object at 0x7da20c992260>, <ast.Constant object at 0x7da20c990160>], [<ast.Constant object at 0x7da20c993430>, <ast.Constant object at 0x7da20c993550>]]
call[name[FlaggedMessage].objects.get_or_create, parameter[]] | keyword[def] identifier[flag_message] ( identifier[current] ):
literal[string]
identifier[current] . identifier[output] ={ literal[string] : literal[string] , literal[string] : literal[int] }
identifier[FlaggedMessage] . identifier[objects] . identifier[get_or_create] ( identifier[user_id] = identifier[current] . identifier[user_id] ,
identifier[message_id] = identifier[current] . identifier[input] [ literal[string] ]) | def flag_message(current):
"""
Flag inappropriate messages
.. code-block:: python
# request:
{
'view':'_zops_flag_message',
'message_key': key,
}
# response:
{
'
'status': 'Created',
'code': 201,
}
"""
current.output = {'status': 'Created', 'code': 201}
FlaggedMessage.objects.get_or_create(user_id=current.user_id, message_id=current.input['key']) |
def binary(self):
    """
    Return the encoded (external term format) representation of the atom.

    The wire format depends on the stored value -- presumably set by the
    constructor (not visible here):

    * int          -> atom cache reference (one-byte index)
    * unicode text -> SMALL_ATOM_UTF8_EXT / ATOM_UTF8_EXT
    * bytes        -> SMALL_ATOM_EXT / ATOM_EXT

    :raises OutputException: when the encoded name exceeds 65535 bytes
        ('uint16 overflow') or the value has an unsupported type.
    """
    value = self.value
    if isinstance(value, int):
        return b_chr(_TAG_ATOM_CACHE_REF) + b_chr(value)
    if isinstance(value, TypeUnicode):
        encoded = value.encode('utf-8')
        size = len(encoded)
        # Small atoms store the length in one byte, larger ones in a
        # big-endian uint16; anything longer cannot be represented.
        if size <= 255:
            return b_chr(_TAG_SMALL_ATOM_UTF8_EXT) + b_chr(size) + encoded
        if size <= 65535:
            return (b_chr(_TAG_ATOM_UTF8_EXT) +
                    struct.pack(b'>H', size) + encoded)
        raise OutputException('uint16 overflow')
    if isinstance(value, bytes):
        size = len(value)
        if size <= 255:
            return b_chr(_TAG_SMALL_ATOM_EXT) + b_chr(size) + value
        if size <= 65535:
            return (b_chr(_TAG_ATOM_EXT) +
                    struct.pack(b'>H', size) + value)
        raise OutputException('uint16 overflow')
    raise OutputException('unknown atom type')
constant[
return encoded representation
]
if call[name[isinstance], parameter[name[self].value, name[int]]] begin[:]
return[binary_operation[call[name[b_chr], parameter[name[_TAG_ATOM_CACHE_REF]]] + call[name[b_chr], parameter[name[self].value]]]] | keyword[def] identifier[binary] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[value] , identifier[int] ):
keyword[return] identifier[b_chr] ( identifier[_TAG_ATOM_CACHE_REF] )+ identifier[b_chr] ( identifier[self] . identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[value] , identifier[TypeUnicode] ):
identifier[value_encoded] = identifier[self] . identifier[value] . identifier[encode] ( literal[string] )
identifier[length] = identifier[len] ( identifier[value_encoded] )
keyword[if] identifier[length] <= literal[int] :
keyword[return] (
identifier[b_chr] ( identifier[_TAG_SMALL_ATOM_UTF8_EXT] )+
identifier[b_chr] ( identifier[length] )+ identifier[value_encoded]
)
keyword[elif] identifier[length] <= literal[int] :
keyword[return] (
identifier[b_chr] ( identifier[_TAG_ATOM_UTF8_EXT] )+
identifier[struct] . identifier[pack] ( literal[string] , identifier[length] )+ identifier[value_encoded]
)
keyword[else] :
keyword[raise] identifier[OutputException] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[value] , identifier[bytes] ):
identifier[length] = identifier[len] ( identifier[self] . identifier[value] )
keyword[if] identifier[length] <= literal[int] :
keyword[return] identifier[b_chr] ( identifier[_TAG_SMALL_ATOM_EXT] )+ identifier[b_chr] ( identifier[length] )+ identifier[self] . identifier[value]
keyword[elif] identifier[length] <= literal[int] :
keyword[return] (
identifier[b_chr] ( identifier[_TAG_ATOM_EXT] )+
identifier[struct] . identifier[pack] ( literal[string] , identifier[length] )+ identifier[self] . identifier[value]
)
keyword[else] :
keyword[raise] identifier[OutputException] ( literal[string] )
keyword[else] :
keyword[raise] identifier[OutputException] ( literal[string] ) | def binary(self):
"""
return encoded representation
"""
if isinstance(self.value, int):
return b_chr(_TAG_ATOM_CACHE_REF) + b_chr(self.value) # depends on [control=['if'], data=[]]
elif isinstance(self.value, TypeUnicode):
value_encoded = self.value.encode('utf-8')
length = len(value_encoded)
if length <= 255:
return b_chr(_TAG_SMALL_ATOM_UTF8_EXT) + b_chr(length) + value_encoded # depends on [control=['if'], data=['length']]
elif length <= 65535:
return b_chr(_TAG_ATOM_UTF8_EXT) + struct.pack(b'>H', length) + value_encoded # depends on [control=['if'], data=['length']]
else:
raise OutputException('uint16 overflow') # depends on [control=['if'], data=[]]
elif isinstance(self.value, bytes):
length = len(self.value)
if length <= 255:
return b_chr(_TAG_SMALL_ATOM_EXT) + b_chr(length) + self.value # depends on [control=['if'], data=['length']]
elif length <= 65535:
return b_chr(_TAG_ATOM_EXT) + struct.pack(b'>H', length) + self.value # depends on [control=['if'], data=['length']]
else:
raise OutputException('uint16 overflow') # depends on [control=['if'], data=[]]
else:
raise OutputException('unknown atom type') |
def add_element(self, location, element, delete_elem=False):
    """
    Store ``element`` in a new entry at ``location``.

    Args:
        location: String or :class:`LocationDescriptor` describing a
            "separator location" (e.g. ``dir1/dir2/dir3``).
        element: Element to store.
        delete_elem: Whether an existing element at the location should
            be deleted first.

    Returns:
        The created node holding the element.

    Notes:
        Every intermediate sub-location **must** already exist; the last
        one may or may not. Use the stricter :meth:`add_unique_element`
        when duplicates must be rejected. No common root node is needed;
        a dummy root is used internally.
    """
    return self._create_entry(location, element, unique=False,
                              delete_element=delete_elem)
constant[
Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
delete_elem: Delete old element or not if it exist?
Returns:
The created node with the element.
Notes:
The different sub locations entries **must** exist and the last may or may not already exist.
Use the more strict :meth:`add_unique_element` method if needed.
You don't need to have a common root node. We internally use a dummy root node.
]
return[call[name[self]._create_entry, parameter[name[location], name[element]]]] | keyword[def] identifier[add_element] ( identifier[self] , identifier[location] , identifier[element] , identifier[delete_elem] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_create_entry] ( identifier[location] , identifier[element] , identifier[unique] = keyword[False] , identifier[delete_element] = identifier[delete_elem] ) | def add_element(self, location, element, delete_elem=False):
"""
Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
delete_elem: Delete old element or not if it exist?
Returns:
The created node with the element.
Notes:
The different sub locations entries **must** exist and the last may or may not already exist.
Use the more strict :meth:`add_unique_element` method if needed.
You don't need to have a common root node. We internally use a dummy root node.
"""
return self._create_entry(location, element, unique=False, delete_element=delete_elem) |
def air_gap(self,
            volume: float = None,
            height: float = None) -> 'InstrumentContext':
    """
    Pull air into the pipette's current tip at the current location.

    :param volume: The amount in uL of air to aspirate. (Default: use
        all remaining volume in the tip)
    :type volume: float
    :param height: Millimeters to move above the current Well before
        aspirating. (Default: 5mm above the current Well)
    :type height: float
    :raises NoTipAttachedError: If no tip is attached to the pipette
    :raises RuntimeError: If the location cache is None, which happens
        when no prior command took a location (eg, :py:meth:`.aspirate`,
        :py:meth:`dispense`)
    :returns: This instance
    """
    if not self.hw_pipette['has_tip']:
        raise hc.NoTipAttachedError('Pipette has no tip. Aborting air_gap')
    gap_height = 5 if height is None else height
    cached = self._ctx.location_cache
    if not cached or not isinstance(cached.labware, Well):
        raise RuntimeError('No previous Well cached to perform air gap')
    # Move above the last-used well, then aspirate air there.
    self.move_to(cached.labware.top(gap_height))
    self.aspirate(volume)
    return self
constant[
Pull air into the pipette current tip at the current location
:param volume: The amount in uL to aspirate air into the tube.
(Default will use all remaining volume in tip)
:type volume: float
:param height: The number of millimiters to move above the current Well
to air-gap aspirate. (Default: 5mm above current Well)
:type height: float
:raises NoTipAttachedError: If no tip is attached to the pipette
:raises RuntimeError: If location cache is None.
This should happen if `touch_tip` is called
without first calling a method that takes a
location (eg, :py:meth:`.aspirate`,
:py:meth:`dispense`)
:returns: This instance
]
if <ast.UnaryOp object at 0x7da1b08ab820> begin[:]
<ast.Raise object at 0x7da1b08ab1f0>
if compare[name[height] is constant[None]] begin[:]
variable[height] assign[=] constant[5]
variable[loc] assign[=] name[self]._ctx.location_cache
if <ast.BoolOp object at 0x7da18f09d960> begin[:]
<ast.Raise object at 0x7da18f09dcf0>
variable[target] assign[=] call[name[loc].labware.top, parameter[name[height]]]
call[name[self].move_to, parameter[name[target]]]
call[name[self].aspirate, parameter[name[volume]]]
return[name[self]] | keyword[def] identifier[air_gap] ( identifier[self] ,
identifier[volume] : identifier[float] = keyword[None] ,
identifier[height] : identifier[float] = keyword[None] )-> literal[string] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[hw_pipette] [ literal[string] ]:
keyword[raise] identifier[hc] . identifier[NoTipAttachedError] ( literal[string] )
keyword[if] identifier[height] keyword[is] keyword[None] :
identifier[height] = literal[int]
identifier[loc] = identifier[self] . identifier[_ctx] . identifier[location_cache]
keyword[if] keyword[not] identifier[loc] keyword[or] keyword[not] identifier[isinstance] ( identifier[loc] . identifier[labware] , identifier[Well] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[target] = identifier[loc] . identifier[labware] . identifier[top] ( identifier[height] )
identifier[self] . identifier[move_to] ( identifier[target] )
identifier[self] . identifier[aspirate] ( identifier[volume] )
keyword[return] identifier[self] | def air_gap(self, volume: float=None, height: float=None) -> 'InstrumentContext':
"""
Pull air into the pipette current tip at the current location
:param volume: The amount in uL to aspirate air into the tube.
(Default will use all remaining volume in tip)
:type volume: float
:param height: The number of millimiters to move above the current Well
to air-gap aspirate. (Default: 5mm above current Well)
:type height: float
:raises NoTipAttachedError: If no tip is attached to the pipette
:raises RuntimeError: If location cache is None.
This should happen if `touch_tip` is called
without first calling a method that takes a
location (eg, :py:meth:`.aspirate`,
:py:meth:`dispense`)
:returns: This instance
"""
if not self.hw_pipette['has_tip']:
raise hc.NoTipAttachedError('Pipette has no tip. Aborting air_gap') # depends on [control=['if'], data=[]]
if height is None:
height = 5 # depends on [control=['if'], data=['height']]
loc = self._ctx.location_cache
if not loc or not isinstance(loc.labware, Well):
raise RuntimeError('No previous Well cached to perform air gap') # depends on [control=['if'], data=[]]
target = loc.labware.top(height)
self.move_to(target)
self.aspirate(volume)
return self |
def _check_in_cshm_polygon(self, rup):
    """
    Check whether any part of the rupture surface mesh lies inside the
    intended boundaries of the Canterbury Seismic Hazard Model of
    Gerstenberger et al. (2014), Seismic hazard modelling for the
    recovery of Christchurch, Earthquake Spectra, 30(1), 17-29.
    """
    lons = np.ravel(rup.surface.mesh.array[0])
    lats = np.ravel(rup.surface.mesh.array[1])
    # Bounding box provided by M Gerstenberger (personal communication,
    # 10 August 2018).
    cshm_boundary = shapely.geometry.Polygon(
        [(171.6, -43.3), (173.2, -43.3), (173.2, -43.9), (171.6, -43.9)])
    return any(shapely.geometry.Point(lon, lat).within(cshm_boundary)
               for lon, lat in zip(lons, lats))
constant[
Checks if any part of the rupture surface mesh is located within the
intended boundaries of the Canterbury Seismic Hazard Model in
Gerstenberger et al. (2014), Seismic hazard modelling for the recovery
of Christchurch, Earthquake Spectra, 30(1), 17-29.
]
variable[lats] assign[=] call[name[np].ravel, parameter[call[name[rup].surface.mesh.array][constant[1]]]]
variable[lons] assign[=] call[name[np].ravel, parameter[call[name[rup].surface.mesh.array][constant[0]]]]
variable[polygon] assign[=] call[name[shapely].geometry.Polygon, parameter[list[[<ast.Tuple object at 0x7da20c990130>, <ast.Tuple object at 0x7da20c993610>, <ast.Tuple object at 0x7da20c990760>, <ast.Tuple object at 0x7da20c992dd0>]]]]
variable[points_in_polygon] assign[=] <ast.ListComp object at 0x7da20c992e60>
variable[in_cshm] assign[=] call[name[any], parameter[name[points_in_polygon]]]
return[name[in_cshm]] | keyword[def] identifier[_check_in_cshm_polygon] ( identifier[self] , identifier[rup] ):
literal[string]
identifier[lats] = identifier[np] . identifier[ravel] ( identifier[rup] . identifier[surface] . identifier[mesh] . identifier[array] [ literal[int] ])
identifier[lons] = identifier[np] . identifier[ravel] ( identifier[rup] . identifier[surface] . identifier[mesh] . identifier[array] [ literal[int] ])
identifier[polygon] = identifier[shapely] . identifier[geometry] . identifier[Polygon] ([( literal[int] ,- literal[int] ),( literal[int] ,- literal[int] ),
( literal[int] ,- literal[int] ),( literal[int] ,- literal[int] )])
identifier[points_in_polygon] =[
identifier[shapely] . identifier[geometry] . identifier[Point] ( identifier[lons] [ identifier[i] ], identifier[lats] [ identifier[i] ]). identifier[within] ( identifier[polygon] )
keyword[for] identifier[i] keyword[in] identifier[np] . identifier[arange] ( identifier[len] ( identifier[lons] ))]
identifier[in_cshm] = identifier[any] ( identifier[points_in_polygon] )
keyword[return] identifier[in_cshm] | def _check_in_cshm_polygon(self, rup):
"""
Checks if any part of the rupture surface mesh is located within the
intended boundaries of the Canterbury Seismic Hazard Model in
Gerstenberger et al. (2014), Seismic hazard modelling for the recovery
of Christchurch, Earthquake Spectra, 30(1), 17-29.
"""
lats = np.ravel(rup.surface.mesh.array[1])
lons = np.ravel(rup.surface.mesh.array[0])
# These coordinates are provided by M Gerstenberger (personal
# communication, 10 August 2018)
polygon = shapely.geometry.Polygon([(171.6, -43.3), (173.2, -43.3), (173.2, -43.9), (171.6, -43.9)])
points_in_polygon = [shapely.geometry.Point(lons[i], lats[i]).within(polygon) for i in np.arange(len(lons))]
in_cshm = any(points_in_polygon)
return in_cshm |
def get_task_result(self, task_id):
    """
    Fetch the result of a task from the worker; the worker answers with
    None while the task is still running.
    It's prefered to use :class:`carotte.Task` object directly.
    :param string task_id: Task ID
    :returns: Task dict
    :rtype: dict
    """
    # Ask the worker for the result of this specific task id.
    request = {'action': 'get_result', 'id': task_id}
    self.__send_pyobj(request)
    # The reply is the (possibly unfinished) task object itself.
    return self.__recv_pyobj()
constant[
Get task result from worker. If the task is not finished, return None.
It's prefered to use :class:`carotte.Task` object directly.
:param string task_id: Task ID
:returns: Task dict
:rtype: dict
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0948a60>, <ast.Constant object at 0x7da1b0949720>], [<ast.Constant object at 0x7da1b0948970>, <ast.Name object at 0x7da1b0948310>]]
call[name[self].__send_pyobj, parameter[name[data]]]
variable[task] assign[=] call[name[self].__recv_pyobj, parameter[]]
return[name[task]] | keyword[def] identifier[get_task_result] ( identifier[self] , identifier[task_id] ):
literal[string]
identifier[data] ={
literal[string] : literal[string] ,
literal[string] : identifier[task_id]
}
identifier[self] . identifier[__send_pyobj] ( identifier[data] )
identifier[task] = identifier[self] . identifier[__recv_pyobj] ()
keyword[return] identifier[task] | def get_task_result(self, task_id):
"""
Get task result from worker. If the task is not finished, return None.
It's prefered to use :class:`carotte.Task` object directly.
:param string task_id: Task ID
:returns: Task dict
:rtype: dict
"""
data = {'action': 'get_result', 'id': task_id}
self.__send_pyobj(data)
task = self.__recv_pyobj()
return task |
def get(self, key: str, default=None) -> Signature:
    """ Get a signature instance by its internal_name """
    # Fall back to *default* when the internal name is unknown.
    return self._hsig[key] if key in self._hsig else default
constant[ Get a signature instance by its internal_name ]
variable[item] assign[=] name[default]
if compare[name[key] in name[self]._hsig] begin[:]
variable[item] assign[=] call[name[self]._hsig][name[key]]
return[name[item]] | keyword[def] identifier[get] ( identifier[self] , identifier[key] : identifier[str] , identifier[default] = keyword[None] )-> identifier[Signature] :
literal[string]
identifier[item] = identifier[default]
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_hsig] :
identifier[item] = identifier[self] . identifier[_hsig] [ identifier[key] ]
keyword[return] identifier[item] | def get(self, key: str, default=None) -> Signature:
""" Get a signature instance by its internal_name """
item = default
if key in self._hsig:
item = self._hsig[key] # depends on [control=['if'], data=['key']]
return item |
def FloatProperty(name, default=0.0, readonly=False, docs=None):
    '''
    Factory for a float-valued ``property`` with flexible assignment.

    :name: string - property name
    :default: float - property default value
    :readonly: boolean - if True, setter method is NOT generated
    :docs: string - docstring attached to the generated property
    Returns a property object that can be used to initialize a
    class instance variable as a property.

    The generated setter accepts a plain number, a mapping containing
    *name*, an indexable iterable (first element is used), an object whose
    ``vars()`` contain *name*, or None (which resets to *default*).
    '''
    # BUG FIX: ``collections.Mapping`` / ``collections.Iterable`` aliases
    # were removed in Python 3.10; the ABCs live in collections.abc.
    from collections.abc import Iterable, Mapping

    private_name = '_' + name

    def getf(self):
        # Lazily install the default so instances need no __init__ support.
        if not hasattr(self, private_name):
            setattr(self, private_name, default)
        return getattr(self, private_name)

    if readonly:
        # No setter: assignment raises AttributeError.
        setf = None
    else:
        def setf(self, newValue):
            def epsilon_set(v):
                # epsilon_set: creates a float from v unless that
                # float is less than epsilon, which will
                # be considered effectively zero.
                fv = float(v)
                return 0.0 if nearly_zero(fv) else fv
            # 1) Direct numeric (or numeric-string) assignment.
            try:
                setattr(self, private_name, epsilon_set(newValue))
                return
            except TypeError:
                pass
            # 2) Mapping: look the value up under the property name.
            if isinstance(newValue, Mapping):
                try:
                    setattr(self, private_name, epsilon_set(newValue[name]))
                except KeyError:
                    pass
                return
            # 3) Indexable iterable: take the first element.
            if isinstance(newValue, Iterable):
                try:
                    setattr(self, private_name, epsilon_set(newValue[0]))
                    return
                except (IndexError, TypeError):
                    pass
            # 4) Arbitrary object: look for an attribute named *name*.
            try:
                mapping = vars(newValue)
                setattr(self, private_name, epsilon_set(mapping[name]))
                return
            except (TypeError, KeyError):
                pass
            # 5) None resets to the default value.
            if newValue is None:
                setattr(self, private_name, epsilon_set(default))
                return
            raise ValueError(newValue)
    return property(getf, setf, None, docs)
constant[
:name: string - property name
:default: float - property default value
:readonly: boolean - if True, setter method is NOT generated
Returns a property object that can be used to initialize a
class instance variable as a property.
]
variable[private_name] assign[=] binary_operation[constant[_] + name[name]]
def function[getf, parameter[self]]:
if <ast.UnaryOp object at 0x7da18fe92f80> begin[:]
call[name[setattr], parameter[name[self], name[private_name], name[default]]]
return[call[name[getattr], parameter[name[self], name[private_name]]]]
if name[readonly] begin[:]
variable[setf] assign[=] constant[None]
return[call[name[property], parameter[name[getf], name[setf], constant[None], name[docs]]]] | keyword[def] identifier[FloatProperty] ( identifier[name] , identifier[default] = literal[int] , identifier[readonly] = keyword[False] , identifier[docs] = keyword[None] ):
literal[string]
identifier[private_name] = literal[string] + identifier[name]
keyword[def] identifier[getf] ( identifier[self] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , identifier[private_name] ):
identifier[setattr] ( identifier[self] , identifier[private_name] , identifier[default] )
keyword[return] identifier[getattr] ( identifier[self] , identifier[private_name] )
keyword[if] identifier[readonly] :
identifier[setf] = keyword[None]
keyword[else] :
keyword[def] identifier[setf] ( identifier[self] , identifier[newValue] ):
keyword[def] identifier[epsilon_set] ( identifier[v] ):
identifier[fv] = identifier[float] ( identifier[v] )
keyword[return] literal[int] keyword[if] identifier[nearly_zero] ( identifier[fv] ) keyword[else] identifier[fv]
keyword[try] :
identifier[setattr] ( identifier[self] , identifier[private_name] , identifier[epsilon_set] ( identifier[newValue] ))
keyword[return]
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[if] identifier[isinstance] ( identifier[newValue] , identifier[collections] . identifier[Mapping] ):
keyword[try] :
identifier[setattr] ( identifier[self] , identifier[private_name] , identifier[epsilon_set] ( identifier[newValue] [ identifier[name] ]))
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return]
keyword[if] identifier[isinstance] ( identifier[newValue] , identifier[collections] . identifier[Iterable] ):
keyword[try] :
identifier[setattr] ( identifier[self] , identifier[private_name] , identifier[epsilon_set] ( identifier[newValue] [ literal[int] ]))
keyword[return]
keyword[except] ( identifier[IndexError] , identifier[TypeError] ):
keyword[pass]
keyword[try] :
identifier[mapping] = identifier[vars] ( identifier[newValue] )
identifier[setattr] ( identifier[self] , identifier[private_name] , identifier[epsilon_set] ( identifier[mapping] [ identifier[name] ]))
keyword[return]
keyword[except] ( identifier[TypeError] , identifier[KeyError] ):
keyword[pass]
keyword[if] identifier[newValue] keyword[is] keyword[None] :
identifier[setattr] ( identifier[self] , identifier[private_name] , identifier[epsilon_set] ( identifier[default] ))
keyword[return]
keyword[raise] identifier[ValueError] ( identifier[newValue] )
keyword[return] identifier[property] ( identifier[getf] , identifier[setf] , keyword[None] , identifier[docs] ) | def FloatProperty(name, default=0.0, readonly=False, docs=None):
"""
:name: string - property name
:default: float - property default value
:readonly: boolean - if True, setter method is NOT generated
Returns a property object that can be used to initialize a
class instance variable as a property.
"""
private_name = '_' + name
def getf(self):
if not hasattr(self, private_name):
setattr(self, private_name, default) # depends on [control=['if'], data=[]]
return getattr(self, private_name)
if readonly:
setf = None # depends on [control=['if'], data=[]]
else:
def setf(self, newValue):
def epsilon_set(v):
# epsilon_set: creates a float from v unless that
# float is less than epsilon, which will
# be considered effectively zero.
fv = float(v)
return 0.0 if nearly_zero(fv) else fv
try:
setattr(self, private_name, epsilon_set(newValue))
return # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
if isinstance(newValue, collections.Mapping):
try:
setattr(self, private_name, epsilon_set(newValue[name])) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
return # depends on [control=['if'], data=[]]
if isinstance(newValue, collections.Iterable):
try:
setattr(self, private_name, epsilon_set(newValue[0]))
return # depends on [control=['try'], data=[]]
except (IndexError, TypeError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
try:
mapping = vars(newValue)
setattr(self, private_name, epsilon_set(mapping[name]))
return # depends on [control=['try'], data=[]]
except (TypeError, KeyError):
pass # depends on [control=['except'], data=[]]
if newValue is None:
setattr(self, private_name, epsilon_set(default))
return # depends on [control=['if'], data=[]]
raise ValueError(newValue)
return property(getf, setf, None, docs) |
def binned_pixelrange(self, waverange, **kwargs):
    """Number of pixels that the given wavelength range spans on `binset`.
    Parameters
    ----------
    waverange : tuple of float or `~astropy.units.quantity.Quantity`
        Lower and upper limits of the desired wavelength range.
        If not a Quantity, assumed to be in Angstrom.
    kwargs : dict
        Keywords accepted by :func:`synphot.binning.pixel_range`.
    Returns
    -------
    npix : int
        Number of pixels.
    """
    # Normalize the limits to the internal wavelength unit first.
    limits = units.validate_quantity(
        waverange, self._internal_wave_unit, equivalencies=u.spectral())
    npix = binning.pixel_range(self.binset.value, limits.value, **kwargs)
    return npix
constant[Calculate the number of pixels within the given wavelength
range and `binset`.
Parameters
----------
waverange : tuple of float or `~astropy.units.quantity.Quantity`
Lower and upper limits of the desired wavelength range.
If not a Quantity, assumed to be in Angstrom.
kwargs : dict
Keywords accepted by :func:`synphot.binning.pixel_range`.
Returns
-------
npix : int
Number of pixels.
]
variable[x] assign[=] call[name[units].validate_quantity, parameter[name[waverange], name[self]._internal_wave_unit]]
return[call[name[binning].pixel_range, parameter[name[self].binset.value, name[x].value]]] | keyword[def] identifier[binned_pixelrange] ( identifier[self] , identifier[waverange] ,** identifier[kwargs] ):
literal[string]
identifier[x] = identifier[units] . identifier[validate_quantity] (
identifier[waverange] , identifier[self] . identifier[_internal_wave_unit] , identifier[equivalencies] = identifier[u] . identifier[spectral] ())
keyword[return] identifier[binning] . identifier[pixel_range] ( identifier[self] . identifier[binset] . identifier[value] , identifier[x] . identifier[value] ,** identifier[kwargs] ) | def binned_pixelrange(self, waverange, **kwargs):
"""Calculate the number of pixels within the given wavelength
range and `binset`.
Parameters
----------
waverange : tuple of float or `~astropy.units.quantity.Quantity`
Lower and upper limits of the desired wavelength range.
If not a Quantity, assumed to be in Angstrom.
kwargs : dict
Keywords accepted by :func:`synphot.binning.pixel_range`.
Returns
-------
npix : int
Number of pixels.
"""
x = units.validate_quantity(waverange, self._internal_wave_unit, equivalencies=u.spectral())
return binning.pixel_range(self.binset.value, x.value, **kwargs) |
def select(self, crit):
    """
    Select subset of values that match a given index criterion.
    Parameters
    ----------
    crit : function, list, str, int
        Criterion function to map to indices, specific index value,
        or list of indices.
    Returns
    -------
    Same type as self, restricted to the matching indices (``self`` is
    returned unchanged when every index matches).
    """
    import types
    # handle lists, strings, and ints
    if not isinstance(crit, types.FunctionType):
        # set("foo") -> {"f", "o"}; wrap in list to prevent:
        if isinstance(crit, string_types):
            critlist = set([crit])
        else:
            try:
                critlist = set(crit)
            except TypeError:
                # typically means crit is not an iterable type; for instance, crit is an int
                critlist = set([crit])
        # Normalize every non-function criterion to a membership test.
        crit = lambda x: x in critlist
    # if only one index, return it directly or throw an error
    index = self.index
    if size(index) == 1:
        if crit(index[0]):
            return self
        else:
            raise Exception('No indices found matching criterion')
    # determine new index and check the result
    newindex = [i for i in index if crit(i)]
    if len(newindex) == 0:
        raise Exception('No indices found matching criterion')
    # NOTE(review): ``newindex == index`` compares the two sequences as
    # wholes, producing a single bool, so ``array(...).all()`` is just that
    # bool — confirm an element-wise comparison was not intended here.
    if array(newindex == index).all():
        return self
    # use fast logical indexing to get the new values
    subinds = where([crit(i) for i in index])
    new = self.map(lambda x: x[subinds], index=newindex)
    # if singleton, need to check whether it's an array or a scalar/int
    # if array, recompute a new set of indices
    if len(newindex) == 1:
        new = new.map(lambda x: x[0], index=newindex)
        val = new.first()
        if size(val) == 1:
            newindex = [newindex[0]]
        else:
            # Selected value is itself an array: re-index its elements 0..n-1.
            newindex = arange(0, size(val))
        new._index = newindex
    return new
constant[
Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices.
]
import module[types]
if <ast.UnaryOp object at 0x7da18f09e4d0> begin[:]
if call[name[isinstance], parameter[name[crit], name[string_types]]] begin[:]
variable[critlist] assign[=] call[name[set], parameter[list[[<ast.Name object at 0x7da18f09e0e0>]]]]
variable[crit] assign[=] <ast.Lambda object at 0x7da18f09fb80>
variable[index] assign[=] name[self].index
if compare[call[name[size], parameter[name[index]]] equal[==] constant[1]] begin[:]
if call[name[crit], parameter[call[name[index]][constant[0]]]] begin[:]
return[name[self]]
variable[newindex] assign[=] <ast.ListComp object at 0x7da18f09d390>
if compare[call[name[len], parameter[name[newindex]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18f09e1a0>
if call[call[name[array], parameter[compare[name[newindex] equal[==] name[index]]]].all, parameter[]] begin[:]
return[name[self]]
variable[subinds] assign[=] call[name[where], parameter[<ast.ListComp object at 0x7da18f09ec50>]]
variable[new] assign[=] call[name[self].map, parameter[<ast.Lambda object at 0x7da18f09dfc0>]]
if compare[call[name[len], parameter[name[newindex]]] equal[==] constant[1]] begin[:]
variable[new] assign[=] call[name[new].map, parameter[<ast.Lambda object at 0x7da18f09db10>]]
variable[val] assign[=] call[name[new].first, parameter[]]
if compare[call[name[size], parameter[name[val]]] equal[==] constant[1]] begin[:]
variable[newindex] assign[=] list[[<ast.Subscript object at 0x7da18f09ca60>]]
name[new]._index assign[=] name[newindex]
return[name[new]] | keyword[def] identifier[select] ( identifier[self] , identifier[crit] ):
literal[string]
keyword[import] identifier[types]
keyword[if] keyword[not] identifier[isinstance] ( identifier[crit] , identifier[types] . identifier[FunctionType] ):
keyword[if] identifier[isinstance] ( identifier[crit] , identifier[string_types] ):
identifier[critlist] = identifier[set] ([ identifier[crit] ])
keyword[else] :
keyword[try] :
identifier[critlist] = identifier[set] ( identifier[crit] )
keyword[except] identifier[TypeError] :
identifier[critlist] = identifier[set] ([ identifier[crit] ])
identifier[crit] = keyword[lambda] identifier[x] : identifier[x] keyword[in] identifier[critlist]
identifier[index] = identifier[self] . identifier[index]
keyword[if] identifier[size] ( identifier[index] )== literal[int] :
keyword[if] identifier[crit] ( identifier[index] [ literal[int] ]):
keyword[return] identifier[self]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[newindex] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[index] keyword[if] identifier[crit] ( identifier[i] )]
keyword[if] identifier[len] ( identifier[newindex] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[array] ( identifier[newindex] == identifier[index] ). identifier[all] ():
keyword[return] identifier[self]
identifier[subinds] = identifier[where] ([ identifier[crit] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[index] ])
identifier[new] = identifier[self] . identifier[map] ( keyword[lambda] identifier[x] : identifier[x] [ identifier[subinds] ], identifier[index] = identifier[newindex] )
keyword[if] identifier[len] ( identifier[newindex] )== literal[int] :
identifier[new] = identifier[new] . identifier[map] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[index] = identifier[newindex] )
identifier[val] = identifier[new] . identifier[first] ()
keyword[if] identifier[size] ( identifier[val] )== literal[int] :
identifier[newindex] =[ identifier[newindex] [ literal[int] ]]
keyword[else] :
identifier[newindex] = identifier[arange] ( literal[int] , identifier[size] ( identifier[val] ))
identifier[new] . identifier[_index] = identifier[newindex]
keyword[return] identifier[new] | def select(self, crit):
"""
Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices.
"""
import types
# handle lists, strings, and ints
if not isinstance(crit, types.FunctionType):
# set("foo") -> {"f", "o"}; wrap in list to prevent:
if isinstance(crit, string_types):
critlist = set([crit]) # depends on [control=['if'], data=[]]
else:
try:
critlist = set(crit) # depends on [control=['try'], data=[]]
except TypeError:
# typically means crit is not an iterable type; for instance, crit is an int
critlist = set([crit]) # depends on [control=['except'], data=[]]
crit = lambda x: x in critlist # depends on [control=['if'], data=[]]
# if only one index, return it directly or throw an error
index = self.index
if size(index) == 1:
if crit(index[0]):
return self # depends on [control=['if'], data=[]]
else:
raise Exception('No indices found matching criterion') # depends on [control=['if'], data=[]]
# determine new index and check the result
newindex = [i for i in index if crit(i)]
if len(newindex) == 0:
raise Exception('No indices found matching criterion') # depends on [control=['if'], data=[]]
if array(newindex == index).all():
return self # depends on [control=['if'], data=[]]
# use fast logical indexing to get the new values
subinds = where([crit(i) for i in index])
new = self.map(lambda x: x[subinds], index=newindex)
# if singleton, need to check whether it's an array or a scalar/int
# if array, recompute a new set of indices
if len(newindex) == 1:
new = new.map(lambda x: x[0], index=newindex)
val = new.first()
if size(val) == 1:
newindex = [newindex[0]] # depends on [control=['if'], data=[]]
else:
newindex = arange(0, size(val)) # depends on [control=['if'], data=[]]
new._index = newindex
return new |
def add_transition_to_state(from_port, to_port):
    """Interface method between Gaphas and RAFCON core for adding transitions
    The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
    parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
    :param from_port: Port from which the transition starts
    :param to_port: Port to which the transition goes to
    :return: True if a transition was added, False if an error occurred
    """
    from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
    # Resolve the state views and models that own the two ports.
    from_state_v = from_port.parent
    to_state_v = to_port.parent
    from_state_m = from_state_v.model
    to_state_m = to_state_v.model
    # Gather necessary information to create transition
    from_state_id = from_state_m.state.state_id
    to_state_id = to_state_m.state.state_id
    responsible_parent_m = None
    # Start transition
    if isinstance(from_port, IncomeView):
        # A transition starting at an income is expressed to the core with
        # from_state_id/from_outcome_id set to None.
        from_state_id = None
        from_outcome_id = None
        responsible_parent_m = from_state_m
        # Transition from parent income to child income
        if isinstance(to_port, IncomeView):
            to_outcome_id = None
        # Transition from parent income to parent outcome
        elif isinstance(to_port, OutcomeView):
            to_outcome_id = to_port.outcome_id
        # NOTE(review): if to_port is neither an IncomeView nor an
        # OutcomeView, to_outcome_id stays unbound and the add_transition
        # call below raises NameError — confirm callers only pass these
        # two port types.
    elif isinstance(from_port, OutcomeView):
        from_outcome_id = from_port.outcome_id
        # Transition from child outcome to child income
        if isinstance(to_port, IncomeView):
            responsible_parent_m = from_state_m.parent
            to_outcome_id = None
        # Transition from child outcome to parent outcome
        elif isinstance(to_port, OutcomeView):
            responsible_parent_m = to_state_m
            to_outcome_id = to_port.outcome_id
    else:
        raise ValueError("Invalid port type")
    from rafcon.gui.models.container_state import ContainerStateModel
    # Transitions can only live inside a container state model.
    if not responsible_parent_m:
        logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type(
            from_port), type(to_port)))
        return False
    elif not isinstance(responsible_parent_m, ContainerStateModel):
        logger.error("Transitions only exist in container states (e.g. hierarchy states)")
        return False
    try:
        t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
        # Self-transitions get dedicated waypoint meta data so they are
        # rendered visibly instead of collapsing onto the state border.
        if from_state_id == to_state_id:
            gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id,
                                                                  combined_action=True)
        return True
    except (ValueError, AttributeError, TypeError) as e:
        logger.error("Transition couldn't be added: {0}".format(e))
        return False
constant[Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
]
from relative_module[rafcon.gui.mygaphas.items.ports] import module[IncomeView], module[OutcomeView]
variable[from_state_v] assign[=] name[from_port].parent
variable[to_state_v] assign[=] name[to_port].parent
variable[from_state_m] assign[=] name[from_state_v].model
variable[to_state_m] assign[=] name[to_state_v].model
variable[from_state_id] assign[=] name[from_state_m].state.state_id
variable[to_state_id] assign[=] name[to_state_m].state.state_id
variable[responsible_parent_m] assign[=] constant[None]
if call[name[isinstance], parameter[name[from_port], name[IncomeView]]] begin[:]
variable[from_state_id] assign[=] constant[None]
variable[from_outcome_id] assign[=] constant[None]
variable[responsible_parent_m] assign[=] name[from_state_m]
if call[name[isinstance], parameter[name[to_port], name[IncomeView]]] begin[:]
variable[to_outcome_id] assign[=] constant[None]
from relative_module[rafcon.gui.models.container_state] import module[ContainerStateModel]
if <ast.UnaryOp object at 0x7da18bc70a60> begin[:]
call[name[logger].error, parameter[call[constant[Transitions only exist between incomes and outcomes. Given: {0} and {1}].format, parameter[call[name[type], parameter[name[from_port]]], call[name[type], parameter[name[to_port]]]]]]]
return[constant[False]]
<ast.Try object at 0x7da18bc71cc0> | keyword[def] identifier[add_transition_to_state] ( identifier[from_port] , identifier[to_port] ):
literal[string]
keyword[from] identifier[rafcon] . identifier[gui] . identifier[mygaphas] . identifier[items] . identifier[ports] keyword[import] identifier[IncomeView] , identifier[OutcomeView]
identifier[from_state_v] = identifier[from_port] . identifier[parent]
identifier[to_state_v] = identifier[to_port] . identifier[parent]
identifier[from_state_m] = identifier[from_state_v] . identifier[model]
identifier[to_state_m] = identifier[to_state_v] . identifier[model]
identifier[from_state_id] = identifier[from_state_m] . identifier[state] . identifier[state_id]
identifier[to_state_id] = identifier[to_state_m] . identifier[state] . identifier[state_id]
identifier[responsible_parent_m] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[from_port] , identifier[IncomeView] ):
identifier[from_state_id] = keyword[None]
identifier[from_outcome_id] = keyword[None]
identifier[responsible_parent_m] = identifier[from_state_m]
keyword[if] identifier[isinstance] ( identifier[to_port] , identifier[IncomeView] ):
identifier[to_outcome_id] = keyword[None]
keyword[elif] identifier[isinstance] ( identifier[to_port] , identifier[OutcomeView] ):
identifier[to_outcome_id] = identifier[to_port] . identifier[outcome_id]
keyword[elif] identifier[isinstance] ( identifier[from_port] , identifier[OutcomeView] ):
identifier[from_outcome_id] = identifier[from_port] . identifier[outcome_id]
keyword[if] identifier[isinstance] ( identifier[to_port] , identifier[IncomeView] ):
identifier[responsible_parent_m] = identifier[from_state_m] . identifier[parent]
identifier[to_outcome_id] = keyword[None]
keyword[elif] identifier[isinstance] ( identifier[to_port] , identifier[OutcomeView] ):
identifier[responsible_parent_m] = identifier[to_state_m]
identifier[to_outcome_id] = identifier[to_port] . identifier[outcome_id]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[from] identifier[rafcon] . identifier[gui] . identifier[models] . identifier[container_state] keyword[import] identifier[ContainerStateModel]
keyword[if] keyword[not] identifier[responsible_parent_m] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[type] (
identifier[from_port] ), identifier[type] ( identifier[to_port] )))
keyword[return] keyword[False]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[responsible_parent_m] , identifier[ContainerStateModel] ):
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
keyword[try] :
identifier[t_id] = identifier[responsible_parent_m] . identifier[state] . identifier[add_transition] ( identifier[from_state_id] , identifier[from_outcome_id] , identifier[to_state_id] , identifier[to_outcome_id] )
keyword[if] identifier[from_state_id] == identifier[to_state_id] :
identifier[gui_helper_meta_data] . identifier[insert_self_transition_meta_data] ( identifier[responsible_parent_m] . identifier[states] [ identifier[from_state_id] ], identifier[t_id] ,
identifier[combined_action] = keyword[True] )
keyword[return] keyword[True]
keyword[except] ( identifier[ValueError] , identifier[AttributeError] , identifier[TypeError] ) keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return] keyword[False] | def add_transition_to_state(from_port, to_port):
"""Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
"""
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
# Gather necessary information to create transition
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
responsible_parent_m = None
# Start transition
if isinstance(from_port, IncomeView):
from_state_id = None
from_outcome_id = None
responsible_parent_m = from_state_m
# Transition from parent income to child income
if isinstance(to_port, IncomeView):
to_outcome_id = None # depends on [control=['if'], data=[]]
# Transition from parent income to parent outcome
elif isinstance(to_port, OutcomeView):
to_outcome_id = to_port.outcome_id # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(from_port, OutcomeView):
from_outcome_id = from_port.outcome_id
# Transition from child outcome to child income
if isinstance(to_port, IncomeView):
responsible_parent_m = from_state_m.parent
to_outcome_id = None # depends on [control=['if'], data=[]]
# Transition from child outcome to parent outcome
elif isinstance(to_port, OutcomeView):
responsible_parent_m = to_state_m
to_outcome_id = to_port.outcome_id # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid port type')
from rafcon.gui.models.container_state import ContainerStateModel
if not responsible_parent_m:
logger.error('Transitions only exist between incomes and outcomes. Given: {0} and {1}'.format(type(from_port), type(to_port)))
return False # depends on [control=['if'], data=[]]
elif not isinstance(responsible_parent_m, ContainerStateModel):
logger.error('Transitions only exist in container states (e.g. hierarchy states)')
return False # depends on [control=['if'], data=[]]
try:
t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
if from_state_id == to_state_id:
gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id, combined_action=True) # depends on [control=['if'], data=['from_state_id']]
return True # depends on [control=['try'], data=[]]
except (ValueError, AttributeError, TypeError) as e:
logger.error("Transition couldn't be added: {0}".format(e))
return False # depends on [control=['except'], data=['e']] |
def __make_scubadir(self):
    '''Make temp directory where all ancillary files are bind-mounted
    '''
    # Create the host-side staging directory and bind it into the
    # container at a fixed hidden path.
    host_path = tempfile.mkdtemp(prefix='scubadir')
    self.__scubadir_hostpath = host_path
    self.__scubadir_contpath = '/.scuba'
    self.add_volume(host_path, self.__scubadir_contpath)
constant[Make temp directory where all ancillary files are bind-mounted
]
name[self].__scubadir_hostpath assign[=] call[name[tempfile].mkdtemp, parameter[]]
name[self].__scubadir_contpath assign[=] constant[/.scuba]
call[name[self].add_volume, parameter[name[self].__scubadir_hostpath, name[self].__scubadir_contpath]] | keyword[def] identifier[__make_scubadir] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__scubadir_hostpath] = identifier[tempfile] . identifier[mkdtemp] ( identifier[prefix] = literal[string] )
identifier[self] . identifier[__scubadir_contpath] = literal[string]
identifier[self] . identifier[add_volume] ( identifier[self] . identifier[__scubadir_hostpath] , identifier[self] . identifier[__scubadir_contpath] ) | def __make_scubadir(self):
"""Make temp directory where all ancillary files are bind-mounted
"""
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath) |
def _assess_resource_warnings(self, process, vals):
    """Assess whether the cpu load or memory usage is outside the allocation
    Parameters
    ----------
    process : str
        Process name (currently unused; kept for interface compatibility)
    vals : list
        List of trace information for each tag of that process
    Returns
    -------
    cpu_warnings : dict
        Keys are tags and values are the deviating cpu load
    mem_warnings : dict
        Keys are tags and values are the excessive rss
    """
    cpu_warnings = {}
    mem_warnings = {}
    for i in vals:
        try:
            # Each allocated CPU corresponds to 100% load in the trace.
            expected_load = float(i["cpus"]) * 100
            cpu_load = float(i["%cpu"].replace(",", ".").replace("%", ""))
            # BUG FIX: the original chained comparison
            # ``expected_load * 0.9 > cpu_load > expected_load * 1.10``
            # could never be true, so CPU warnings were never emitted.
            # Flag loads outside the 90%-110% band of the allocation.
            if not (expected_load * 0.9 <= cpu_load <= expected_load * 1.10):
                cpu_warnings[i["tag"]] = {
                    "expected": expected_load,
                    "value": cpu_load
                }
        except (ValueError, KeyError):
            # Missing or malformed trace fields for this tag: skip silently.
            pass
        try:
            rss = self._size_coverter(i["rss"])
            mem_allocated = self._size_coverter(i["memory"])
            # Memory only warns on over-use (10% above the allocation).
            if rss > mem_allocated * 1.10:
                mem_warnings[i["tag"]] = {
                    "expected": mem_allocated,
                    "value": rss
                }
        except (ValueError, KeyError):
            # Missing or malformed size fields for this tag: skip silently.
            pass
    return cpu_warnings, mem_warnings
constant[Assess whether the cpu load or memory usage is above the allocation
Parameters
----------
process : str
Process name
vals : vals
List of trace information for each tag of that process
Returns
-------
cpu_warnings : dict
Keys are tags and values are the excessive cpu load
mem_warnings : dict
Keys are tags and values are the excessive rss
]
variable[cpu_warnings] assign[=] dictionary[[], []]
variable[mem_warnings] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[vals]] begin[:]
<ast.Try object at 0x7da1b02db1c0>
<ast.Try object at 0x7da1b02d8550>
return[tuple[[<ast.Name object at 0x7da1b02dba30>, <ast.Name object at 0x7da1b02dad70>]]] | keyword[def] identifier[_assess_resource_warnings] ( identifier[self] , identifier[process] , identifier[vals] ):
literal[string]
identifier[cpu_warnings] ={}
identifier[mem_warnings] ={}
keyword[for] identifier[i] keyword[in] identifier[vals] :
keyword[try] :
identifier[expected_load] = identifier[float] ( identifier[i] [ literal[string] ])* literal[int]
identifier[cpu_load] = identifier[float] ( identifier[i] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ))
keyword[if] identifier[expected_load] * literal[int] > identifier[cpu_load] > identifier[expected_load] * literal[int] :
identifier[cpu_warnings] [ identifier[i] [ literal[string] ]]={
literal[string] : identifier[expected_load] ,
literal[string] : identifier[cpu_load]
}
keyword[except] ( identifier[ValueError] , identifier[KeyError] ):
keyword[pass]
keyword[try] :
identifier[rss] = identifier[self] . identifier[_size_coverter] ( identifier[i] [ literal[string] ])
identifier[mem_allocated] = identifier[self] . identifier[_size_coverter] ( identifier[i] [ literal[string] ])
keyword[if] identifier[rss] > identifier[mem_allocated] * literal[int] :
identifier[mem_warnings] [ identifier[i] [ literal[string] ]]={
literal[string] : identifier[mem_allocated] ,
literal[string] : identifier[rss]
}
keyword[except] ( identifier[ValueError] , identifier[KeyError] ):
keyword[pass]
keyword[return] identifier[cpu_warnings] , identifier[mem_warnings] | def _assess_resource_warnings(self, process, vals):
"""Assess whether the cpu load or memory usage is above the allocation
Parameters
----------
process : str
Process name
vals : vals
List of trace information for each tag of that process
Returns
-------
cpu_warnings : dict
Keys are tags and values are the excessive cpu load
mem_warnings : dict
Keys are tags and values are the excessive rss
"""
cpu_warnings = {}
mem_warnings = {}
for i in vals:
try:
expected_load = float(i['cpus']) * 100
cpu_load = float(i['%cpu'].replace(',', '.').replace('%', ''))
if expected_load * 0.9 > cpu_load > expected_load * 1.1:
cpu_warnings[i['tag']] = {'expected': expected_load, 'value': cpu_load} # depends on [control=['if'], data=['cpu_load']] # depends on [control=['try'], data=[]]
except (ValueError, KeyError):
pass # depends on [control=['except'], data=[]]
try:
rss = self._size_coverter(i['rss'])
mem_allocated = self._size_coverter(i['memory'])
if rss > mem_allocated * 1.1:
mem_warnings[i['tag']] = {'expected': mem_allocated, 'value': rss} # depends on [control=['if'], data=['rss']] # depends on [control=['try'], data=[]]
except (ValueError, KeyError):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']]
return (cpu_warnings, mem_warnings) |
def _load_idp_dynamic_entity_id(self, state):
    """
    Build an idp server whose entity id is derived from the saved state.
    :type state: satosa.state.State
    :rtype: saml.server.Server
    :param state: The current state
    :return: An idp server
    """
    # Work on a deep copy so the shared base configuration is never mutated.
    conf = copy.deepcopy(self.idp_config)
    target = state[self.name]["target_entity_id"]
    conf["entityid"] = "{}/{}".format(self.idp_config["entityid"], target)
    parsed_conf = IdPConfig().load(conf, metadata_construction=False)
    return Server(config=parsed_conf)
constant[
Loads an idp server with the entity id saved in state
:type state: satosa.state.State
:rtype: saml.server.Server
:param state: The current state
:return: An idp server
]
variable[idp_config_file] assign[=] call[name[copy].deepcopy, parameter[name[self].idp_config]]
call[name[idp_config_file]][constant[entityid]] assign[=] call[constant[{}/{}].format, parameter[call[name[self].idp_config][constant[entityid]], call[call[name[state]][name[self].name]][constant[target_entity_id]]]]
variable[idp_config] assign[=] call[call[name[IdPConfig], parameter[]].load, parameter[name[idp_config_file]]]
return[call[name[Server], parameter[]]] | keyword[def] identifier[_load_idp_dynamic_entity_id] ( identifier[self] , identifier[state] ):
literal[string]
identifier[idp_config_file] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[idp_config] )
identifier[idp_config_file] [ literal[string] ]= literal[string] . identifier[format] ( identifier[self] . identifier[idp_config] [ literal[string] ], identifier[state] [ identifier[self] . identifier[name] ][ literal[string] ])
identifier[idp_config] = identifier[IdPConfig] (). identifier[load] ( identifier[idp_config_file] , identifier[metadata_construction] = keyword[False] )
keyword[return] identifier[Server] ( identifier[config] = identifier[idp_config] ) | def _load_idp_dynamic_entity_id(self, state):
"""
Loads an idp server with the entity id saved in state
:type state: satosa.state.State
:rtype: saml.server.Server
:param state: The current state
:return: An idp server
"""
# Change the idp entity id dynamically
idp_config_file = copy.deepcopy(self.idp_config)
idp_config_file['entityid'] = '{}/{}'.format(self.idp_config['entityid'], state[self.name]['target_entity_id'])
idp_config = IdPConfig().load(idp_config_file, metadata_construction=False)
return Server(config=idp_config) |
def process_request():
    """
    Build an ImportRequest from the body of the current HTTP request.
    The parsed body is stashed on flask's ``g.request_dict``; the entity type
    and its ID are read from it and the corresponding entity is looked up
    (the lookup may legitimately miss, e.g. after a cache flush or server
    restart, in which case the entity is None and the exporting party will
    have to re-send it).
    :return:
        An ImportRequest carrying the resolved entity (or None on a cache
        miss) and the UUID of the entity that was requested.
    """
    payload = safe_load(request.get_data())
    g.request_dict = payload
    requested_type = payload['type']
    requested_id = payload[requested_type]['id']
    log = ImportRequest.logger
    log.debug("Received request, type={0}, id={1}".format(requested_type, requested_id))
    resolved = ImportRequest._get_entity(requested_id)
    log.debug("Entity with id={0} was {1}".format(requested_id, resolved))
    return ImportRequest(entity=resolved, entity_id=requested_id)
constant[
Retrieve a CameraStatus, Event or FileRecord from the request, based on the supplied type and ID. If the type is
'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an entity in the
cache this method will return None and clear the cache (this should only happen under conditions where we've
failed to correctly handle caching, such as a server restart or under extreme load, but will result in the
server having to re-request a previous value from the exporting party).
:return:
A dict containing 'entity' - the entity for this request or None if there was an issue causing an unexpected
cache miss, and 'entity-id' which will be the UUID of the entity requested.
The entity corresponding to this request, or None if we had an issue and there was an unexpected cache miss.
]
name[g].request_dict assign[=] call[name[safe_load], parameter[call[name[request].get_data, parameter[]]]]
variable[entity_type] assign[=] call[name[g].request_dict][constant[type]]
variable[entity_id] assign[=] call[call[name[g].request_dict][name[entity_type]]][constant[id]]
call[name[ImportRequest].logger.debug, parameter[call[constant[Received request, type={0}, id={1}].format, parameter[name[entity_type], name[entity_id]]]]]
variable[entity] assign[=] call[name[ImportRequest]._get_entity, parameter[name[entity_id]]]
call[name[ImportRequest].logger.debug, parameter[call[constant[Entity with id={0} was {1}].format, parameter[name[entity_id], name[entity]]]]]
return[call[name[ImportRequest], parameter[]]] | keyword[def] identifier[process_request] ():
literal[string]
identifier[g] . identifier[request_dict] = identifier[safe_load] ( identifier[request] . identifier[get_data] ())
identifier[entity_type] = identifier[g] . identifier[request_dict] [ literal[string] ]
identifier[entity_id] = identifier[g] . identifier[request_dict] [ identifier[entity_type] ][ literal[string] ]
identifier[ImportRequest] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[entity_type] , identifier[entity_id] ))
identifier[entity] = identifier[ImportRequest] . identifier[_get_entity] ( identifier[entity_id] )
identifier[ImportRequest] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[entity_id] , identifier[entity] ))
keyword[return] identifier[ImportRequest] ( identifier[entity] = identifier[entity] , identifier[entity_id] = identifier[entity_id] ) | def process_request():
"""
Retrieve a CameraStatus, Event or FileRecord from the request, based on the supplied type and ID. If the type is
'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an entity in the
cache this method will return None and clear the cache (this should only happen under conditions where we've
failed to correctly handle caching, such as a server restart or under extreme load, but will result in the
server having to re-request a previous value from the exporting party).
:return:
A dict containing 'entity' - the entity for this request or None if there was an issue causing an unexpected
cache miss, and 'entity-id' which will be the UUID of the entity requested.
The entity corresponding to this request, or None if we had an issue and there was an unexpected cache miss.
"""
g.request_dict = safe_load(request.get_data())
entity_type = g.request_dict['type']
entity_id = g.request_dict[entity_type]['id']
ImportRequest.logger.debug('Received request, type={0}, id={1}'.format(entity_type, entity_id))
entity = ImportRequest._get_entity(entity_id)
ImportRequest.logger.debug('Entity with id={0} was {1}'.format(entity_id, entity))
return ImportRequest(entity=entity, entity_id=entity_id) |
def append(self, electrode_id):
    '''
    Append the specified electrode to the route.
    The route is not modified (i.e., electrode is not appended) if
    electrode is not connected to the last electrode in the existing route.
    Parameters
    ----------
    electrode_id : str
        Electrode identifier.
    '''
    accepted = False
    if not self.electrode_ids:
        # An empty route accepts any first electrode.
        accepted = True
    elif self.device.shape_indexes.shape[0] > 0:
        last = self.electrode_ids[-1]
        candidate = electrode_id
        if last != candidate:
            last_id, candidate_id = self.device.shape_indexes[[last,
                                                               candidate]]
            try:
                if self.device.adjacency_matrix[last_id, candidate_id]:
                    # Electrodes are adjacent, so the candidate may extend
                    # the current route.
                    accepted = True
            except IndexError:
                logger.warning('Electrodes `%s` and `%s` are not '
                               'connected.', last, candidate)
    if accepted:
        self.electrode_ids.append(electrode_id)
    return accepted
constant[
Append the specified electrode to the route.
The route is not modified (i.e., electrode is not appended) if
electrode is not connected to the last electrode in the existing route.
Parameters
----------
electrode_id : str
Electrode identifier.
]
variable[do_append] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b2716a40> begin[:]
variable[do_append] assign[=] constant[True]
if name[do_append] begin[:]
call[name[self].electrode_ids.append, parameter[name[electrode_id]]]
return[name[do_append]] | keyword[def] identifier[append] ( identifier[self] , identifier[electrode_id] ):
literal[string]
identifier[do_append] = keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[electrode_ids] :
identifier[do_append] = keyword[True]
keyword[elif] identifier[self] . identifier[device] . identifier[shape_indexes] . identifier[shape] [ literal[int] ]> literal[int] :
identifier[source] = identifier[self] . identifier[electrode_ids] [- literal[int] ]
identifier[target] = identifier[electrode_id]
keyword[if] keyword[not] ( identifier[source] == identifier[target] ):
identifier[source_id] , identifier[target_id] = identifier[self] . identifier[device] . identifier[shape_indexes] [[ identifier[source] ,
identifier[target] ]]
keyword[try] :
keyword[if] identifier[self] . identifier[device] . identifier[adjacency_matrix] [ identifier[source_id] , identifier[target_id] ]:
identifier[do_append] = keyword[True]
keyword[except] identifier[IndexError] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] , identifier[source] , identifier[target] )
keyword[if] identifier[do_append] :
identifier[self] . identifier[electrode_ids] . identifier[append] ( identifier[electrode_id] )
keyword[return] identifier[do_append] | def append(self, electrode_id):
"""
Append the specified electrode to the route.
The route is not modified (i.e., electrode is not appended) if
electrode is not connected to the last electrode in the existing route.
Parameters
----------
electrode_id : str
Electrode identifier.
"""
do_append = False
if not self.electrode_ids:
do_append = True # depends on [control=['if'], data=[]]
elif self.device.shape_indexes.shape[0] > 0:
source = self.electrode_ids[-1]
target = electrode_id
if not source == target:
(source_id, target_id) = self.device.shape_indexes[[source, target]]
try:
if self.device.adjacency_matrix[source_id, target_id]:
# Electrodes are connected, so append target to current
# route.
do_append = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
logger.warning('Electrodes `%s` and `%s` are not connected.', source, target) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if do_append:
self.electrode_ids.append(electrode_id) # depends on [control=['if'], data=[]]
return do_append |
def submit_audit(self, item_list):
    """
    Submit the code package uploaded by a third party for review.
    See:
    https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
    :param item_list: list of items to submit for review
        (at least 1 entry, at most 5 entries)
    :type item_list: list[dict]
    :return: audit id
    :rtype: int
    """
    payload = {'item_list': item_list}
    # The API answers with a JSON body; only the audit id is of interest.
    return self._post(
        'wxa/submit_audit',
        data=payload,
        result_processor=lambda res: res['auditid'],
    )
constant[
将第三方提交的代码包提交审核
详情请参考
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
:param item_list: 提交审核项的一个列表(至少填写1项,至多填写5项)
:type item_list: list[dict]
:return: 审核编号
:rtype: int
]
return[call[name[self]._post, parameter[constant[wxa/submit_audit]]]] | keyword[def] identifier[submit_audit] ( identifier[self] , identifier[item_list] ):
literal[string]
keyword[return] identifier[self] . identifier[_post] (
literal[string] ,
identifier[data] ={
literal[string] : identifier[item_list] ,
},
identifier[result_processor] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ],
) | def submit_audit(self, item_list):
"""
将第三方提交的代码包提交审核
详情请参考
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4
:param item_list: 提交审核项的一个列表(至少填写1项,至多填写5项)
:type item_list: list[dict]
:return: 审核编号
:rtype: int
"""
return self._post('wxa/submit_audit', data={'item_list': item_list}, result_processor=lambda x: x['auditid']) |
def get_tile_images_by_rect(self, rect):
    """ Speed up data access
    More efficient because data is accessed and cached locally
    Yields (x, y, layer, image) for every non-empty tile intersecting
    *rect*, preferring the current animation frame over the static
    tileset image.
    """
    def rev(seq, start, stop):
        # Clamp the window to the start of the sequence and enumerate with
        # absolute indices so callers get real map coordinates back.
        if start < 0:
            start = 0
        return enumerate(seq[start:stop + 1], start)
    x1, y1, x2, y2 = rect_to_bb(rect)
    # Bind hot attributes to locals: this generator runs per frame, so
    # avoiding repeated attribute lookups inside the triple loop matters.
    images = self.tmx.images
    layers = self.tmx.layers
    at = self._animated_tile
    tracked_gids = self._tracked_gids
    anim_map = self._animation_map
    track = bool(self._animation_queue)
    for l in self.tmx.visible_tile_layers:
        for y, row in rev(layers[l].data, y1, y2):
            # The filter `if i[1]` drops gid 0, i.e. empty tiles.
            for x, gid in [i for i in rev(row, x1, x2) if i[1]]:
                # since the tile has been queried, assume it wants to be checked
                # for animations sometime in the future
                if track and gid in tracked_gids:
                    anim_map[gid].positions.add((x, y, l))
                try:
                    # animated, so return the correct frame
                    yield x, y, l, at[(x, y, l)]
                except KeyError:
                    # not animated, so return surface from data, if any
                    yield x, y, l, images[gid]
constant[ Speed up data access
More efficient because data is accessed and cached locally
]
def function[rev, parameter[seq, start, stop]]:
if compare[name[start] less[<] constant[0]] begin[:]
variable[start] assign[=] constant[0]
return[call[name[enumerate], parameter[call[name[seq]][<ast.Slice object at 0x7da18bcc9f60>], name[start]]]]
<ast.Tuple object at 0x7da18bccb160> assign[=] call[name[rect_to_bb], parameter[name[rect]]]
variable[images] assign[=] name[self].tmx.images
variable[layers] assign[=] name[self].tmx.layers
variable[at] assign[=] name[self]._animated_tile
variable[tracked_gids] assign[=] name[self]._tracked_gids
variable[anim_map] assign[=] name[self]._animation_map
variable[track] assign[=] call[name[bool], parameter[name[self]._animation_queue]]
for taget[name[l]] in starred[name[self].tmx.visible_tile_layers] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18bcc83d0>, <ast.Name object at 0x7da18bcca2f0>]]] in starred[call[name[rev], parameter[call[name[layers]][name[l]].data, name[y1], name[y2]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204567760>, <ast.Name object at 0x7da204567c70>]]] in starred[<ast.ListComp object at 0x7da204564820>] begin[:]
if <ast.BoolOp object at 0x7da2045663e0> begin[:]
call[call[name[anim_map]][name[gid]].positions.add, parameter[tuple[[<ast.Name object at 0x7da18dc06ec0>, <ast.Name object at 0x7da18dc06680>, <ast.Name object at 0x7da18dc06a40>]]]]
<ast.Try object at 0x7da18dc05fc0> | keyword[def] identifier[get_tile_images_by_rect] ( identifier[self] , identifier[rect] ):
literal[string]
keyword[def] identifier[rev] ( identifier[seq] , identifier[start] , identifier[stop] ):
keyword[if] identifier[start] < literal[int] :
identifier[start] = literal[int]
keyword[return] identifier[enumerate] ( identifier[seq] [ identifier[start] : identifier[stop] + literal[int] ], identifier[start] )
identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] = identifier[rect_to_bb] ( identifier[rect] )
identifier[images] = identifier[self] . identifier[tmx] . identifier[images]
identifier[layers] = identifier[self] . identifier[tmx] . identifier[layers]
identifier[at] = identifier[self] . identifier[_animated_tile]
identifier[tracked_gids] = identifier[self] . identifier[_tracked_gids]
identifier[anim_map] = identifier[self] . identifier[_animation_map]
identifier[track] = identifier[bool] ( identifier[self] . identifier[_animation_queue] )
keyword[for] identifier[l] keyword[in] identifier[self] . identifier[tmx] . identifier[visible_tile_layers] :
keyword[for] identifier[y] , identifier[row] keyword[in] identifier[rev] ( identifier[layers] [ identifier[l] ]. identifier[data] , identifier[y1] , identifier[y2] ):
keyword[for] identifier[x] , identifier[gid] keyword[in] [ identifier[i] keyword[for] identifier[i] keyword[in] identifier[rev] ( identifier[row] , identifier[x1] , identifier[x2] ) keyword[if] identifier[i] [ literal[int] ]]:
keyword[if] identifier[track] keyword[and] identifier[gid] keyword[in] identifier[tracked_gids] :
identifier[anim_map] [ identifier[gid] ]. identifier[positions] . identifier[add] (( identifier[x] , identifier[y] , identifier[l] ))
keyword[try] :
keyword[yield] identifier[x] , identifier[y] , identifier[l] , identifier[at] [( identifier[x] , identifier[y] , identifier[l] )]
keyword[except] identifier[KeyError] :
keyword[yield] identifier[x] , identifier[y] , identifier[l] , identifier[images] [ identifier[gid] ] | def get_tile_images_by_rect(self, rect):
""" Speed up data access
More efficient because data is accessed and cached locally
"""
def rev(seq, start, stop):
if start < 0:
start = 0 # depends on [control=['if'], data=['start']]
return enumerate(seq[start:stop + 1], start)
(x1, y1, x2, y2) = rect_to_bb(rect)
images = self.tmx.images
layers = self.tmx.layers
at = self._animated_tile
tracked_gids = self._tracked_gids
anim_map = self._animation_map
track = bool(self._animation_queue)
for l in self.tmx.visible_tile_layers:
for (y, row) in rev(layers[l].data, y1, y2):
for (x, gid) in [i for i in rev(row, x1, x2) if i[1]]:
# since the tile has been queried, assume it wants to be checked
# for animations sometime in the future
if track and gid in tracked_gids:
anim_map[gid].positions.add((x, y, l)) # depends on [control=['if'], data=[]]
try:
# animated, so return the correct frame
yield (x, y, l, at[x, y, l]) # depends on [control=['try'], data=[]]
except KeyError:
# not animated, so return surface from data, if any
yield (x, y, l, images[gid]) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['l']] |
def _load_params_of(self, effect):
    """
    Called only when a effect has created
    Pushes every parameter whose value differs from its default; each push
    calls :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
    """
    non_default = (param for param in effect.params
                   if param.value != param.default)
    for param in non_default:
        self._set_param_value(param)
constant[
Called only when a effect has created
Param changes calls :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
]
for taget[name[param]] in starred[name[effect].params] begin[:]
if compare[name[param].value not_equal[!=] name[param].default] begin[:]
call[name[self]._set_param_value, parameter[name[param]]] | keyword[def] identifier[_load_params_of] ( identifier[self] , identifier[effect] ):
literal[string]
keyword[for] identifier[param] keyword[in] identifier[effect] . identifier[params] :
keyword[if] identifier[param] . identifier[value] != identifier[param] . identifier[default] :
identifier[self] . identifier[_set_param_value] ( identifier[param] ) | def _load_params_of(self, effect):
"""
Called only when a effect has created
Param changes calls :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
"""
for param in effect.params:
if param.value != param.default:
self._set_param_value(param) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['param']] |
def rename(*args):
    """
    Rename OLD_FILENAME to NEW_FILENAME or move OLD_FILENAME(s) to NEW_DIRECTORY
    The rename and move commands are the same.
    Files on disk, including unfollowed files, are moved, if they are not already in the new location.
    This is a generator: it yields human-readable status/error messages.
    """
    parser = argparse.ArgumentParser(prog="%s %s" % (__package__, rename.__name__), description=rename.__doc__)
    parser.add_argument('OLD_FILENAME', help="old file name", nargs='+')
    parser.add_argument('NEW_FILENAME', help="new file name", nargs=1)
    args = parser.parse_args(args)
    def _rename(config, old_name, new_name):
        """Rename one tracked file; yields an error message on failure."""
        old_path = os.path.realpath(old_name)
        old_key = os.path.relpath(old_path, config.root)
        new_path = os.path.realpath(new_name)
        new_key = os.path.relpath(new_path, config.root)
        # NOTE(review): existence is probed with os.access(R|W), so an
        # existing-but-unreadable file is treated as absent — confirm this
        # is the intended semantics.
        if old_key not in config['files']:
            yield "Could not rename '%s', it is not being tracked" % old_name
        elif new_key in config['files']:
            yield "Could not rename '%s' to '%s', '%s' is already being tracked" % (old_name, new_name, new_name)
        elif os.access(old_path, os.W_OK|os.R_OK) and os.access(new_path, os.W_OK|os.R_OK):
            yield "Could not rename '%s' to '%s', both files already exist" % (old_name, new_name)
        elif not os.access(old_path, os.W_OK|os.R_OK) and not os.access(new_path, os.W_OK|os.R_OK):
            yield "Could not rename '%s' to '%s', neither file exists" % (old_name, new_name)
        else:
            # Move the stored fragment blob to the key derived from the new
            # name, then update the tracking table.
            new_sha = _file_key(new_key)
            os.rename(os.path.join(config.directory, config['files'][old_key]), os.path.join(config.directory, new_sha))
            config['files'][new_key] = new_sha
            del config['files'][old_key]
            # Only move the working-tree file if the old one actually exists.
            if os.access(old_path, os.W_OK|os.R_OK):
                os.rename(old_path, new_path)
    config = FragmentsConfig()
    dest_path = os.path.relpath(args.NEW_FILENAME[0])
    dest_isdir = os.path.isdir(dest_path)
    # Moving several sources only makes sense into a directory.
    if len(args.OLD_FILENAME) > 1 and not dest_isdir:
        yield "Could not rename multiple files, '%s' is not a directory." % os.path.relpath(dest_path)
        return
    for src_path in args.OLD_FILENAME:
        if os.path.isdir(src_path):
            # Collect the tracked files under the directory before moving it,
            # so their old paths can still be resolved.
            old_names = list(_iterate_over_files([src_path], config, statuses='MDA '))
            if os.access(dest_path, os.R_OK):
                # Destination exists: move the directory *into* it, keeping
                # the source directory name as a path component.
                os.rename(src_path, os.path.join(dest_path, src_path))
                for s, path in old_names:
                    old_name = os.path.relpath(path)
                    new_name = os.path.join(dest_path, old_name)
                    for y in _rename(config, old_name, new_name):
                        yield y
            else:
                # Destination does not exist: the directory is renamed to it,
                # so strip the old directory prefix from each tracked path.
                os.rename(src_path, dest_path)
                for s, path in old_names:
                    old_name = os.path.relpath(path)
                    new_name = os.path.join(dest_path, old_name[len(src_path)+1:])
                    for y in _rename(config, old_name, new_name):
                        yield y
        else:
            old_name = os.path.relpath(src_path)
            if dest_isdir:
                # Moving a single file into an existing directory keeps its
                # base name.
                new_name = os.path.join(dest_path, os.path.basename(src_path))
            else:
                new_name = dest_path
            for y in _rename(config, old_name, new_name):
                yield y
    config.dump()
constant[
Rename OLD_FILENAME to NEW_FILENAME or move OLD_FILENAME(s) to NEW_DIRECTORY
The rename and move commands are the same.
Files on disk, including unfollowed files, are moved, if they are not already in the new location.
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[OLD_FILENAME]]]
call[name[parser].add_argument, parameter[constant[NEW_FILENAME]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[args]]]
def function[_rename, parameter[config, old_name, new_name]]:
variable[old_path] assign[=] call[name[os].path.realpath, parameter[name[old_name]]]
variable[old_key] assign[=] call[name[os].path.relpath, parameter[name[old_path], name[config].root]]
variable[new_path] assign[=] call[name[os].path.realpath, parameter[name[new_name]]]
variable[new_key] assign[=] call[name[os].path.relpath, parameter[name[new_path], name[config].root]]
if compare[name[old_key] <ast.NotIn object at 0x7da2590d7190> call[name[config]][constant[files]]] begin[:]
<ast.Yield object at 0x7da1b086ec50>
variable[config] assign[=] call[name[FragmentsConfig], parameter[]]
variable[dest_path] assign[=] call[name[os].path.relpath, parameter[call[name[args].NEW_FILENAME][constant[0]]]]
variable[dest_isdir] assign[=] call[name[os].path.isdir, parameter[name[dest_path]]]
if <ast.BoolOp object at 0x7da1b086db40> begin[:]
<ast.Yield object at 0x7da1b086d7e0>
return[None]
for taget[name[src_path]] in starred[name[args].OLD_FILENAME] begin[:]
if call[name[os].path.isdir, parameter[name[src_path]]] begin[:]
variable[old_names] assign[=] call[name[list], parameter[call[name[_iterate_over_files], parameter[list[[<ast.Name object at 0x7da1b0804280>]], name[config]]]]]
if call[name[os].access, parameter[name[dest_path], name[os].R_OK]] begin[:]
call[name[os].rename, parameter[name[src_path], call[name[os].path.join, parameter[name[dest_path], name[src_path]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0804640>, <ast.Name object at 0x7da1b08040a0>]]] in starred[name[old_names]] begin[:]
variable[old_name] assign[=] call[name[os].path.relpath, parameter[name[path]]]
variable[new_name] assign[=] call[name[os].path.join, parameter[name[dest_path], name[old_name]]]
for taget[name[y]] in starred[call[name[_rename], parameter[name[config], name[old_name], name[new_name]]]] begin[:]
<ast.Yield object at 0x7da1b08318a0>
call[name[config].dump, parameter[]] | keyword[def] identifier[rename] (* identifier[args] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[prog] = literal[string] %( identifier[__package__] , identifier[rename] . identifier[__name__] ), identifier[description] = identifier[rename] . identifier[__doc__] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[nargs] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[nargs] = literal[int] )
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[args] )
keyword[def] identifier[_rename] ( identifier[config] , identifier[old_name] , identifier[new_name] ):
identifier[old_path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[old_name] )
identifier[old_key] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[old_path] , identifier[config] . identifier[root] )
identifier[new_path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[new_name] )
identifier[new_key] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[new_path] , identifier[config] . identifier[root] )
keyword[if] identifier[old_key] keyword[not] keyword[in] identifier[config] [ literal[string] ]:
keyword[yield] literal[string] % identifier[old_name]
keyword[elif] identifier[new_key] keyword[in] identifier[config] [ literal[string] ]:
keyword[yield] literal[string] %( identifier[old_name] , identifier[new_name] , identifier[new_name] )
keyword[elif] identifier[os] . identifier[access] ( identifier[old_path] , identifier[os] . identifier[W_OK] | identifier[os] . identifier[R_OK] ) keyword[and] identifier[os] . identifier[access] ( identifier[new_path] , identifier[os] . identifier[W_OK] | identifier[os] . identifier[R_OK] ):
keyword[yield] literal[string] %( identifier[old_name] , identifier[new_name] )
keyword[elif] keyword[not] identifier[os] . identifier[access] ( identifier[old_path] , identifier[os] . identifier[W_OK] | identifier[os] . identifier[R_OK] ) keyword[and] keyword[not] identifier[os] . identifier[access] ( identifier[new_path] , identifier[os] . identifier[W_OK] | identifier[os] . identifier[R_OK] ):
keyword[yield] literal[string] %( identifier[old_name] , identifier[new_name] )
keyword[else] :
identifier[new_sha] = identifier[_file_key] ( identifier[new_key] )
identifier[os] . identifier[rename] ( identifier[os] . identifier[path] . identifier[join] ( identifier[config] . identifier[directory] , identifier[config] [ literal[string] ][ identifier[old_key] ]), identifier[os] . identifier[path] . identifier[join] ( identifier[config] . identifier[directory] , identifier[new_sha] ))
identifier[config] [ literal[string] ][ identifier[new_key] ]= identifier[new_sha]
keyword[del] identifier[config] [ literal[string] ][ identifier[old_key] ]
keyword[if] identifier[os] . identifier[access] ( identifier[old_path] , identifier[os] . identifier[W_OK] | identifier[os] . identifier[R_OK] ):
identifier[os] . identifier[rename] ( identifier[old_path] , identifier[new_path] )
identifier[config] = identifier[FragmentsConfig] ()
identifier[dest_path] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[args] . identifier[NEW_FILENAME] [ literal[int] ])
identifier[dest_isdir] = identifier[os] . identifier[path] . identifier[isdir] ( identifier[dest_path] )
keyword[if] identifier[len] ( identifier[args] . identifier[OLD_FILENAME] )> literal[int] keyword[and] keyword[not] identifier[dest_isdir] :
keyword[yield] literal[string] % identifier[os] . identifier[path] . identifier[relpath] ( identifier[dest_path] )
keyword[return]
keyword[for] identifier[src_path] keyword[in] identifier[args] . identifier[OLD_FILENAME] :
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[src_path] ):
identifier[old_names] = identifier[list] ( identifier[_iterate_over_files] ([ identifier[src_path] ], identifier[config] , identifier[statuses] = literal[string] ))
keyword[if] identifier[os] . identifier[access] ( identifier[dest_path] , identifier[os] . identifier[R_OK] ):
identifier[os] . identifier[rename] ( identifier[src_path] , identifier[os] . identifier[path] . identifier[join] ( identifier[dest_path] , identifier[src_path] ))
keyword[for] identifier[s] , identifier[path] keyword[in] identifier[old_names] :
identifier[old_name] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] )
identifier[new_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest_path] , identifier[old_name] )
keyword[for] identifier[y] keyword[in] identifier[_rename] ( identifier[config] , identifier[old_name] , identifier[new_name] ):
keyword[yield] identifier[y]
keyword[else] :
identifier[os] . identifier[rename] ( identifier[src_path] , identifier[dest_path] )
keyword[for] identifier[s] , identifier[path] keyword[in] identifier[old_names] :
identifier[old_name] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] )
identifier[new_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest_path] , identifier[old_name] [ identifier[len] ( identifier[src_path] )+ literal[int] :])
keyword[for] identifier[y] keyword[in] identifier[_rename] ( identifier[config] , identifier[old_name] , identifier[new_name] ):
keyword[yield] identifier[y]
keyword[else] :
identifier[old_name] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[src_path] )
keyword[if] identifier[dest_isdir] :
identifier[new_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest_path] , identifier[os] . identifier[path] . identifier[basename] ( identifier[src_path] ))
keyword[else] :
identifier[new_name] = identifier[dest_path]
keyword[for] identifier[y] keyword[in] identifier[_rename] ( identifier[config] , identifier[old_name] , identifier[new_name] ):
keyword[yield] identifier[y]
identifier[config] . identifier[dump] () | def rename(*args):
"""
Rename OLD_FILENAME to NEW_FILENAME or move OLD_FILENAME(s) to NEW_DIRECTORY
The rename and move commands are the same.
Files on disk, including unfollowed files, are moved, if they are not already in the new location.
"""
parser = argparse.ArgumentParser(prog='%s %s' % (__package__, rename.__name__), description=rename.__doc__)
parser.add_argument('OLD_FILENAME', help='old file name', nargs='+')
parser.add_argument('NEW_FILENAME', help='new file name', nargs=1)
args = parser.parse_args(args)
def _rename(config, old_name, new_name):
old_path = os.path.realpath(old_name)
old_key = os.path.relpath(old_path, config.root)
new_path = os.path.realpath(new_name)
new_key = os.path.relpath(new_path, config.root)
if old_key not in config['files']:
yield ("Could not rename '%s', it is not being tracked" % old_name) # depends on [control=['if'], data=[]]
elif new_key in config['files']:
yield ("Could not rename '%s' to '%s', '%s' is already being tracked" % (old_name, new_name, new_name)) # depends on [control=['if'], data=[]]
elif os.access(old_path, os.W_OK | os.R_OK) and os.access(new_path, os.W_OK | os.R_OK):
yield ("Could not rename '%s' to '%s', both files already exist" % (old_name, new_name)) # depends on [control=['if'], data=[]]
elif not os.access(old_path, os.W_OK | os.R_OK) and (not os.access(new_path, os.W_OK | os.R_OK)):
yield ("Could not rename '%s' to '%s', neither file exists" % (old_name, new_name)) # depends on [control=['if'], data=[]]
else:
new_sha = _file_key(new_key)
os.rename(os.path.join(config.directory, config['files'][old_key]), os.path.join(config.directory, new_sha))
config['files'][new_key] = new_sha
del config['files'][old_key]
if os.access(old_path, os.W_OK | os.R_OK):
os.rename(old_path, new_path) # depends on [control=['if'], data=[]]
config = FragmentsConfig()
dest_path = os.path.relpath(args.NEW_FILENAME[0])
dest_isdir = os.path.isdir(dest_path)
if len(args.OLD_FILENAME) > 1 and (not dest_isdir):
yield ("Could not rename multiple files, '%s' is not a directory." % os.path.relpath(dest_path))
return # depends on [control=['if'], data=[]]
for src_path in args.OLD_FILENAME:
if os.path.isdir(src_path):
old_names = list(_iterate_over_files([src_path], config, statuses='MDA '))
if os.access(dest_path, os.R_OK):
os.rename(src_path, os.path.join(dest_path, src_path))
for (s, path) in old_names:
old_name = os.path.relpath(path)
new_name = os.path.join(dest_path, old_name)
for y in _rename(config, old_name, new_name):
yield y # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
os.rename(src_path, dest_path)
for (s, path) in old_names:
old_name = os.path.relpath(path)
new_name = os.path.join(dest_path, old_name[len(src_path) + 1:])
for y in _rename(config, old_name, new_name):
yield y # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
old_name = os.path.relpath(src_path)
if dest_isdir:
new_name = os.path.join(dest_path, os.path.basename(src_path)) # depends on [control=['if'], data=[]]
else:
new_name = dest_path
for y in _rename(config, old_name, new_name):
yield y # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=['src_path']]
config.dump() |
def shepherd(x, y, n_boot=200):
    """
    Shepherd's Pi correlation, equivalent to Spearman's rho after outliers
    removal.

    Parameters
    ----------
    x, y : array_like
        First and second set of observations. x and y must be independent.
    n_boot : int
        Number of bootstrap samples to calculate.

    Returns
    -------
    r : float
        Pi correlation coefficient
    pval : float
        Two-tailed adjusted p-value.
    outliers : array of bool
        Indicate if value is an outlier or not

    Notes
    -----
    Bootstraps the Mahalanobis distances, flags every observation whose
    distance is >= 6 as an outlier, and computes Spearman's rho on the
    remaining observations. Pi is Spearman's Rho after outlier removal.
    """
    from scipy.stats import spearmanr

    data = np.column_stack((x, y))
    # Bootstrapped Mahalanobis distance of every observation.
    distances = bsmahal(data, data, n_boot)
    # Observations far from the bulk of the data are flagged as outliers.
    outliers = distances >= 6
    keep = ~outliers
    r, pval = spearmanr(x[keep], y[keep])
    # NOTE: some variants double the p-value here to achieve a nominal
    # false alarm rate (and clamp it at 1); this implementation does not.
    return r, pval, outliers
constant[
Shepherd's Pi correlation, equivalent to Spearman's rho after outliers
removal.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
r : float
Pi correlation coefficient
pval : float
Two-tailed adjusted p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
It first bootstraps the Mahalanobis distances, removes all observations
with m >= 6 and finally calculates the correlation of the remaining data.
Pi is Spearman's Rho after outlier removal.
]
from relative_module[scipy.stats] import module[spearmanr]
variable[X] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Name object at 0x7da204565330>, <ast.Name object at 0x7da204566f80>]]]]
variable[m] assign[=] call[name[bsmahal], parameter[name[X], name[X], name[n_boot]]]
variable[outliers] assign[=] compare[name[m] greater_or_equal[>=] constant[6]]
<ast.Tuple object at 0x7da204567910> assign[=] call[name[spearmanr], parameter[call[name[x]][<ast.UnaryOp object at 0x7da204564b20>], call[name[y]][<ast.UnaryOp object at 0x7da204567430>]]]
return[tuple[[<ast.Name object at 0x7da204564760>, <ast.Name object at 0x7da204566140>, <ast.Name object at 0x7da204565ae0>]]] | keyword[def] identifier[shepherd] ( identifier[x] , identifier[y] , identifier[n_boot] = literal[int] ):
literal[string]
keyword[from] identifier[scipy] . identifier[stats] keyword[import] identifier[spearmanr]
identifier[X] = identifier[np] . identifier[column_stack] (( identifier[x] , identifier[y] ))
identifier[m] = identifier[bsmahal] ( identifier[X] , identifier[X] , identifier[n_boot] )
identifier[outliers] =( identifier[m] >= literal[int] )
identifier[r] , identifier[pval] = identifier[spearmanr] ( identifier[x] [~ identifier[outliers] ], identifier[y] [~ identifier[outliers] ])
keyword[return] identifier[r] , identifier[pval] , identifier[outliers] | def shepherd(x, y, n_boot=200):
"""
Shepherd's Pi correlation, equivalent to Spearman's rho after outliers
removal.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
r : float
Pi correlation coefficient
pval : float
Two-tailed adjusted p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
It first bootstraps the Mahalanobis distances, removes all observations
with m >= 6 and finally calculates the correlation of the remaining data.
Pi is Spearman's Rho after outlier removal.
"""
from scipy.stats import spearmanr
X = np.column_stack((x, y))
# Bootstrapping on Mahalanobis distance
m = bsmahal(X, X, n_boot)
# Determine outliers
outliers = m >= 6
# Compute correlation
(r, pval) = spearmanr(x[~outliers], y[~outliers])
# (optional) double the p-value to achieve a nominal false alarm rate
# pval *= 2
# pval = 1 if pval > 1 else pval
return (r, pval, outliers) |
def __init_configrs(self, rs_cfg):
    """Create and start a config replica set."""
    # ReplicaSets.create() expects the key 'id'; the incoming config uses
    # 'rs_id' for consistency, so rename it here.
    rs_cfg['id'] = rs_cfg.pop('rs_id', None)
    members = rs_cfg.setdefault('members', [{}])
    for member in members:
        params = self._strip_auth(member.get('procParams', {}))
        # Every member of this replica set acts as a config server.
        params['configsvr'] = True
        if self.enable_ipv6:
            common.enable_ipv6_single(params)
        member['procParams'] = params
    rs_cfg['sslParams'] = self.sslParams
    self._configsvrs.append(ReplicaSets().create(rs_cfg))
constant[Create and start a config replica set.]
call[name[rs_cfg]][constant[id]] assign[=] call[name[rs_cfg].pop, parameter[constant[rs_id], constant[None]]]
for taget[name[member]] in starred[call[name[rs_cfg].setdefault, parameter[constant[members], list[[<ast.Dict object at 0x7da18ede7910>]]]]] begin[:]
call[name[member]][constant[procParams]] assign[=] call[name[self]._strip_auth, parameter[call[name[member].get, parameter[constant[procParams], dictionary[[], []]]]]]
call[call[name[member]][constant[procParams]]][constant[configsvr]] assign[=] constant[True]
if name[self].enable_ipv6 begin[:]
call[name[common].enable_ipv6_single, parameter[call[name[member]][constant[procParams]]]]
call[name[rs_cfg]][constant[sslParams]] assign[=] name[self].sslParams
call[name[self]._configsvrs.append, parameter[call[call[name[ReplicaSets], parameter[]].create, parameter[name[rs_cfg]]]]] | keyword[def] identifier[__init_configrs] ( identifier[self] , identifier[rs_cfg] ):
literal[string]
identifier[rs_cfg] [ literal[string] ]= identifier[rs_cfg] . identifier[pop] ( literal[string] , keyword[None] )
keyword[for] identifier[member] keyword[in] identifier[rs_cfg] . identifier[setdefault] ( literal[string] ,[{}]):
identifier[member] [ literal[string] ]= identifier[self] . identifier[_strip_auth] (
identifier[member] . identifier[get] ( literal[string] ,{}))
identifier[member] [ literal[string] ][ literal[string] ]= keyword[True]
keyword[if] identifier[self] . identifier[enable_ipv6] :
identifier[common] . identifier[enable_ipv6_single] ( identifier[member] [ literal[string] ])
identifier[rs_cfg] [ literal[string] ]= identifier[self] . identifier[sslParams]
identifier[self] . identifier[_configsvrs] . identifier[append] ( identifier[ReplicaSets] (). identifier[create] ( identifier[rs_cfg] )) | def __init_configrs(self, rs_cfg):
"""Create and start a config replica set."""
# Use 'rs_id' to set the id for consistency, but need to rename
# to 'id' to use with ReplicaSets.create()
rs_cfg['id'] = rs_cfg.pop('rs_id', None)
for member in rs_cfg.setdefault('members', [{}]):
member['procParams'] = self._strip_auth(member.get('procParams', {}))
member['procParams']['configsvr'] = True
if self.enable_ipv6:
common.enable_ipv6_single(member['procParams']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['member']]
rs_cfg['sslParams'] = self.sslParams
self._configsvrs.append(ReplicaSets().create(rs_cfg)) |
def get_lyric_by_songid(self, mid):
    """Get song lyric

    :param mid: music id
    :return: {
        lrc: {
            version: int,
            lyric: str
        },
        tlyric: {
            version: int,
            lyric: str
        }
        sgc: bool,
        qfy: bool,
        sfy: bool,
        transUser: {},
        code: int,
    }
    """
    # tv selects translation: -1 requests the translated lyric, 1 skips it.
    url = '{0}/song/lyric?id={1}&lv=1&kv=1&tv=-1'.format(uri, mid)
    return self.request('GET', url)
constant[Get song lyric
:param mid: music id
:return: {
lrc: {
version: int,
lyric: str
},
tlyric: {
version: int,
lyric: str
}
sgc: bool,
qfy: bool,
sfy: bool,
transUser: {},
code: int,
}
]
variable[url] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[uri] + constant[/song/lyric?]] + constant[id=]] + call[name[str], parameter[name[mid]]]] + constant[&lv=1&kv=1&tv=-1]]
return[call[name[self].request, parameter[constant[GET], name[url]]]] | keyword[def] identifier[get_lyric_by_songid] ( identifier[self] , identifier[mid] ):
literal[string]
identifier[url] = identifier[uri] + literal[string] + literal[string] + identifier[str] ( identifier[mid] )+ literal[string]
keyword[return] identifier[self] . identifier[request] ( literal[string] , identifier[url] ) | def get_lyric_by_songid(self, mid):
"""Get song lyric
:param mid: music id
:return: {
lrc: {
version: int,
lyric: str
},
tlyric: {
version: int,
lyric: str
}
sgc: bool,
qfy: bool,
sfy: bool,
transUser: {},
code: int,
}
"""
# tv 表示翻译。-1:表示要翻译,1:不要
url = uri + '/song/lyric?' + 'id=' + str(mid) + '&lv=1&kv=1&tv=-1'
return self.request('GET', url) |
def yum_install(self, packages, ignore_error=False):
    """Install some packages on the remote host via yum.

    :param packages: list of package names to install.
    """
    command = 'yum install -y --quiet {0}'.format(' '.join(packages))
    # Retry a few times: package mirrors are occasionally flaky.
    return self.run(command, ignore_error=ignore_error, retry=5)
constant[Install some packages on the remote host.
:param packages: ist of packages to install.
]
return[call[name[self].run, parameter[binary_operation[constant[yum install -y --quiet ] + call[constant[ ].join, parameter[name[packages]]]]]]] | keyword[def] identifier[yum_install] ( identifier[self] , identifier[packages] , identifier[ignore_error] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[run] ( literal[string] + literal[string] . identifier[join] ( identifier[packages] ), identifier[ignore_error] = identifier[ignore_error] , identifier[retry] = literal[int] ) | def yum_install(self, packages, ignore_error=False):
"""Install some packages on the remote host.
:param packages: ist of packages to install.
"""
return self.run('yum install -y --quiet ' + ' '.join(packages), ignore_error=ignore_error, retry=5) |
def dump(
    state, host,
    remote_filename, database=None,
    # Details for speaking to MySQL via `mysql` CLI
    mysql_user=None, mysql_password=None,
    mysql_host=None, mysql_port=None,
):
    '''
    Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.

    + database: name of the database to dump
    + remote_filename: name of the file to dump the SQL to
    + mysql_*: global module arguments, see above
    '''
    # Build the mysqldump invocation and redirect its output to the file.
    command = make_mysql_command(
        executable='mysqldump',
        database=database,
        user=mysql_user,
        password=mysql_password,
        host=mysql_host,
        port=mysql_port,
    )
    yield '{0} > {1}'.format(command, remote_filename)
constant[
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above
]
<ast.Yield object at 0x7da20c993460> | keyword[def] identifier[dump] (
identifier[state] , identifier[host] ,
identifier[remote_filename] , identifier[database] = keyword[None] ,
identifier[mysql_user] = keyword[None] , identifier[mysql_password] = keyword[None] ,
identifier[mysql_host] = keyword[None] , identifier[mysql_port] = keyword[None] ,
):
literal[string]
keyword[yield] literal[string] . identifier[format] ( identifier[make_mysql_command] (
identifier[executable] = literal[string] ,
identifier[database] = identifier[database] ,
identifier[user] = identifier[mysql_user] ,
identifier[password] = identifier[mysql_password] ,
identifier[host] = identifier[mysql_host] ,
identifier[port] = identifier[mysql_port] ,
), identifier[remote_filename] ) | def dump(state, host, remote_filename, database=None, mysql_user=None, mysql_password=None, mysql_host=None, mysql_port=None):
# Details for speaking to MySQL via `mysql` CLI
'\n Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.\n\n + database: name of the database to dump\n + remote_filename: name of the file to dump the SQL to\n + mysql_*: global module arguments, see above\n '
yield '{0} > {1}'.format(make_mysql_command(executable='mysqldump', database=database, user=mysql_user, password=mysql_password, host=mysql_host, port=mysql_port), remote_filename) |
def reload_pimms():
    '''
    reload_pimms() reloads the entire pimms module and returns it.

    Submodules are reloaded first, in dependency order, so that the final
    reload of the top-level ``pimms`` package re-imports the refreshed
    submodules.

    Returns
    -------
    module
        The freshly reloaded ``pimms`` module object.
    '''
    import sys
    try:
        from importlib import reload
    except ImportError:  # Python 2: reload lives in the deprecated imp module
        from imp import reload
    # Leaf submodules before the package itself; order matters.
    for name in ('pimms.util', 'pimms.table', 'pimms.immutable',
                 'pimms.calculation', 'pimms.cmdline', 'pimms'):
        reload(sys.modules[name])
    return sys.modules['pimms']
constant[
reload_pimms() reloads the entire pimms module and returns it.
]
import module[sys], module[six]
<ast.Try object at 0x7da20c6a9960>
call[name[reload], parameter[call[name[sys].modules][constant[pimms.util]]]]
call[name[reload], parameter[call[name[sys].modules][constant[pimms.table]]]]
call[name[reload], parameter[call[name[sys].modules][constant[pimms.immutable]]]]
call[name[reload], parameter[call[name[sys].modules][constant[pimms.calculation]]]]
call[name[reload], parameter[call[name[sys].modules][constant[pimms.cmdline]]]]
call[name[reload], parameter[call[name[sys].modules][constant[pimms]]]]
return[call[name[sys].modules][constant[pimms]]] | keyword[def] identifier[reload_pimms] ():
literal[string]
keyword[import] identifier[sys] , identifier[six]
keyword[try] : keyword[from] identifier[importlib] keyword[import] identifier[reload]
keyword[except] : keyword[from] identifier[imp] keyword[import] identifier[reload]
identifier[reload] ( identifier[sys] . identifier[modules] [ literal[string] ])
identifier[reload] ( identifier[sys] . identifier[modules] [ literal[string] ])
identifier[reload] ( identifier[sys] . identifier[modules] [ literal[string] ])
identifier[reload] ( identifier[sys] . identifier[modules] [ literal[string] ])
identifier[reload] ( identifier[sys] . identifier[modules] [ literal[string] ])
identifier[reload] ( identifier[sys] . identifier[modules] [ literal[string] ])
keyword[return] identifier[sys] . identifier[modules] [ literal[string] ] | def reload_pimms():
"""
reload_pimms() reloads the entire pimms module and returns it.
"""
import sys, six
try:
from importlib import reload # depends on [control=['try'], data=[]]
except:
from imp import reload # depends on [control=['except'], data=[]]
reload(sys.modules['pimms.util'])
reload(sys.modules['pimms.table'])
reload(sys.modules['pimms.immutable'])
reload(sys.modules['pimms.calculation'])
reload(sys.modules['pimms.cmdline'])
reload(sys.modules['pimms'])
return sys.modules['pimms'] |
def load_module(name, original_module):
    """
    Load a copy of a module, distinct from what you'd get if you imported
    it directly.

    @param str name: The name of the new module.
    @param original_module: The original module we're recreating.
    @return: A new, distinct module.
    """
    new_module = ModuleType(name)
    if PY3:
        import importlib.util
        spec = importlib.util.find_spec(original_module.__name__)
        code = spec.loader.get_code(original_module.__name__)
    else:
        # PyInstaller bundles have no source files to re-read on Python 2.
        if getattr(sys, "frozen", False):
            raise NotImplementedError("Can't load modules on Python 2 with PyInstaller")
        source_path = original_module.__file__
        # Fall back from compiled .pyc/.pyo to the adjacent .py source.
        if source_path.endswith((".pyc", ".pyo")):
            source_path = source_path[:-1]
        with open(source_path) as f:
            code = f.read()
    # Execute the source in the fresh module's namespace.
    exec_(code, new_module.__dict__, new_module.__dict__)
    return new_module
constant[
Load a copy of a module, distinct from what you'd get if you imported
it directly.
@param str name: The name of the new module.
@param original_module: The original module we're recreating.
@return: A new, distinct module.
]
variable[module] assign[=] call[name[ModuleType], parameter[name[name]]]
if name[PY3] begin[:]
import module[importlib.util]
variable[spec] assign[=] call[name[importlib].util.find_spec, parameter[name[original_module].__name__]]
variable[source] assign[=] call[name[spec].loader.get_code, parameter[name[original_module].__name__]]
call[name[exec_], parameter[name[source], name[module].__dict__, name[module].__dict__]]
return[name[module]] | keyword[def] identifier[load_module] ( identifier[name] , identifier[original_module] ):
literal[string]
identifier[module] = identifier[ModuleType] ( identifier[name] )
keyword[if] identifier[PY3] :
keyword[import] identifier[importlib] . identifier[util]
identifier[spec] = identifier[importlib] . identifier[util] . identifier[find_spec] ( identifier[original_module] . identifier[__name__] )
identifier[source] = identifier[spec] . identifier[loader] . identifier[get_code] ( identifier[original_module] . identifier[__name__] )
keyword[else] :
keyword[if] identifier[getattr] ( identifier[sys] , literal[string] , keyword[False] ):
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[path] = identifier[original_module] . identifier[__file__]
keyword[if] identifier[path] . identifier[endswith] ( literal[string] ) keyword[or] identifier[path] . identifier[endswith] ( literal[string] ):
identifier[path] = identifier[path] [:- literal[int] ]
keyword[with] identifier[open] ( identifier[path] ) keyword[as] identifier[f] :
identifier[source] = identifier[f] . identifier[read] ()
identifier[exec_] ( identifier[source] , identifier[module] . identifier[__dict__] , identifier[module] . identifier[__dict__] )
keyword[return] identifier[module] | def load_module(name, original_module):
"""
Load a copy of a module, distinct from what you'd get if you imported
it directly.
@param str name: The name of the new module.
@param original_module: The original module we're recreating.
@return: A new, distinct module.
"""
module = ModuleType(name)
if PY3:
import importlib.util
spec = importlib.util.find_spec(original_module.__name__)
source = spec.loader.get_code(original_module.__name__) # depends on [control=['if'], data=[]]
else:
if getattr(sys, 'frozen', False):
raise NotImplementedError("Can't load modules on Python 2 with PyInstaller") # depends on [control=['if'], data=[]]
path = original_module.__file__
if path.endswith('.pyc') or path.endswith('.pyo'):
path = path[:-1] # depends on [control=['if'], data=[]]
with open(path) as f:
source = f.read() # depends on [control=['with'], data=['f']]
exec_(source, module.__dict__, module.__dict__)
return module |
def execOnSubArrays(arrs, fn, splitX, splitY):
    """
    Apply *fn* tile-by-tile to one or multiple 2d arrays and stitch the tile
    results back into one output array of the same overall shape.
    The arrays are cut into a splitX x splitY grid of sub arrays; *fn* is
    called once per tile with the matching sub array of every input.
    works only on 2d arrays at the moment
    >>> a1 = np.ones((1000,1000))
    >>> a2 = np.ones((1000,1000))
    >>> out = execOnSubArrays((a1,a2), lambda sa1,sa2: sa1+sa2, splitX=10, splitY=10)
    """
    # Accept a single array as well as a tuple/list of arrays.
    if type(arrs) not in (tuple, list):
        arrs = (arrs,)
    # All inputs are assumed to share the first array's 2d shape -- TODO confirm.
    s0, s1 = arrs[0].shape
    # Nominal tile size; the last row/column of tiles absorbs the remainder.
    ss0 = s0 // splitX
    ss1 = s1 // splitY
    # (px, py): top-left corner of the current tile.
    px, py = 0, 0
    out = None
    for ix in range(splitX):
        if ix == splitX - 1:
            # Last tile row: extend to cover all remaining rows.
            ss0 = s0 - px
        for iy in range(splitY):
            if iy == splitY - 1:
                # Last tile column: extend to cover all remaining columns.
                # NOTE(review): ss1 is never reset to s1 // splitY for the next
                # outer iteration, so later rows may be partitioned differently
                # (output is still fully covered) -- confirm this is intended.
                ss1 = s1 - py
            # current sub arrays:
            sarrs = [a[px:px + ss0, py:py + ss1] for a in arrs]
            sub = fn(*tuple(sarrs))
            if out is None:
                # Allocate the output lazily so its dtype follows fn's result.
                out = np.empty(shape=(s0, s1), dtype=sub.dtype)
            out[px:px + ss0, py:py + ss1] = sub
            py += ss1
        py = 0
        px += ss0
    return out | def function[execOnSubArrays, parameter[arrs, fn, splitX, splitY]]:
constant[
execute a function(on or multiple arrays)
only on sub sections
works only on 2d arrays at the moment
>>> a1 = np.ones((1000,1000))
>>> a2 = np.ones((1000,1000))
>>> out = execOnSubArrays((a1,a2), lambda sa1,sa2: sa1+as2, splitX=10, splitY=10)
]
if compare[call[name[type], parameter[name[arrs]]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Name object at 0x7da18fe92290>, <ast.Name object at 0x7da18fe93d90>]]] begin[:]
variable[arrs] assign[=] tuple[[<ast.Name object at 0x7da18fe93ca0>]]
<ast.Tuple object at 0x7da18fe90fd0> assign[=] call[name[arrs]][constant[0]].shape
variable[ss0] assign[=] binary_operation[name[s0] <ast.FloorDiv object at 0x7da2590d6bc0> name[splitX]]
variable[ss1] assign[=] binary_operation[name[s1] <ast.FloorDiv object at 0x7da2590d6bc0> name[splitY]]
<ast.Tuple object at 0x7da1b1320040> assign[=] tuple[[<ast.Constant object at 0x7da1b1320b50>, <ast.Constant object at 0x7da1b13201c0>]]
variable[out] assign[=] constant[None]
for taget[name[ix]] in starred[call[name[range], parameter[name[splitX]]]] begin[:]
if compare[name[ix] equal[==] binary_operation[name[splitX] - constant[1]]] begin[:]
variable[ss0] assign[=] binary_operation[name[s0] - name[px]]
for taget[name[iy]] in starred[call[name[range], parameter[name[splitY]]]] begin[:]
if compare[name[iy] equal[==] binary_operation[name[splitY] - constant[1]]] begin[:]
variable[ss1] assign[=] binary_operation[name[s1] - name[py]]
variable[sarrs] assign[=] <ast.ListComp object at 0x7da20c76ea40>
variable[sub] assign[=] call[name[fn], parameter[<ast.Starred object at 0x7da18fe92ad0>]]
if compare[name[out] is constant[None]] begin[:]
variable[out] assign[=] call[name[np].empty, parameter[]]
call[name[out]][tuple[[<ast.Slice object at 0x7da18fe93550>, <ast.Slice object at 0x7da18fe93c10>]]] assign[=] name[sub]
<ast.AugAssign object at 0x7da18fe91ba0>
variable[py] assign[=] constant[0]
<ast.AugAssign object at 0x7da18fe924d0>
return[name[out]] | keyword[def] identifier[execOnSubArrays] ( identifier[arrs] , identifier[fn] , identifier[splitX] , identifier[splitY] ):
literal[string]
keyword[if] identifier[type] ( identifier[arrs] ) keyword[not] keyword[in] ( identifier[tuple] , identifier[list] ):
identifier[arrs] =( identifier[arrs] ,)
identifier[s0] , identifier[s1] = identifier[arrs] [ literal[int] ]. identifier[shape]
identifier[ss0] = identifier[s0] // identifier[splitX]
identifier[ss1] = identifier[s1] // identifier[splitY]
identifier[px] , identifier[py] = literal[int] , literal[int]
identifier[out] = keyword[None]
keyword[for] identifier[ix] keyword[in] identifier[range] ( identifier[splitX] ):
keyword[if] identifier[ix] == identifier[splitX] - literal[int] :
identifier[ss0] = identifier[s0] - identifier[px]
keyword[for] identifier[iy] keyword[in] identifier[range] ( identifier[splitY] ):
keyword[if] identifier[iy] == identifier[splitY] - literal[int] :
identifier[ss1] = identifier[s1] - identifier[py]
identifier[sarrs] =[ identifier[a] [ identifier[px] : identifier[px] + identifier[ss0] , identifier[py] : identifier[py] + identifier[ss1] ] keyword[for] identifier[a] keyword[in] identifier[arrs] ]
identifier[sub] = identifier[fn] (* identifier[tuple] ( identifier[sarrs] ))
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[out] = identifier[np] . identifier[empty] ( identifier[shape] =( identifier[s0] , identifier[s1] ), identifier[dtype] = identifier[sub] . identifier[dtype] )
identifier[out] [ identifier[px] : identifier[px] + identifier[ss0] , identifier[py] : identifier[py] + identifier[ss1] ]= identifier[sub]
identifier[py] += identifier[ss1]
identifier[py] = literal[int]
identifier[px] += identifier[ss0]
keyword[return] identifier[out] | def execOnSubArrays(arrs, fn, splitX, splitY):
"""
execute a function(on or multiple arrays)
only on sub sections
works only on 2d arrays at the moment
>>> a1 = np.ones((1000,1000))
>>> a2 = np.ones((1000,1000))
>>> out = execOnSubArrays((a1,a2), lambda sa1,sa2: sa1+as2, splitX=10, splitY=10)
"""
if type(arrs) not in (tuple, list):
arrs = (arrs,) # depends on [control=['if'], data=[]]
(s0, s1) = arrs[0].shape
ss0 = s0 // splitX
ss1 = s1 // splitY
(px, py) = (0, 0)
out = None
for ix in range(splitX):
if ix == splitX - 1:
ss0 = s0 - px # depends on [control=['if'], data=[]]
for iy in range(splitY):
if iy == splitY - 1:
ss1 = s1 - py # depends on [control=['if'], data=[]]
# current sub arrays:
sarrs = [a[px:px + ss0, py:py + ss1] for a in arrs]
sub = fn(*tuple(sarrs))
if out is None:
out = np.empty(shape=(s0, s1), dtype=sub.dtype) # depends on [control=['if'], data=['out']]
out[px:px + ss0, py:py + ss1] = sub
py += ss1 # depends on [control=['for'], data=['iy']]
py = 0
px += ss0 # depends on [control=['for'], data=['ix']]
return out |
def compileInterpolatableOTFsFromDS(
    designSpaceDoc,
    preProcessorClass=OTFPreProcessor,
    outlineCompilerClass=OutlineOTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    roundTolerance=None,
    inplace=False,
):
    """Create FontTools CFF fonts from the DesignSpaceDocument UFO sources
    with interpolatable outlines.
    Interpolatable means without subroutinization and specializer optimizations
    and no removal of overlaps.
    If the Designspace contains a "public.skipExportGlyphs" lib key, these
    glyphs will not be exported to the final font. If these glyphs are used as
    components in any other glyph, those components get decomposed. If the lib
    key doesn't exist in the Designspace, all glyphs are exported (keys in
    individual UFOs are ignored). UFO groups and kerning will be pruned of
    skipped glyphs.
    The DesignSpaceDocument should contain SourceDescriptor objects with 'font'
    attribute set to an already loaded defcon.Font object (or compatible UFO
    Font class). If 'font' attribute is unset or None, an AttributeError exception
    is thrown.
    Return a copy of the DesignSpaceDocument object (or the same one if
    inplace=True) with the source's 'font' attribute set to the corresponding
    TTFont instance.
    For sources that have the 'layerName' attribute defined, the corresponding TTFont
    object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp",
    "vmtx" and "VORG"), and no OpenType layout tables.
    """
    # Fail fast: every source must carry an already loaded UFO font object
    # before any compilation work starts.
    for source in designSpaceDoc.sources:
        if source.font is None:
            raise AttributeError(
                "designspace source '%s' is missing required 'font' attribute"
                % getattr(source, "name", "<Unknown>")
            )
    # The skip list is read once from the designspace lib; per-UFO keys are
    # deliberately ignored (see docstring).
    skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", [])
    otfs = []
    for source in designSpaceDoc.sources:
        otfs.append(
            compileOTF(
                ufo=source.font,
                layerName=source.layerName,
                preProcessorClass=preProcessorClass,
                outlineCompilerClass=outlineCompilerClass,
                featureCompilerClass=featureCompilerClass,
                featureWriters=featureWriters,
                glyphOrder=glyphOrder,
                useProductionNames=useProductionNames,
                # No CFF optimization and no overlap removal so the outlines
                # stay point-compatible (interpolatable) across masters.
                optimizeCFF=CFFOptimization.NONE,
                roundTolerance=roundTolerance,
                removeOverlaps=False,
                overlapsBackend=None,
                inplace=inplace,
                skipExportGlyphs=skipExportGlyphs,
                # Sparse (layer) masters only get the minimal table set.
                _tables=SPARSE_OTF_MASTER_TABLES if source.layerName else None,
            )
        )
    if inplace:
        result = designSpaceDoc
    else:
        # TODO try a more efficient copy method that doesn't involve (de)serializing
        result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring())
    # Attach each compiled TTFont to its (possibly copied) source descriptor;
    # sources and otfs are in the same order by construction.
    for source, otf in zip(result.sources, otfs):
        source.font = otf
    return result | def function[compileInterpolatableOTFsFromDS, parameter[designSpaceDoc, preProcessorClass, outlineCompilerClass, featureCompilerClass, featureWriters, glyphOrder, useProductionNames, roundTolerance, inplace]]:
constant[Create FontTools CFF fonts from the DesignSpaceDocument UFO sources
with interpolatable outlines.
Interpolatable means without subroutinization and specializer optimizations
and no removal of overlaps.
If the Designspace contains a "public.skipExportGlyphs" lib key, these
glyphs will not be exported to the final font. If these glyphs are used as
components in any other glyph, those components get decomposed. If the lib
key doesn't exist in the Designspace, all glyphs are exported (keys in
individual UFOs are ignored). UFO groups and kerning will be pruned of
skipped glyphs.
The DesignSpaceDocument should contain SourceDescriptor objects with 'font'
attribute set to an already loaded defcon.Font object (or compatible UFO
Font class). If 'font' attribute is unset or None, an AttributeError exception
is thrown.
Return a copy of the DesignSpaceDocument object (or the same one if
inplace=True) with the source's 'font' attribute set to the corresponding
TTFont instance.
For sources that have the 'layerName' attribute defined, the corresponding TTFont
object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp",
"vmtx" and "VORG"), and no OpenType layout tables.
]
for taget[name[source]] in starred[name[designSpaceDoc].sources] begin[:]
if compare[name[source].font is constant[None]] begin[:]
<ast.Raise object at 0x7da1b26aebf0>
variable[skipExportGlyphs] assign[=] call[name[designSpaceDoc].lib.get, parameter[constant[public.skipExportGlyphs], list[[]]]]
variable[otfs] assign[=] list[[]]
for taget[name[source]] in starred[name[designSpaceDoc].sources] begin[:]
call[name[otfs].append, parameter[call[name[compileOTF], parameter[]]]]
if name[inplace] begin[:]
variable[result] assign[=] name[designSpaceDoc]
for taget[tuple[[<ast.Name object at 0x7da18bcc9f00>, <ast.Name object at 0x7da18bcc9e40>]]] in starred[call[name[zip], parameter[name[result].sources, name[otfs]]]] begin[:]
name[source].font assign[=] name[otf]
return[name[result]] | keyword[def] identifier[compileInterpolatableOTFsFromDS] (
identifier[designSpaceDoc] ,
identifier[preProcessorClass] = identifier[OTFPreProcessor] ,
identifier[outlineCompilerClass] = identifier[OutlineOTFCompiler] ,
identifier[featureCompilerClass] = keyword[None] ,
identifier[featureWriters] = keyword[None] ,
identifier[glyphOrder] = keyword[None] ,
identifier[useProductionNames] = keyword[None] ,
identifier[roundTolerance] = keyword[None] ,
identifier[inplace] = keyword[False] ,
):
literal[string]
keyword[for] identifier[source] keyword[in] identifier[designSpaceDoc] . identifier[sources] :
keyword[if] identifier[source] . identifier[font] keyword[is] keyword[None] :
keyword[raise] identifier[AttributeError] (
literal[string]
% identifier[getattr] ( identifier[source] , literal[string] , literal[string] )
)
identifier[skipExportGlyphs] = identifier[designSpaceDoc] . identifier[lib] . identifier[get] ( literal[string] ,[])
identifier[otfs] =[]
keyword[for] identifier[source] keyword[in] identifier[designSpaceDoc] . identifier[sources] :
identifier[otfs] . identifier[append] (
identifier[compileOTF] (
identifier[ufo] = identifier[source] . identifier[font] ,
identifier[layerName] = identifier[source] . identifier[layerName] ,
identifier[preProcessorClass] = identifier[preProcessorClass] ,
identifier[outlineCompilerClass] = identifier[outlineCompilerClass] ,
identifier[featureCompilerClass] = identifier[featureCompilerClass] ,
identifier[featureWriters] = identifier[featureWriters] ,
identifier[glyphOrder] = identifier[glyphOrder] ,
identifier[useProductionNames] = identifier[useProductionNames] ,
identifier[optimizeCFF] = identifier[CFFOptimization] . identifier[NONE] ,
identifier[roundTolerance] = identifier[roundTolerance] ,
identifier[removeOverlaps] = keyword[False] ,
identifier[overlapsBackend] = keyword[None] ,
identifier[inplace] = identifier[inplace] ,
identifier[skipExportGlyphs] = identifier[skipExportGlyphs] ,
identifier[_tables] = identifier[SPARSE_OTF_MASTER_TABLES] keyword[if] identifier[source] . identifier[layerName] keyword[else] keyword[None] ,
)
)
keyword[if] identifier[inplace] :
identifier[result] = identifier[designSpaceDoc]
keyword[else] :
identifier[result] = identifier[designSpaceDoc] . identifier[__class__] . identifier[fromstring] ( identifier[designSpaceDoc] . identifier[tostring] ())
keyword[for] identifier[source] , identifier[otf] keyword[in] identifier[zip] ( identifier[result] . identifier[sources] , identifier[otfs] ):
identifier[source] . identifier[font] = identifier[otf]
keyword[return] identifier[result] | def compileInterpolatableOTFsFromDS(designSpaceDoc, preProcessorClass=OTFPreProcessor, outlineCompilerClass=OutlineOTFCompiler, featureCompilerClass=None, featureWriters=None, glyphOrder=None, useProductionNames=None, roundTolerance=None, inplace=False):
"""Create FontTools CFF fonts from the DesignSpaceDocument UFO sources
with interpolatable outlines.
Interpolatable means without subroutinization and specializer optimizations
and no removal of overlaps.
If the Designspace contains a "public.skipExportGlyphs" lib key, these
glyphs will not be exported to the final font. If these glyphs are used as
components in any other glyph, those components get decomposed. If the lib
key doesn't exist in the Designspace, all glyphs are exported (keys in
individual UFOs are ignored). UFO groups and kerning will be pruned of
skipped glyphs.
The DesignSpaceDocument should contain SourceDescriptor objects with 'font'
attribute set to an already loaded defcon.Font object (or compatible UFO
Font class). If 'font' attribute is unset or None, an AttributeError exception
is thrown.
Return a copy of the DesignSpaceDocument object (or the same one if
inplace=True) with the source's 'font' attribute set to the corresponding
TTFont instance.
For sources that have the 'layerName' attribute defined, the corresponding TTFont
object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp",
"vmtx" and "VORG"), and no OpenType layout tables.
"""
for source in designSpaceDoc.sources:
if source.font is None:
raise AttributeError("designspace source '%s' is missing required 'font' attribute" % getattr(source, 'name', '<Unknown>')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['source']]
skipExportGlyphs = designSpaceDoc.lib.get('public.skipExportGlyphs', [])
otfs = []
for source in designSpaceDoc.sources:
otfs.append(compileOTF(ufo=source.font, layerName=source.layerName, preProcessorClass=preProcessorClass, outlineCompilerClass=outlineCompilerClass, featureCompilerClass=featureCompilerClass, featureWriters=featureWriters, glyphOrder=glyphOrder, useProductionNames=useProductionNames, optimizeCFF=CFFOptimization.NONE, roundTolerance=roundTolerance, removeOverlaps=False, overlapsBackend=None, inplace=inplace, skipExportGlyphs=skipExportGlyphs, _tables=SPARSE_OTF_MASTER_TABLES if source.layerName else None)) # depends on [control=['for'], data=['source']]
if inplace:
result = designSpaceDoc # depends on [control=['if'], data=[]]
else:
# TODO try a more efficient copy method that doesn't involve (de)serializing
result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring())
for (source, otf) in zip(result.sources, otfs):
source.font = otf # depends on [control=['for'], data=[]]
return result |
def destroy(device):
    '''
    Destroy a RAID device.
    WARNING This will zero the superblock of all members of the RAID array..
    CLI Example:
    .. code-block:: bash
    salt '*' raid.destroy /dev/md0
    '''
    # If the array cannot even be queried there is nothing we can destroy.
    try:
        details = detail(device)
    except CommandExecutionError:
        return False
    stop_cmd = ['mdadm', '--stop', device]
    zero_cmd = ['mdadm', '--zero-superblock']
    # Only wipe member superblocks after the array was stopped successfully
    # (retcode 0); all members are zeroed in a single mdadm invocation.
    if __salt__['cmd.retcode'](stop_cmd, python_shell=False) == 0:
        for number in details['members']:
            zero_cmd.append(details['members'][number]['device'])
        __salt__['cmd.retcode'](zero_cmd, python_shell=False)
    # Remove entry from config file:
    if __grains__.get('os_family') == 'Debian':
        cfg_file = '/etc/mdadm/mdadm.conf'
    else:
        cfg_file = '/etc/mdadm.conf'
    # Best effort: a bad pattern/missing file must not fail the whole destroy.
    try:
        __salt__['file.replace'](cfg_file, 'ARRAY {0} .*'.format(device), '')
    except SaltInvocationError:
        pass
    # Success is defined as the device no longer showing up in raid.list.
    if __salt__['raid.list']().get(device) is None:
        return True
    else:
        return False | def function[destroy, parameter[device]]:
constant[
Destroy a RAID device.
WARNING This will zero the superblock of all members of the RAID array..
CLI Example:
.. code-block:: bash
salt '*' raid.destroy /dev/md0
]
<ast.Try object at 0x7da1b1c45b40>
variable[stop_cmd] assign[=] list[[<ast.Constant object at 0x7da18f720f70>, <ast.Constant object at 0x7da18f722530>, <ast.Name object at 0x7da18f7215a0>]]
variable[zero_cmd] assign[=] list[[<ast.Constant object at 0x7da18f7235b0>, <ast.Constant object at 0x7da18f720b80>]]
if compare[call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[stop_cmd]]] equal[==] constant[0]] begin[:]
for taget[name[number]] in starred[call[name[details]][constant[members]]] begin[:]
call[name[zero_cmd].append, parameter[call[call[call[name[details]][constant[members]]][name[number]]][constant[device]]]]
call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[zero_cmd]]]
if compare[call[name[__grains__].get, parameter[constant[os_family]]] equal[==] constant[Debian]] begin[:]
variable[cfg_file] assign[=] constant[/etc/mdadm/mdadm.conf]
<ast.Try object at 0x7da18f722dd0>
if compare[call[call[call[name[__salt__]][constant[raid.list]], parameter[]].get, parameter[name[device]]] is constant[None]] begin[:]
return[constant[True]] | keyword[def] identifier[destroy] ( identifier[device] ):
literal[string]
keyword[try] :
identifier[details] = identifier[detail] ( identifier[device] )
keyword[except] identifier[CommandExecutionError] :
keyword[return] keyword[False]
identifier[stop_cmd] =[ literal[string] , literal[string] , identifier[device] ]
identifier[zero_cmd] =[ literal[string] , literal[string] ]
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[stop_cmd] , identifier[python_shell] = keyword[False] )== literal[int] :
keyword[for] identifier[number] keyword[in] identifier[details] [ literal[string] ]:
identifier[zero_cmd] . identifier[append] ( identifier[details] [ literal[string] ][ identifier[number] ][ literal[string] ])
identifier[__salt__] [ literal[string] ]( identifier[zero_cmd] , identifier[python_shell] = keyword[False] )
keyword[if] identifier[__grains__] . identifier[get] ( literal[string] )== literal[string] :
identifier[cfg_file] = literal[string]
keyword[else] :
identifier[cfg_file] = literal[string]
keyword[try] :
identifier[__salt__] [ literal[string] ]( identifier[cfg_file] , literal[string] . identifier[format] ( identifier[device] ), literal[string] )
keyword[except] identifier[SaltInvocationError] :
keyword[pass]
keyword[if] identifier[__salt__] [ literal[string] ](). identifier[get] ( identifier[device] ) keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def destroy(device):
"""
Destroy a RAID device.
WARNING This will zero the superblock of all members of the RAID array..
CLI Example:
.. code-block:: bash
salt '*' raid.destroy /dev/md0
"""
try:
details = detail(device) # depends on [control=['try'], data=[]]
except CommandExecutionError:
return False # depends on [control=['except'], data=[]]
stop_cmd = ['mdadm', '--stop', device]
zero_cmd = ['mdadm', '--zero-superblock']
if __salt__['cmd.retcode'](stop_cmd, python_shell=False) == 0:
for number in details['members']:
zero_cmd.append(details['members'][number]['device']) # depends on [control=['for'], data=['number']]
__salt__['cmd.retcode'](zero_cmd, python_shell=False) # depends on [control=['if'], data=[]]
# Remove entry from config file:
if __grains__.get('os_family') == 'Debian':
cfg_file = '/etc/mdadm/mdadm.conf' # depends on [control=['if'], data=[]]
else:
cfg_file = '/etc/mdadm.conf'
try:
__salt__['file.replace'](cfg_file, 'ARRAY {0} .*'.format(device), '') # depends on [control=['try'], data=[]]
except SaltInvocationError:
pass # depends on [control=['except'], data=[]]
if __salt__['raid.list']().get(device) is None:
return True # depends on [control=['if'], data=[]]
else:
return False |
def _load_params(params, logger=logging):
    """Given a str as a path to the .params file or a pair of params,
    returns two dictionaries representing arg_params and aux_params.
    A string is treated as a path relative to this module's directory; a
    2-item tuple/list is assumed to already be (arg_params, aux_params) and
    is passed through unchanged.  Anything else raises ValueError.
    """
    if isinstance(params, str):
        # The path is resolved relative to the directory of this source file,
        # not the current working directory.
        cur_path = os.path.dirname(os.path.realpath(__file__))
        param_file_path = os.path.join(cur_path, params)
        logger.info('Loading params from file %s' % param_file_path)
        save_dict = nd_load(param_file_path)
        arg_params = {}
        aux_params = {}
        # Keys in the saved dict are prefixed 'arg:<name>' or 'aux:<name>';
        # split only on the first ':' so names may themselves contain colons.
        for k, v in save_dict.items():
            tp, name = k.split(':', 1)
            if tp == 'arg':
                arg_params[name] = v
            if tp == 'aux':
                aux_params[name] = v
        return arg_params, aux_params
    elif isinstance(params, (tuple, list)) and len(params) == 2:
        # Already a (arg_params, aux_params) pair -- pass it through.
        return params[0], params[1]
    else:
        raise ValueError('Unsupported params provided. Must be either a path to the param file or'
                         ' a pair of dictionaries representing arg_params and aux_params') | def function[_load_params, parameter[params, logger]]:
constant[Given a str as a path to the .params file or a pair of params,
returns two dictionaries representing arg_params and aux_params.
]
if call[name[isinstance], parameter[name[params], name[str]]] begin[:]
variable[cur_path] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[name[__file__]]]]]
variable[param_file_path] assign[=] call[name[os].path.join, parameter[name[cur_path], name[params]]]
call[name[logger].info, parameter[binary_operation[constant[Loading params from file %s] <ast.Mod object at 0x7da2590d6920> name[param_file_path]]]]
variable[save_dict] assign[=] call[name[nd_load], parameter[name[param_file_path]]]
variable[arg_params] assign[=] dictionary[[], []]
variable[aux_params] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1f217e0>, <ast.Name object at 0x7da1b1f21480>]]] in starred[call[name[save_dict].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b1f22950> assign[=] call[name[k].split, parameter[constant[:], constant[1]]]
if compare[name[tp] equal[==] constant[arg]] begin[:]
call[name[arg_params]][name[name]] assign[=] name[v]
if compare[name[tp] equal[==] constant[aux]] begin[:]
call[name[aux_params]][name[name]] assign[=] name[v]
return[tuple[[<ast.Name object at 0x7da1b1f22ad0>, <ast.Name object at 0x7da1b1f20fa0>]]] | keyword[def] identifier[_load_params] ( identifier[params] , identifier[logger] = identifier[logging] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[params] , identifier[str] ):
identifier[cur_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] ))
identifier[param_file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[cur_path] , identifier[params] )
identifier[logger] . identifier[info] ( literal[string] % identifier[param_file_path] )
identifier[save_dict] = identifier[nd_load] ( identifier[param_file_path] )
identifier[arg_params] ={}
identifier[aux_params] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[save_dict] . identifier[items] ():
identifier[tp] , identifier[name] = identifier[k] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[tp] == literal[string] :
identifier[arg_params] [ identifier[name] ]= identifier[v]
keyword[if] identifier[tp] == literal[string] :
identifier[aux_params] [ identifier[name] ]= identifier[v]
keyword[return] identifier[arg_params] , identifier[aux_params]
keyword[elif] identifier[isinstance] ( identifier[params] ,( identifier[tuple] , identifier[list] )) keyword[and] identifier[len] ( identifier[params] )== literal[int] :
keyword[return] identifier[params] [ literal[int] ], identifier[params] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] ) | def _load_params(params, logger=logging):
"""Given a str as a path to the .params file or a pair of params,
returns two dictionaries representing arg_params and aux_params.
"""
if isinstance(params, str):
cur_path = os.path.dirname(os.path.realpath(__file__))
param_file_path = os.path.join(cur_path, params)
logger.info('Loading params from file %s' % param_file_path)
save_dict = nd_load(param_file_path)
arg_params = {}
aux_params = {}
for (k, v) in save_dict.items():
(tp, name) = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v # depends on [control=['if'], data=[]]
if tp == 'aux':
aux_params[name] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (arg_params, aux_params) # depends on [control=['if'], data=[]]
elif isinstance(params, (tuple, list)) and len(params) == 2:
return (params[0], params[1]) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unsupported params provided. Must be either a path to the param file or a pair of dictionaries representing arg_params and aux_params') |
def send_markdown(self, agent_id, user_ids, content, party_ids='', tag_ids=''):
    """Send a markdown message.
    https://work.weixin.qq.com/api/doc#90000/90135/90236/markdown%E6%B6%88%E6%81%AF
    > Currently only a subset of markdown syntax is supported
    > The WeChat Work workbench (the former enterprise account) does not
    > render markdown messages
    :param agent_id: id of the enterprise application, an integer; it can be
                     found on the application's settings page
    :type agent_id: string
    :param content: markdown content, at most 2048 bytes, must be utf8 encoded
    :type content: string
    :param user_ids: list of member ids (the message receivers; multiple
                     receivers are separated by '|', at most 1000 supported).
                     Special case: pass @all to send to every member that
                     follows this enterprise application
    :type user_ids: list or tuple or string
    :param party_ids: list of department ids, at most 100 supported. Ignored
                      when user_ids is @all
    :type party_ids: list or tuple or string
    :param tag_ids: list of tag ids, at most 100 supported. Ignored when
                    user_ids is @all
    :type tag_ids: list or tuple or string
    :return: result of the API call
    :rtype: dict
    """
    # Only the payload differs per message type; delivery is shared via
    # _send_message.
    msg = {
        "msgtype": "markdown",
        "markdown": {"content": content}
    }
    return self._send_message(
        agent_id=agent_id,
        user_ids=user_ids,
        party_ids=party_ids,
        tag_ids=tag_ids,
        msg=msg
    ) | def function[send_markdown, parameter[self, agent_id, user_ids, content, party_ids, tag_ids]]:
constant[markdown消息
https://work.weixin.qq.com/api/doc#90000/90135/90236/markdown%E6%B6%88%E6%81%AF
> 目前仅支持markdown语法的子集
> 微工作台(原企业号)不支持展示markdown消息
:param agent_id: 企业应用的id,整型。可在应用的设置页面查看
:type agent_id: string
:param content: markdown内容,最长不超过2048个字节,必须是utf8编码
:type content: string
:param user_ids: 成员ID列表(消息接收者,多个接收者用‘|’分隔,最多支持1000个)。
特殊情况:指定为@all,则向关注该企业应用的全部成员发送
:type user_ids: list or tuple or string
:param party_ids: 部门ID列表,最多支持100个。当touser为@all时忽略本参数
:type party_ids: list or tuple or string
:param tag_ids: 标签ID列表,最多支持100个。当touser为@all时忽略本参数
:type tag_ids: list or tuple or string
:return: 接口调用结果
:rtype: dict
]
variable[msg] assign[=] dictionary[[<ast.Constant object at 0x7da1b217fb50>, <ast.Constant object at 0x7da1b217efe0>], [<ast.Constant object at 0x7da1b217c5b0>, <ast.Dict object at 0x7da1b217eb00>]]
return[call[name[self]._send_message, parameter[]]] | keyword[def] identifier[send_markdown] ( identifier[self] , identifier[agent_id] , identifier[user_ids] , identifier[content] , identifier[party_ids] = literal[string] , identifier[tag_ids] = literal[string] ):
literal[string]
identifier[msg] ={
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[content] }
}
keyword[return] identifier[self] . identifier[_send_message] (
identifier[agent_id] = identifier[agent_id] ,
identifier[user_ids] = identifier[user_ids] ,
identifier[party_ids] = identifier[party_ids] ,
identifier[tag_ids] = identifier[tag_ids] ,
identifier[msg] = identifier[msg]
) | def send_markdown(self, agent_id, user_ids, content, party_ids='', tag_ids=''):
"""markdown消息
https://work.weixin.qq.com/api/doc#90000/90135/90236/markdown%E6%B6%88%E6%81%AF
> 目前仅支持markdown语法的子集
> 微工作台(原企业号)不支持展示markdown消息
:param agent_id: 企业应用的id,整型。可在应用的设置页面查看
:type agent_id: string
:param content: markdown内容,最长不超过2048个字节,必须是utf8编码
:type content: string
:param user_ids: 成员ID列表(消息接收者,多个接收者用‘|’分隔,最多支持1000个)。
特殊情况:指定为@all,则向关注该企业应用的全部成员发送
:type user_ids: list or tuple or string
:param party_ids: 部门ID列表,最多支持100个。当touser为@all时忽略本参数
:type party_ids: list or tuple or string
:param tag_ids: 标签ID列表,最多支持100个。当touser为@all时忽略本参数
:type tag_ids: list or tuple or string
:return: 接口调用结果
:rtype: dict
"""
msg = {'msgtype': 'markdown', 'markdown': {'content': content}}
return self._send_message(agent_id=agent_id, user_ids=user_ids, party_ids=party_ids, tag_ids=tag_ids, msg=msg) |
def process_object(self, obj):
"""Process current object and push it to the ElasticSearch."""
document = self.document_class(meta={'id': self.generate_id(obj)})
for field in document._doc_type.mapping: # pylint: disable=protected-access
if field in ['users_with_permissions', 'groups_with_permissions', 'public_permission']:
continue # These fields are handled separately
try:
# use get_X_value function
get_value_function = getattr(self, 'get_{}_value'.format(field), None)
if get_value_function:
setattr(document, field, get_value_function(obj)) # pylint: disable=not-callable
continue
# use `mapping` dict
if field in self.mapping:
if callable(self.mapping[field]):
setattr(document, field, self.mapping[field](obj))
continue
try:
object_attr = dict_dot(obj, self.mapping[field])
except (KeyError, AttributeError):
object_attr = None
if callable(object_attr):
# use method on object
setattr(document, field, object_attr(obj))
else:
# use attribute on object
setattr(document, field, object_attr)
continue
# get value from the object
try:
object_value = dict_dot(obj, field)
setattr(document, field, object_value)
continue
except KeyError:
pass
raise AttributeError("Cannot determine mapping for field {}".format(field))
except Exception: # pylint: disable=broad-except
logger.exception(
"Error occurred while setting value of field '%s' in '%s' Elasticsearch index.",
field, self.__class__.__name__,
extra={'object_type': self.object_type, 'obj_id': obj.pk}
)
permissions = self.get_permissions(obj)
document.users_with_permissions = permissions['users']
document.groups_with_permissions = permissions['groups']
document.public_permission = permissions['public']
self.push_queue.append(document) | def function[process_object, parameter[self, obj]]:
constant[Process current object and push it to the ElasticSearch.]
variable[document] assign[=] call[name[self].document_class, parameter[]]
for taget[name[field]] in starred[name[document]._doc_type.mapping] begin[:]
if compare[name[field] in list[[<ast.Constant object at 0x7da1b1946890>, <ast.Constant object at 0x7da1b1947400>, <ast.Constant object at 0x7da1b1946c50>]]] begin[:]
continue
<ast.Try object at 0x7da1b1947070>
variable[permissions] assign[=] call[name[self].get_permissions, parameter[name[obj]]]
name[document].users_with_permissions assign[=] call[name[permissions]][constant[users]]
name[document].groups_with_permissions assign[=] call[name[permissions]][constant[groups]]
name[document].public_permission assign[=] call[name[permissions]][constant[public]]
call[name[self].push_queue.append, parameter[name[document]]] | keyword[def] identifier[process_object] ( identifier[self] , identifier[obj] ):
literal[string]
identifier[document] = identifier[self] . identifier[document_class] ( identifier[meta] ={ literal[string] : identifier[self] . identifier[generate_id] ( identifier[obj] )})
keyword[for] identifier[field] keyword[in] identifier[document] . identifier[_doc_type] . identifier[mapping] :
keyword[if] identifier[field] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[continue]
keyword[try] :
identifier[get_value_function] = identifier[getattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[field] ), keyword[None] )
keyword[if] identifier[get_value_function] :
identifier[setattr] ( identifier[document] , identifier[field] , identifier[get_value_function] ( identifier[obj] ))
keyword[continue]
keyword[if] identifier[field] keyword[in] identifier[self] . identifier[mapping] :
keyword[if] identifier[callable] ( identifier[self] . identifier[mapping] [ identifier[field] ]):
identifier[setattr] ( identifier[document] , identifier[field] , identifier[self] . identifier[mapping] [ identifier[field] ]( identifier[obj] ))
keyword[continue]
keyword[try] :
identifier[object_attr] = identifier[dict_dot] ( identifier[obj] , identifier[self] . identifier[mapping] [ identifier[field] ])
keyword[except] ( identifier[KeyError] , identifier[AttributeError] ):
identifier[object_attr] = keyword[None]
keyword[if] identifier[callable] ( identifier[object_attr] ):
identifier[setattr] ( identifier[document] , identifier[field] , identifier[object_attr] ( identifier[obj] ))
keyword[else] :
identifier[setattr] ( identifier[document] , identifier[field] , identifier[object_attr] )
keyword[continue]
keyword[try] :
identifier[object_value] = identifier[dict_dot] ( identifier[obj] , identifier[field] )
identifier[setattr] ( identifier[document] , identifier[field] , identifier[object_value] )
keyword[continue]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[field] ))
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] (
literal[string] ,
identifier[field] , identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[extra] ={ literal[string] : identifier[self] . identifier[object_type] , literal[string] : identifier[obj] . identifier[pk] }
)
identifier[permissions] = identifier[self] . identifier[get_permissions] ( identifier[obj] )
identifier[document] . identifier[users_with_permissions] = identifier[permissions] [ literal[string] ]
identifier[document] . identifier[groups_with_permissions] = identifier[permissions] [ literal[string] ]
identifier[document] . identifier[public_permission] = identifier[permissions] [ literal[string] ]
identifier[self] . identifier[push_queue] . identifier[append] ( identifier[document] ) | def process_object(self, obj):
"""Process current object and push it to the ElasticSearch."""
document = self.document_class(meta={'id': self.generate_id(obj)})
for field in document._doc_type.mapping: # pylint: disable=protected-access
if field in ['users_with_permissions', 'groups_with_permissions', 'public_permission']:
continue # These fields are handled separately # depends on [control=['if'], data=[]]
try:
# use get_X_value function
get_value_function = getattr(self, 'get_{}_value'.format(field), None)
if get_value_function:
setattr(document, field, get_value_function(obj)) # pylint: disable=not-callable
continue # depends on [control=['if'], data=[]]
# use `mapping` dict
if field in self.mapping:
if callable(self.mapping[field]):
setattr(document, field, self.mapping[field](obj))
continue # depends on [control=['if'], data=[]]
try:
object_attr = dict_dot(obj, self.mapping[field]) # depends on [control=['try'], data=[]]
except (KeyError, AttributeError):
object_attr = None # depends on [control=['except'], data=[]]
if callable(object_attr):
# use method on object
setattr(document, field, object_attr(obj)) # depends on [control=['if'], data=[]]
else:
# use attribute on object
setattr(document, field, object_attr)
continue # depends on [control=['if'], data=['field']]
# get value from the object
try:
object_value = dict_dot(obj, field)
setattr(document, field, object_value)
continue # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
raise AttributeError('Cannot determine mapping for field {}'.format(field)) # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
logger.exception("Error occurred while setting value of field '%s' in '%s' Elasticsearch index.", field, self.__class__.__name__, extra={'object_type': self.object_type, 'obj_id': obj.pk}) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['field']]
permissions = self.get_permissions(obj)
document.users_with_permissions = permissions['users']
document.groups_with_permissions = permissions['groups']
document.public_permission = permissions['public']
self.push_queue.append(document) |
def post(self, request, format=None):
"""
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
serializer = BotSerializer(data=request.data)
if serializer.is_valid():
bot = Bot.objects.create(owner=request.user,
name=serializer.data['name'])
return Response(BotSerializer(bot).data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | def function[post, parameter[self, request, format]]:
constant[
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
]
variable[serializer] assign[=] call[name[BotSerializer], parameter[]]
if call[name[serializer].is_valid, parameter[]] begin[:]
variable[bot] assign[=] call[name[Bot].objects.create, parameter[]]
return[call[name[Response], parameter[call[name[BotSerializer], parameter[name[bot]]].data]]]
return[call[name[Response], parameter[name[serializer].errors]]] | keyword[def] identifier[post] ( identifier[self] , identifier[request] , identifier[format] = keyword[None] ):
literal[string]
identifier[serializer] = identifier[BotSerializer] ( identifier[data] = identifier[request] . identifier[data] )
keyword[if] identifier[serializer] . identifier[is_valid] ():
identifier[bot] = identifier[Bot] . identifier[objects] . identifier[create] ( identifier[owner] = identifier[request] . identifier[user] ,
identifier[name] = identifier[serializer] . identifier[data] [ literal[string] ])
keyword[return] identifier[Response] ( identifier[BotSerializer] ( identifier[bot] ). identifier[data] , identifier[status] = identifier[status] . identifier[HTTP_201_CREATED] )
keyword[return] identifier[Response] ( identifier[serializer] . identifier[errors] , identifier[status] = identifier[status] . identifier[HTTP_400_BAD_REQUEST] ) | def post(self, request, format=None):
"""
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
serializer = BotSerializer(data=request.data)
if serializer.is_valid():
bot = Bot.objects.create(owner=request.user, name=serializer.data['name'])
return Response(BotSerializer(bot).data, status=status.HTTP_201_CREATED) # depends on [control=['if'], data=[]]
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) |
def _ParseDateTimeValue(self, byte_stream, file_offset):
"""Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed.
"""
datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')
try:
value = self._ReadStructureFromByteStream(
byte_stream, file_offset, datetime_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse datetime value with error: {0!s}'.format(exception))
direction_from_utc = chr(value.direction_from_utc)
rfc2579_date_time_tuple = (
value.year, value.month, value.day_of_month,
value.hours, value.minutes, value.seconds, value.deciseconds,
direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
return dfdatetime_rfc2579_date_time.RFC2579DateTime(
rfc2579_date_time_tuple=rfc2579_date_time_tuple) | def function[_ParseDateTimeValue, parameter[self, byte_stream, file_offset]]:
constant[Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed.
]
variable[datetime_value_map] assign[=] call[name[self]._GetDataTypeMap, parameter[constant[cups_ipp_datetime_value]]]
<ast.Try object at 0x7da2045649d0>
variable[direction_from_utc] assign[=] call[name[chr], parameter[name[value].direction_from_utc]]
variable[rfc2579_date_time_tuple] assign[=] tuple[[<ast.Attribute object at 0x7da204564ca0>, <ast.Attribute object at 0x7da204565450>, <ast.Attribute object at 0x7da204566410>, <ast.Attribute object at 0x7da2045668f0>, <ast.Attribute object at 0x7da204567790>, <ast.Attribute object at 0x7da204565390>, <ast.Attribute object at 0x7da2045656c0>, <ast.Name object at 0x7da20e9567d0>, <ast.Attribute object at 0x7da20e954a60>, <ast.Attribute object at 0x7da20e955b40>]]
return[call[name[dfdatetime_rfc2579_date_time].RFC2579DateTime, parameter[]]] | keyword[def] identifier[_ParseDateTimeValue] ( identifier[self] , identifier[byte_stream] , identifier[file_offset] ):
literal[string]
identifier[datetime_value_map] = identifier[self] . identifier[_GetDataTypeMap] ( literal[string] )
keyword[try] :
identifier[value] = identifier[self] . identifier[_ReadStructureFromByteStream] (
identifier[byte_stream] , identifier[file_offset] , identifier[datetime_value_map] )
keyword[except] ( identifier[ValueError] , identifier[errors] . identifier[ParseError] ) keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[ParseError] (
literal[string] . identifier[format] ( identifier[exception] ))
identifier[direction_from_utc] = identifier[chr] ( identifier[value] . identifier[direction_from_utc] )
identifier[rfc2579_date_time_tuple] =(
identifier[value] . identifier[year] , identifier[value] . identifier[month] , identifier[value] . identifier[day_of_month] ,
identifier[value] . identifier[hours] , identifier[value] . identifier[minutes] , identifier[value] . identifier[seconds] , identifier[value] . identifier[deciseconds] ,
identifier[direction_from_utc] , identifier[value] . identifier[hours_from_utc] , identifier[value] . identifier[minutes_from_utc] )
keyword[return] identifier[dfdatetime_rfc2579_date_time] . identifier[RFC2579DateTime] (
identifier[rfc2579_date_time_tuple] = identifier[rfc2579_date_time_tuple] ) | def _ParseDateTimeValue(self, byte_stream, file_offset):
"""Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed.
"""
datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')
try:
value = self._ReadStructureFromByteStream(byte_stream, file_offset, datetime_value_map) # depends on [control=['try'], data=[]]
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse datetime value with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
direction_from_utc = chr(value.direction_from_utc)
rfc2579_date_time_tuple = (value.year, value.month, value.day_of_month, value.hours, value.minutes, value.seconds, value.deciseconds, direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
return dfdatetime_rfc2579_date_time.RFC2579DateTime(rfc2579_date_time_tuple=rfc2579_date_time_tuple) |
def delete_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_event # noqa: E501
delete an Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_event(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | def function[delete_namespaced_event, parameter[self, name, namespace]]:
constant[delete_namespaced_event # noqa: E501
delete an Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_event(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].delete_namespaced_event_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[delete_namespaced_event] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[delete_namespaced_event_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[delete_namespaced_event_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
'delete_namespaced_event # noqa: E501\n\n delete an Event # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_namespaced_event(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the Event (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If \'true\', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.\n :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object\'s finalizers list. Either this field or PropagationPolicy may be set, but not both.\n :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: \'Orphan\' - orphan the dependents; \'Background\' - allow the garbage collector to delete the dependents in the background; \'Foreground\' - a cascading policy that deletes all dependents in the foreground.\n :param V1DeleteOptions body:\n :return: V1Status\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.delete_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def _login_azure_app_token(client_id=None, client_secret=None, tenant_id=None):
"""
Authenticate APP using token credentials:
https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python
:return: ~ServicePrincipalCredentials credentials
"""
client_id = os.getenv('AZURE_CLIENT_ID') if not client_id else client_id
client_secret = os.getenv('AZURE_CLIENT_SECRET') if not client_secret else client_secret
tenant_id = os.getenv('AZURE_TENANT_ID') if not tenant_id else tenant_id
credentials = ServicePrincipalCredentials(
client_id=client_id,
secret=client_secret,
tenant=tenant_id,
)
return credentials | def function[_login_azure_app_token, parameter[client_id, client_secret, tenant_id]]:
constant[
Authenticate APP using token credentials:
https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python
:return: ~ServicePrincipalCredentials credentials
]
variable[client_id] assign[=] <ast.IfExp object at 0x7da1b195ec50>
variable[client_secret] assign[=] <ast.IfExp object at 0x7da1b195d900>
variable[tenant_id] assign[=] <ast.IfExp object at 0x7da2041d9c00>
variable[credentials] assign[=] call[name[ServicePrincipalCredentials], parameter[]]
return[name[credentials]] | keyword[def] identifier[_login_azure_app_token] ( identifier[client_id] = keyword[None] , identifier[client_secret] = keyword[None] , identifier[tenant_id] = keyword[None] ):
literal[string]
identifier[client_id] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[if] keyword[not] identifier[client_id] keyword[else] identifier[client_id]
identifier[client_secret] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[if] keyword[not] identifier[client_secret] keyword[else] identifier[client_secret]
identifier[tenant_id] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[if] keyword[not] identifier[tenant_id] keyword[else] identifier[tenant_id]
identifier[credentials] = identifier[ServicePrincipalCredentials] (
identifier[client_id] = identifier[client_id] ,
identifier[secret] = identifier[client_secret] ,
identifier[tenant] = identifier[tenant_id] ,
)
keyword[return] identifier[credentials] | def _login_azure_app_token(client_id=None, client_secret=None, tenant_id=None):
"""
Authenticate APP using token credentials:
https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python
:return: ~ServicePrincipalCredentials credentials
"""
client_id = os.getenv('AZURE_CLIENT_ID') if not client_id else client_id
client_secret = os.getenv('AZURE_CLIENT_SECRET') if not client_secret else client_secret
tenant_id = os.getenv('AZURE_TENANT_ID') if not tenant_id else tenant_id
credentials = ServicePrincipalCredentials(client_id=client_id, secret=client_secret, tenant=tenant_id)
return credentials |
def _my_eps_formatter(logodata, format, ordered_alphabets) :
""" Generate a logo in Encapsulated Postscript (EPS)
Modified from weblogo version 3.4 source code.
*ordered_alphabets* is a dictionary keyed by zero-indexed
consecutive sites, with values giving order of characters
from bottom to top.
"""
substitutions = {}
from_format =[
"creation_date", "logo_width", "logo_height",
"lines_per_logo", "line_width", "line_height",
"line_margin_right","line_margin_left", "line_margin_bottom",
"line_margin_top", "title_height", "xaxis_label_height",
"creator_text", "logo_title", "logo_margin",
"stroke_width", "tic_length",
"stacks_per_line", "stack_margin",
"yaxis_label", "yaxis_tic_interval", "yaxis_minor_tic_interval",
"xaxis_label", "xaxis_tic_interval", "number_interval",
"fineprint", "shrink_fraction", "errorbar_fraction",
"errorbar_width_fraction",
"errorbar_gray", "small_fontsize", "fontsize",
"title_fontsize", "number_fontsize", "text_font",
"logo_font", "title_font",
"logo_label", "yaxis_scale", "end_type",
"debug", "show_title", "show_xaxis",
"show_xaxis_label", "show_yaxis", "show_yaxis_label",
"show_boxes", "show_errorbars", "show_fineprint",
"rotate_numbers", "show_ends", "stack_height",
"stack_width"
]
for s in from_format :
substitutions[s] = getattr(format,s)
substitutions["shrink"] = str(format.show_boxes).lower()
# --------- COLORS --------------
def format_color(color):
return " ".join( ("[",str(color.red) , str(color.green),
str(color.blue), "]"))
substitutions["default_color"] = format_color(format.default_color)
colors = []
if hasattr(format.color_scheme, 'rules'):
grouplist = format.color_scheme.rules
else:
# this line needed for weblogo 3.4
grouplist = format.color_scheme.groups
for group in grouplist:
cf = format_color(group.color)
for s in group.symbols :
colors.append( " ("+s+") " + cf )
substitutions["color_dict"] = "\n".join(colors)
data = []
# Unit conversion. 'None' for probability units
conv_factor = None #JDB
#JDB conv_factor = std_units[format.unit_name]
data.append("StartLine")
seq_from = format.logo_start- format.first_index
seq_to = format.logo_end - format.first_index +1
# seq_index : zero based index into sequence data
# logo_index : User visible coordinate, first_index based
# stack_index : zero based index of visible stacks
for seq_index in range(seq_from, seq_to) :
logo_index = seq_index + format.first_index
stack_index = seq_index - seq_from
if stack_index!=0 and (stack_index % format.stacks_per_line) ==0 :
data.append("")
data.append("EndLine")
data.append("StartLine")
data.append("")
data.append("(%s) StartStack" % format.annotate[seq_index] )
if conv_factor:
stack_height = logodata.entropy[seq_index] * std_units[format.unit_name]
else :
stack_height = 1.0 # Probability
# The following code modified by JDB to use ordered_alphabets
# and also to replace the "blank" characters 'b' and 'B'
# by spaces.
s_d = dict(zip(logodata.alphabet, logodata.counts[seq_index]))
s = []
for aa in ordered_alphabets[seq_index]:
if aa not in ['B', 'b']:
s.append((s_d[aa], aa))
else:
s.append((s_d[aa], ' '))
# s = [(s_d[aa], aa) for aa in ordered_alphabets[seq_index]]
# Sort by frequency. If equal frequency then reverse alphabetic
# (So sort reverse alphabetic first, then frequencty)
# TODO: doublecheck this actual works
#s = list(zip(logodata.counts[seq_index], logodata.alphabet))
#s.sort(key= lambda x: x[1])
#s.reverse()
#s.sort(key= lambda x: x[0])
#if not format.reverse_stacks: s.reverse()
C = float(sum(logodata.counts[seq_index]))
if C > 0.0 :
fraction_width = 1.0
if format.scale_width :
fraction_width = logodata.weight[seq_index]
# print(fraction_width, file=sys.stderr)
for c in s:
data.append(" %f %f (%s) ShowSymbol" % (fraction_width, c[0]*stack_height/C, c[1]) )
# Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
if logodata.entropy_interval is not None and conv_factor and C>0.0:
low, high = logodata.entropy_interval[seq_index]
center = logodata.entropy[seq_index]
low *= conv_factor
high *= conv_factor
center *=conv_factor
if high> format.yaxis_scale : high = format.yaxis_scale
down = (center - low)
up = (high - center)
data.append(" %f %f DrawErrorbar" % (down, up) )
data.append("EndStack")
data.append("")
data.append("EndLine")
substitutions["logo_data"] = "\n".join(data)
# Create and output logo
template = corebio.utils.resource_string( __name__, '_weblogo_template.eps', __file__).decode()
logo = string.Template(template).substitute(substitutions)
return logo.encode() | def function[_my_eps_formatter, parameter[logodata, format, ordered_alphabets]]:
constant[ Generate a logo in Encapsulated Postscript (EPS)
Modified from weblogo version 3.4 source code.
*ordered_alphabets* is a dictionary keyed by zero-indexed
consecutive sites, with values giving order of characters
from bottom to top.
]
variable[substitutions] assign[=] dictionary[[], []]
variable[from_format] assign[=] list[[<ast.Constant object at 0x7da20e9b1a20>, <ast.Constant object at 0x7da20e9b16c0>, <ast.Constant object at 0x7da20e9b0610>, <ast.Constant object at 0x7da20e9b32e0>, <ast.Constant object at 0x7da20e9b3b20>, <ast.Constant object at 0x7da20e9b3340>, <ast.Constant object at 0x7da20e9b3220>, <ast.Constant object at 0x7da20e9b07c0>, <ast.Constant object at 0x7da20e9b14b0>, <ast.Constant object at 0x7da20e9b0be0>, <ast.Constant object at 0x7da20e9b2890>, <ast.Constant object at 0x7da20e9b2080>, <ast.Constant object at 0x7da20e9b34c0>, <ast.Constant object at 0x7da20e9b3c40>, <ast.Constant object at 0x7da20e9b0790>, <ast.Constant object at 0x7da20e9b2770>, <ast.Constant object at 0x7da20e9b1120>, <ast.Constant object at 0x7da20e9b0520>, <ast.Constant object at 0x7da20e9b3e80>, <ast.Constant object at 0x7da20e9b1180>, <ast.Constant object at 0x7da20e9b2cb0>, <ast.Constant object at 0x7da20e9b3610>, <ast.Constant object at 0x7da20e9b1ff0>, <ast.Constant object at 0x7da20e9b2590>, <ast.Constant object at 0x7da20e9b0df0>, <ast.Constant object at 0x7da20e9b38b0>, <ast.Constant object at 0x7da20e9b02e0>, <ast.Constant object at 0x7da20e9b0970>, <ast.Constant object at 0x7da20e9b1360>, <ast.Constant object at 0x7da20e9b0e50>, <ast.Constant object at 0x7da20e9b0cd0>, <ast.Constant object at 0x7da20e9b0f10>, <ast.Constant object at 0x7da20e9b0220>, <ast.Constant object at 0x7da20e9b0430>, <ast.Constant object at 0x7da20e9b1ab0>, <ast.Constant object at 0x7da20e9b0100>, <ast.Constant object at 0x7da20e9b0eb0>, <ast.Constant object at 0x7da20e9b08e0>, <ast.Constant object at 0x7da20e9b0070>, <ast.Constant object at 0x7da20e9b0a60>, <ast.Constant object at 0x7da20e9b2680>, <ast.Constant object at 0x7da20e9b3910>, <ast.Constant object at 0x7da20e9b0ac0>, <ast.Constant object at 0x7da20e9b2d40>, <ast.Constant object at 0x7da20e9b0370>, <ast.Constant object at 0x7da20e9b1ea0>, <ast.Constant object at 0x7da20e9b2bf0>, <ast.Constant object at 
0x7da20e9b0e80>, <ast.Constant object at 0x7da20e9b20e0>, <ast.Constant object at 0x7da20e9b1a80>, <ast.Constant object at 0x7da20e9b1ba0>, <ast.Constant object at 0x7da20e9b25f0>, <ast.Constant object at 0x7da20e9b0d00>]]
for taget[name[s]] in starred[name[from_format]] begin[:]
call[name[substitutions]][name[s]] assign[=] call[name[getattr], parameter[name[format], name[s]]]
call[name[substitutions]][constant[shrink]] assign[=] call[call[name[str], parameter[name[format].show_boxes]].lower, parameter[]]
def function[format_color, parameter[color]]:
return[call[constant[ ].join, parameter[tuple[[<ast.Constant object at 0x7da20e9b1c60>, <ast.Call object at 0x7da20e9b01f0>, <ast.Call object at 0x7da20e9b0280>, <ast.Call object at 0x7da20e9b0670>, <ast.Constant object at 0x7da20e9b04c0>]]]]]
call[name[substitutions]][constant[default_color]] assign[=] call[name[format_color], parameter[name[format].default_color]]
variable[colors] assign[=] list[[]]
if call[name[hasattr], parameter[name[format].color_scheme, constant[rules]]] begin[:]
variable[grouplist] assign[=] name[format].color_scheme.rules
for taget[name[group]] in starred[name[grouplist]] begin[:]
variable[cf] assign[=] call[name[format_color], parameter[name[group].color]]
for taget[name[s]] in starred[name[group].symbols] begin[:]
call[name[colors].append, parameter[binary_operation[binary_operation[binary_operation[constant[ (] + name[s]] + constant[) ]] + name[cf]]]]
call[name[substitutions]][constant[color_dict]] assign[=] call[constant[
].join, parameter[name[colors]]]
variable[data] assign[=] list[[]]
variable[conv_factor] assign[=] constant[None]
call[name[data].append, parameter[constant[StartLine]]]
variable[seq_from] assign[=] binary_operation[name[format].logo_start - name[format].first_index]
variable[seq_to] assign[=] binary_operation[binary_operation[name[format].logo_end - name[format].first_index] + constant[1]]
for taget[name[seq_index]] in starred[call[name[range], parameter[name[seq_from], name[seq_to]]]] begin[:]
variable[logo_index] assign[=] binary_operation[name[seq_index] + name[format].first_index]
variable[stack_index] assign[=] binary_operation[name[seq_index] - name[seq_from]]
if <ast.BoolOp object at 0x7da20e9b3a30> begin[:]
call[name[data].append, parameter[constant[]]]
call[name[data].append, parameter[constant[EndLine]]]
call[name[data].append, parameter[constant[StartLine]]]
call[name[data].append, parameter[constant[]]]
call[name[data].append, parameter[binary_operation[constant[(%s) StartStack] <ast.Mod object at 0x7da2590d6920> call[name[format].annotate][name[seq_index]]]]]
if name[conv_factor] begin[:]
variable[stack_height] assign[=] binary_operation[call[name[logodata].entropy][name[seq_index]] * call[name[std_units]][name[format].unit_name]]
variable[s_d] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[logodata].alphabet, call[name[logodata].counts][name[seq_index]]]]]]
variable[s] assign[=] list[[]]
for taget[name[aa]] in starred[call[name[ordered_alphabets]][name[seq_index]]] begin[:]
if compare[name[aa] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b26ad330>, <ast.Constant object at 0x7da1b26ae560>]]] begin[:]
call[name[s].append, parameter[tuple[[<ast.Subscript object at 0x7da1b26ad1b0>, <ast.Name object at 0x7da1b26acf70>]]]]
variable[C] assign[=] call[name[float], parameter[call[name[sum], parameter[call[name[logodata].counts][name[seq_index]]]]]]
if compare[name[C] greater[>] constant[0.0]] begin[:]
variable[fraction_width] assign[=] constant[1.0]
if name[format].scale_width begin[:]
variable[fraction_width] assign[=] call[name[logodata].weight][name[seq_index]]
for taget[name[c]] in starred[name[s]] begin[:]
call[name[data].append, parameter[binary_operation[constant[ %f %f (%s) ShowSymbol] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26af2e0>, <ast.BinOp object at 0x7da1b26ae7d0>, <ast.Subscript object at 0x7da1b26ac1f0>]]]]]
if <ast.BoolOp object at 0x7da1b26afa90> begin[:]
<ast.Tuple object at 0x7da1b26ad660> assign[=] call[name[logodata].entropy_interval][name[seq_index]]
variable[center] assign[=] call[name[logodata].entropy][name[seq_index]]
<ast.AugAssign object at 0x7da1b26ae680>
<ast.AugAssign object at 0x7da1b26af340>
<ast.AugAssign object at 0x7da1b26acb50>
if compare[name[high] greater[>] name[format].yaxis_scale] begin[:]
variable[high] assign[=] name[format].yaxis_scale
variable[down] assign[=] binary_operation[name[center] - name[low]]
variable[up] assign[=] binary_operation[name[high] - name[center]]
call[name[data].append, parameter[binary_operation[constant[ %f %f DrawErrorbar] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ac5e0>, <ast.Name object at 0x7da1b26addb0>]]]]]
call[name[data].append, parameter[constant[EndStack]]]
call[name[data].append, parameter[constant[]]]
call[name[data].append, parameter[constant[EndLine]]]
call[name[substitutions]][constant[logo_data]] assign[=] call[constant[
].join, parameter[name[data]]]
variable[template] assign[=] call[call[name[corebio].utils.resource_string, parameter[name[__name__], constant[_weblogo_template.eps], name[__file__]]].decode, parameter[]]
variable[logo] assign[=] call[call[name[string].Template, parameter[name[template]]].substitute, parameter[name[substitutions]]]
return[call[name[logo].encode, parameter[]]] | keyword[def] identifier[_my_eps_formatter] ( identifier[logodata] , identifier[format] , identifier[ordered_alphabets] ):
literal[string]
identifier[substitutions] ={}
identifier[from_format] =[
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string]
]
keyword[for] identifier[s] keyword[in] identifier[from_format] :
identifier[substitutions] [ identifier[s] ]= identifier[getattr] ( identifier[format] , identifier[s] )
identifier[substitutions] [ literal[string] ]= identifier[str] ( identifier[format] . identifier[show_boxes] ). identifier[lower] ()
keyword[def] identifier[format_color] ( identifier[color] ):
keyword[return] literal[string] . identifier[join] (( literal[string] , identifier[str] ( identifier[color] . identifier[red] ), identifier[str] ( identifier[color] . identifier[green] ),
identifier[str] ( identifier[color] . identifier[blue] ), literal[string] ))
identifier[substitutions] [ literal[string] ]= identifier[format_color] ( identifier[format] . identifier[default_color] )
identifier[colors] =[]
keyword[if] identifier[hasattr] ( identifier[format] . identifier[color_scheme] , literal[string] ):
identifier[grouplist] = identifier[format] . identifier[color_scheme] . identifier[rules]
keyword[else] :
identifier[grouplist] = identifier[format] . identifier[color_scheme] . identifier[groups]
keyword[for] identifier[group] keyword[in] identifier[grouplist] :
identifier[cf] = identifier[format_color] ( identifier[group] . identifier[color] )
keyword[for] identifier[s] keyword[in] identifier[group] . identifier[symbols] :
identifier[colors] . identifier[append] ( literal[string] + identifier[s] + literal[string] + identifier[cf] )
identifier[substitutions] [ literal[string] ]= literal[string] . identifier[join] ( identifier[colors] )
identifier[data] =[]
identifier[conv_factor] = keyword[None]
identifier[data] . identifier[append] ( literal[string] )
identifier[seq_from] = identifier[format] . identifier[logo_start] - identifier[format] . identifier[first_index]
identifier[seq_to] = identifier[format] . identifier[logo_end] - identifier[format] . identifier[first_index] + literal[int]
keyword[for] identifier[seq_index] keyword[in] identifier[range] ( identifier[seq_from] , identifier[seq_to] ):
identifier[logo_index] = identifier[seq_index] + identifier[format] . identifier[first_index]
identifier[stack_index] = identifier[seq_index] - identifier[seq_from]
keyword[if] identifier[stack_index] != literal[int] keyword[and] ( identifier[stack_index] % identifier[format] . identifier[stacks_per_line] )== literal[int] :
identifier[data] . identifier[append] ( literal[string] )
identifier[data] . identifier[append] ( literal[string] )
identifier[data] . identifier[append] ( literal[string] )
identifier[data] . identifier[append] ( literal[string] )
identifier[data] . identifier[append] ( literal[string] % identifier[format] . identifier[annotate] [ identifier[seq_index] ])
keyword[if] identifier[conv_factor] :
identifier[stack_height] = identifier[logodata] . identifier[entropy] [ identifier[seq_index] ]* identifier[std_units] [ identifier[format] . identifier[unit_name] ]
keyword[else] :
identifier[stack_height] = literal[int]
identifier[s_d] = identifier[dict] ( identifier[zip] ( identifier[logodata] . identifier[alphabet] , identifier[logodata] . identifier[counts] [ identifier[seq_index] ]))
identifier[s] =[]
keyword[for] identifier[aa] keyword[in] identifier[ordered_alphabets] [ identifier[seq_index] ]:
keyword[if] identifier[aa] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[s] . identifier[append] (( identifier[s_d] [ identifier[aa] ], identifier[aa] ))
keyword[else] :
identifier[s] . identifier[append] (( identifier[s_d] [ identifier[aa] ], literal[string] ))
identifier[C] = identifier[float] ( identifier[sum] ( identifier[logodata] . identifier[counts] [ identifier[seq_index] ]))
keyword[if] identifier[C] > literal[int] :
identifier[fraction_width] = literal[int]
keyword[if] identifier[format] . identifier[scale_width] :
identifier[fraction_width] = identifier[logodata] . identifier[weight] [ identifier[seq_index] ]
keyword[for] identifier[c] keyword[in] identifier[s] :
identifier[data] . identifier[append] ( literal[string] %( identifier[fraction_width] , identifier[c] [ literal[int] ]* identifier[stack_height] / identifier[C] , identifier[c] [ literal[int] ]))
keyword[if] identifier[logodata] . identifier[entropy_interval] keyword[is] keyword[not] keyword[None] keyword[and] identifier[conv_factor] keyword[and] identifier[C] > literal[int] :
identifier[low] , identifier[high] = identifier[logodata] . identifier[entropy_interval] [ identifier[seq_index] ]
identifier[center] = identifier[logodata] . identifier[entropy] [ identifier[seq_index] ]
identifier[low] *= identifier[conv_factor]
identifier[high] *= identifier[conv_factor]
identifier[center] *= identifier[conv_factor]
keyword[if] identifier[high] > identifier[format] . identifier[yaxis_scale] : identifier[high] = identifier[format] . identifier[yaxis_scale]
identifier[down] =( identifier[center] - identifier[low] )
identifier[up] =( identifier[high] - identifier[center] )
identifier[data] . identifier[append] ( literal[string] %( identifier[down] , identifier[up] ))
identifier[data] . identifier[append] ( literal[string] )
identifier[data] . identifier[append] ( literal[string] )
identifier[data] . identifier[append] ( literal[string] )
identifier[substitutions] [ literal[string] ]= literal[string] . identifier[join] ( identifier[data] )
identifier[template] = identifier[corebio] . identifier[utils] . identifier[resource_string] ( identifier[__name__] , literal[string] , identifier[__file__] ). identifier[decode] ()
identifier[logo] = identifier[string] . identifier[Template] ( identifier[template] ). identifier[substitute] ( identifier[substitutions] )
keyword[return] identifier[logo] . identifier[encode] () | def _my_eps_formatter(logodata, format, ordered_alphabets):
""" Generate a logo in Encapsulated Postscript (EPS)
Modified from weblogo version 3.4 source code.
*ordered_alphabets* is a dictionary keyed by zero-indexed
consecutive sites, with values giving order of characters
from bottom to top.
"""
substitutions = {}
from_format = ['creation_date', 'logo_width', 'logo_height', 'lines_per_logo', 'line_width', 'line_height', 'line_margin_right', 'line_margin_left', 'line_margin_bottom', 'line_margin_top', 'title_height', 'xaxis_label_height', 'creator_text', 'logo_title', 'logo_margin', 'stroke_width', 'tic_length', 'stacks_per_line', 'stack_margin', 'yaxis_label', 'yaxis_tic_interval', 'yaxis_minor_tic_interval', 'xaxis_label', 'xaxis_tic_interval', 'number_interval', 'fineprint', 'shrink_fraction', 'errorbar_fraction', 'errorbar_width_fraction', 'errorbar_gray', 'small_fontsize', 'fontsize', 'title_fontsize', 'number_fontsize', 'text_font', 'logo_font', 'title_font', 'logo_label', 'yaxis_scale', 'end_type', 'debug', 'show_title', 'show_xaxis', 'show_xaxis_label', 'show_yaxis', 'show_yaxis_label', 'show_boxes', 'show_errorbars', 'show_fineprint', 'rotate_numbers', 'show_ends', 'stack_height', 'stack_width']
for s in from_format:
substitutions[s] = getattr(format, s) # depends on [control=['for'], data=['s']]
substitutions['shrink'] = str(format.show_boxes).lower()
# --------- COLORS --------------
def format_color(color):
return ' '.join(('[', str(color.red), str(color.green), str(color.blue), ']'))
substitutions['default_color'] = format_color(format.default_color)
colors = []
if hasattr(format.color_scheme, 'rules'):
grouplist = format.color_scheme.rules # depends on [control=['if'], data=[]]
else:
# this line needed for weblogo 3.4
grouplist = format.color_scheme.groups
for group in grouplist:
cf = format_color(group.color)
for s in group.symbols:
colors.append(' (' + s + ') ' + cf) # depends on [control=['for'], data=['s']] # depends on [control=['for'], data=['group']]
substitutions['color_dict'] = '\n'.join(colors)
data = []
# Unit conversion. 'None' for probability units
conv_factor = None #JDB
#JDB conv_factor = std_units[format.unit_name]
data.append('StartLine')
seq_from = format.logo_start - format.first_index
seq_to = format.logo_end - format.first_index + 1
# seq_index : zero based index into sequence data
# logo_index : User visible coordinate, first_index based
# stack_index : zero based index of visible stacks
for seq_index in range(seq_from, seq_to):
logo_index = seq_index + format.first_index
stack_index = seq_index - seq_from
if stack_index != 0 and stack_index % format.stacks_per_line == 0:
data.append('')
data.append('EndLine')
data.append('StartLine')
data.append('') # depends on [control=['if'], data=[]]
data.append('(%s) StartStack' % format.annotate[seq_index])
if conv_factor:
stack_height = logodata.entropy[seq_index] * std_units[format.unit_name] # depends on [control=['if'], data=[]]
else:
stack_height = 1.0 # Probability
# The following code modified by JDB to use ordered_alphabets
# and also to replace the "blank" characters 'b' and 'B'
# by spaces.
s_d = dict(zip(logodata.alphabet, logodata.counts[seq_index]))
s = []
for aa in ordered_alphabets[seq_index]:
if aa not in ['B', 'b']:
s.append((s_d[aa], aa)) # depends on [control=['if'], data=['aa']]
else:
s.append((s_d[aa], ' ')) # depends on [control=['for'], data=['aa']]
# s = [(s_d[aa], aa) for aa in ordered_alphabets[seq_index]]
# Sort by frequency. If equal frequency then reverse alphabetic
# (So sort reverse alphabetic first, then frequencty)
# TODO: doublecheck this actual works
#s = list(zip(logodata.counts[seq_index], logodata.alphabet))
#s.sort(key= lambda x: x[1])
#s.reverse()
#s.sort(key= lambda x: x[0])
#if not format.reverse_stacks: s.reverse()
C = float(sum(logodata.counts[seq_index]))
if C > 0.0:
fraction_width = 1.0
if format.scale_width:
fraction_width = logodata.weight[seq_index] # depends on [control=['if'], data=[]]
# print(fraction_width, file=sys.stderr)
for c in s:
data.append(' %f %f (%s) ShowSymbol' % (fraction_width, c[0] * stack_height / C, c[1])) # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=['C']]
# Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
if logodata.entropy_interval is not None and conv_factor and (C > 0.0):
(low, high) = logodata.entropy_interval[seq_index]
center = logodata.entropy[seq_index]
low *= conv_factor
high *= conv_factor
center *= conv_factor
if high > format.yaxis_scale:
high = format.yaxis_scale # depends on [control=['if'], data=['high']]
down = center - low
up = high - center
data.append(' %f %f DrawErrorbar' % (down, up)) # depends on [control=['if'], data=[]]
data.append('EndStack')
data.append('') # depends on [control=['for'], data=['seq_index']]
data.append('EndLine')
substitutions['logo_data'] = '\n'.join(data)
# Create and output logo
template = corebio.utils.resource_string(__name__, '_weblogo_template.eps', __file__).decode()
logo = string.Template(template).substitute(substitutions)
return logo.encode() |
def wait_any(futures, timeout=None):
    '''Wait until any one (the first) of a group of futures completes.

    :param list futures: A list of :class:`Future`\\s
    :param timeout:
        The maximum time to wait. With ``None``, will block indefinitely.
    :type timeout: float or None
    :returns:
        One of the futures from the provided list -- the first one to become
        complete (or any of the ones that were already complete).
    :raises WaitTimeout: if a timeout is provided and hit
    '''
    # Fast path: if anything is already complete, hand it back immediately.
    for candidate in futures:
        if candidate.complete:
            return candidate

    # Register a shared waiter with every future so the first completion
    # wakes us up.
    waiter = _Wait(futures)
    for candidate in futures:
        candidate._waits.add(waiter)

    # NOTE(review): this treats a truthy return from .wait() as "timed out".
    # That matches a project-local Event-like object whose wait() signals
    # timeout -- confirm, since threading.Event.wait returns True when set.
    if waiter.done.wait(timeout):
        raise errors.WaitTimeout()
    return waiter.completed_future
constant[Wait for the completion of any (the first) one of multiple futures
:param list futures: A list of :class:`Future`\s
:param timeout:
The maximum time to wait. With ``None``, will block indefinitely.
:type timeout: float or None
:returns:
One of the futures from the provided list -- the first one to become
complete (or any of the ones that were already complete).
:raises WaitTimeout: if a timeout is provided and hit
]
for taget[name[fut]] in starred[name[futures]] begin[:]
if name[fut].complete begin[:]
return[name[fut]]
variable[wait] assign[=] call[name[_Wait], parameter[name[futures]]]
for taget[name[fut]] in starred[name[futures]] begin[:]
call[name[fut]._waits.add, parameter[name[wait]]]
if call[name[wait].done.wait, parameter[name[timeout]]] begin[:]
<ast.Raise object at 0x7da2054a6890>
return[name[wait].completed_future] | keyword[def] identifier[wait_any] ( identifier[futures] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[for] identifier[fut] keyword[in] identifier[futures] :
keyword[if] identifier[fut] . identifier[complete] :
keyword[return] identifier[fut]
identifier[wait] = identifier[_Wait] ( identifier[futures] )
keyword[for] identifier[fut] keyword[in] identifier[futures] :
identifier[fut] . identifier[_waits] . identifier[add] ( identifier[wait] )
keyword[if] identifier[wait] . identifier[done] . identifier[wait] ( identifier[timeout] ):
keyword[raise] identifier[errors] . identifier[WaitTimeout] ()
keyword[return] identifier[wait] . identifier[completed_future] | def wait_any(futures, timeout=None):
"""Wait for the completion of any (the first) one of multiple futures
:param list futures: A list of :class:`Future`\\s
:param timeout:
The maximum time to wait. With ``None``, will block indefinitely.
:type timeout: float or None
:returns:
One of the futures from the provided list -- the first one to become
complete (or any of the ones that were already complete).
:raises WaitTimeout: if a timeout is provided and hit
"""
for fut in futures:
if fut.complete:
return fut # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fut']]
wait = _Wait(futures)
for fut in futures:
fut._waits.add(wait) # depends on [control=['for'], data=['fut']]
if wait.done.wait(timeout):
raise errors.WaitTimeout() # depends on [control=['if'], data=[]]
return wait.completed_future |
def v_unique_name_leaf_list(ctx, stmt):
    """Make sure config true leaf-lists do not have duplicate defaults.

    Reports a ``DUPLICATE_DEFAULT`` error at the statement's position for
    every repeated occurrence of a default value. Non-config leaf-lists
    are skipped entirely.
    """
    if not stmt.i_config:
        return
    # A list (not a set) is used deliberately: default values are only
    # assumed to support equality, not hashing.
    checked = []
    for value in stmt.i_default:
        if value not in checked:
            checked.append(value)
        else:
            err_add(ctx.errors, stmt.pos, 'DUPLICATE_DEFAULT', value)
constant[Make sure config true leaf-lists do nothave duplicate defaults]
if <ast.UnaryOp object at 0x7da204567880> begin[:]
return[None]
variable[seen] assign[=] list[[]]
for taget[name[defval]] in starred[name[stmt].i_default] begin[:]
if compare[name[defval] in name[seen]] begin[:]
call[name[err_add], parameter[name[ctx].errors, name[stmt].pos, constant[DUPLICATE_DEFAULT], name[defval]]] | keyword[def] identifier[v_unique_name_leaf_list] ( identifier[ctx] , identifier[stmt] ):
literal[string]
keyword[if] keyword[not] identifier[stmt] . identifier[i_config] :
keyword[return]
identifier[seen] =[]
keyword[for] identifier[defval] keyword[in] identifier[stmt] . identifier[i_default] :
keyword[if] identifier[defval] keyword[in] identifier[seen] :
identifier[err_add] ( identifier[ctx] . identifier[errors] , identifier[stmt] . identifier[pos] , literal[string] ,( identifier[defval] ))
keyword[else] :
identifier[seen] . identifier[append] ( identifier[defval] ) | def v_unique_name_leaf_list(ctx, stmt):
"""Make sure config true leaf-lists do nothave duplicate defaults"""
if not stmt.i_config:
return # depends on [control=['if'], data=[]]
seen = []
for defval in stmt.i_default:
if defval in seen:
err_add(ctx.errors, stmt.pos, 'DUPLICATE_DEFAULT', defval) # depends on [control=['if'], data=['defval']]
else:
seen.append(defval) # depends on [control=['for'], data=['defval']] |
def from_json(cls, data):
    """Create a datetime from a dictionary.

    Args:
        data: {
            'month': A value for month between 1-12. (Default: 1)
            'day': A value for day between 1-31. (Default: 1)
            'hour': A value for hour between 0-23. (Default: 0)
            'minute': A value for minute between 0-59. (Default: 0)
            'year': 2016 (treated as a leap year) or any other year.
                (Default: 2017)
        }

    Note:
        Missing keys are filled into ``data`` in place, matching the
        original behavior.
    """
    # setdefault replaces the repetitive "if key not in data" blocks and
    # preserves the original in-place mutation of the caller's dict.
    data.setdefault('month', 1)
    data.setdefault('day', 1)
    data.setdefault('hour', 0)
    data.setdefault('minute', 0)
    data.setdefault('year', 2017)
    # Only the year 2016 is considered a leap year by this model.
    leap_year = int(data['year']) == 2016
    return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year)
constant[Creat datetime from a dictionary.
Args:
data: {
'month': A value for month between 1-12. (Defualt: 1)
'day': A value for day between 1-31. (Defualt: 1)
'hour': A value for hour between 0-23. (Defualt: 0)
'minute': A value for month between 0-59. (Defualt: 0)
}
]
if compare[constant[month] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][constant[month]] assign[=] constant[1]
if compare[constant[day] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][constant[day]] assign[=] constant[1]
if compare[constant[hour] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][constant[hour]] assign[=] constant[0]
if compare[constant[minute] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][constant[minute]] assign[=] constant[0]
if compare[constant[year] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][constant[year]] assign[=] constant[2017]
variable[leap_year] assign[=] <ast.IfExp object at 0x7da1b1267250>
return[call[name[cls], parameter[call[name[data]][constant[month]], call[name[data]][constant[day]], call[name[data]][constant[hour]], call[name[data]][constant[minute]], name[leap_year]]]] | keyword[def] identifier[from_json] ( identifier[cls] , identifier[data] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= literal[int]
identifier[leap_year] = keyword[True] keyword[if] identifier[int] ( identifier[data] [ literal[string] ])== literal[int] keyword[else] keyword[False]
keyword[return] identifier[cls] ( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[leap_year] ) | def from_json(cls, data):
"""Creat datetime from a dictionary.
Args:
data: {
'month': A value for month between 1-12. (Defualt: 1)
'day': A value for day between 1-31. (Defualt: 1)
'hour': A value for hour between 0-23. (Defualt: 0)
'minute': A value for month between 0-59. (Defualt: 0)
}
"""
if 'month' not in data:
data['month'] = 1 # depends on [control=['if'], data=['data']]
if 'day' not in data:
data['day'] = 1 # depends on [control=['if'], data=['data']]
if 'hour' not in data:
data['hour'] = 0 # depends on [control=['if'], data=['data']]
if 'minute' not in data:
data['minute'] = 0 # depends on [control=['if'], data=['data']]
if 'year' not in data:
data['year'] = 2017 # depends on [control=['if'], data=['data']]
leap_year = True if int(data['year']) == 2016 else False
return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year) |
def normalize_r_value(self):
    '''
    Normalize r-value.
    Override.
    This method is called in each learning steps.
    For example:
        self.r_df = self.r_df.r_value / self.r_df.r_value.sum()
    '''
    # Nothing to normalize when there is no reward frame or it is empty.
    if self.r_df is None:
        return
    if not self.r_df.shape[0]:
        return
    # z-score normalization: subtract the mean, divide by the sample std.
    rewards = self.r_df.r_value
    self.r_df.r_value = (rewards - rewards.mean()) / rewards.std()
constant[
Normalize r-value.
Override.
This method is called in each learning steps.
For example:
self.r_df = self.r_df.r_value / self.r_df.r_value.sum()
]
if <ast.BoolOp object at 0x7da204960a30> begin[:]
name[self].r_df.r_value assign[=] binary_operation[binary_operation[name[self].r_df.r_value - call[name[self].r_df.r_value.mean, parameter[]]] / call[name[self].r_df.r_value.std, parameter[]]] | keyword[def] identifier[normalize_r_value] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[r_df] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[r_df] . identifier[shape] [ literal[int] ]:
identifier[self] . identifier[r_df] . identifier[r_value] =( identifier[self] . identifier[r_df] . identifier[r_value] - identifier[self] . identifier[r_df] . identifier[r_value] . identifier[mean] ())/ identifier[self] . identifier[r_df] . identifier[r_value] . identifier[std] () | def normalize_r_value(self):
"""
Normalize r-value.
Override.
This method is called in each learning steps.
For example:
self.r_df = self.r_df.r_value / self.r_df.r_value.sum()
"""
if self.r_df is not None and self.r_df.shape[0]:
# z-score normalization.
self.r_df.r_value = (self.r_df.r_value - self.r_df.r_value.mean()) / self.r_df.r_value.std() # depends on [control=['if'], data=[]] |
def _try_get_current_manager(cls):
    """ Try to detect a package manager used in a current Gentoo system.

    Returns ``GentooPackageManager.PALUDIS`` or
    ``GentooPackageManager.PORTAGE`` when the corresponding Python
    bindings can be imported, or ``None`` when the host is not Gentoo,
    the configured manager is unrecognized, or its bindings are missing.
    """
    # Not a Gentoo system at all -> nothing to detect.
    if utils.get_distro_name().find('gentoo') == -1:
        return None
    # Gentoo convention: the PACKAGE_MANAGER environment variable selects
    # the preferred manager ('paludis' or 'portage').
    if 'PACKAGE_MANAGER' in os.environ:
        pm = os.environ['PACKAGE_MANAGER']
        if pm == 'paludis':
            # Try to import paludis module (only available when paludis
            # itself was built with its Python bindings enabled).
            try:
                import paludis
                return GentooPackageManager.PALUDIS
            except ImportError:
                # TODO Environment tells that paludis must be used, but
                # it seems latter was build w/o USE=python...
                # Need to report an error!!??
                cls._debug_doesnt_work('can\'t import paludis', name='PaludisPackageManager')
                return None
        elif pm == 'portage':
            # Fallback to default: portage
            pass
        else:
            # ATTENTION Some unknown package manager?! Which one?
            return None
    # Try to import portage module (the default when PACKAGE_MANAGER is
    # unset or explicitly 'portage').
    try:
        import portage
        return GentooPackageManager.PORTAGE
    except ImportError:
        cls._debug_doesnt_work('can\'t import portage', name='EmergePackageManager')
        return None
return None | def function[_try_get_current_manager, parameter[cls]]:
constant[ Try to detect a package manager used in a current Gentoo system. ]
if compare[call[call[name[utils].get_distro_name, parameter[]].find, parameter[constant[gentoo]]] equal[==] <ast.UnaryOp object at 0x7da1b10c5a50>] begin[:]
return[constant[None]]
if compare[constant[PACKAGE_MANAGER] in name[os].environ] begin[:]
variable[pm] assign[=] call[name[os].environ][constant[PACKAGE_MANAGER]]
if compare[name[pm] equal[==] constant[paludis]] begin[:]
<ast.Try object at 0x7da1b10c5fc0>
<ast.Try object at 0x7da1b0facb20> | keyword[def] identifier[_try_get_current_manager] ( identifier[cls] ):
literal[string]
keyword[if] identifier[utils] . identifier[get_distro_name] (). identifier[find] ( literal[string] )==- literal[int] :
keyword[return] keyword[None]
keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] :
identifier[pm] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[if] identifier[pm] == literal[string] :
keyword[try] :
keyword[import] identifier[paludis]
keyword[return] identifier[GentooPackageManager] . identifier[PALUDIS]
keyword[except] identifier[ImportError] :
identifier[cls] . identifier[_debug_doesnt_work] ( literal[string] , identifier[name] = literal[string] )
keyword[return] keyword[None]
keyword[elif] identifier[pm] == literal[string] :
keyword[pass]
keyword[else] :
keyword[return] keyword[None]
keyword[try] :
keyword[import] identifier[portage]
keyword[return] identifier[GentooPackageManager] . identifier[PORTAGE]
keyword[except] identifier[ImportError] :
identifier[cls] . identifier[_debug_doesnt_work] ( literal[string] , identifier[name] = literal[string] )
keyword[return] keyword[None] | def _try_get_current_manager(cls):
""" Try to detect a package manager used in a current Gentoo system. """
if utils.get_distro_name().find('gentoo') == -1:
return None # depends on [control=['if'], data=[]]
if 'PACKAGE_MANAGER' in os.environ:
pm = os.environ['PACKAGE_MANAGER']
if pm == 'paludis':
# Try to import paludis module
try:
import paludis
return GentooPackageManager.PALUDIS # depends on [control=['try'], data=[]]
except ImportError:
# TODO Environment tells that paludis must be used, but
# it seems latter was build w/o USE=python...
# Need to report an error!!??
cls._debug_doesnt_work("can't import paludis", name='PaludisPackageManager')
return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif pm == 'portage':
# Fallback to default: portage
pass # depends on [control=['if'], data=[]]
else:
# ATTENTION Some unknown package manager?! Which one?
return None # depends on [control=['if'], data=[]]
# Try to import portage module
try:
import portage
return GentooPackageManager.PORTAGE # depends on [control=['try'], data=[]]
except ImportError:
cls._debug_doesnt_work("can't import portage", name='EmergePackageManager')
return None # depends on [control=['except'], data=[]] |
def call(self, params, _context, **kwargs):
    """Evaluate this function's body against *params* asynchronously.

    Returns a Promise and bypasses the helper machinery that would
    normally copy a returned object into the results struct; instead
    ``_context.results`` is populated from inside the promise callback.
    """
    assert len(params) == self.paramCount

    def store_result(value):
        # '=' is a statement and cannot appear in an expression, so the
        # assignment onto the results struct goes through setattr.
        setattr(_context.results, 'value', value)

    return evaluate_impl(self.body, params).then(store_result)
constant[Note that we're returning a Promise object here, and bypassing the
helper functionality that normally sets the results struct from the
returned object. Instead, we set _context.results directly inside of
another promise]
assert[compare[call[name[len], parameter[name[params]]] equal[==] name[self].paramCount]]
return[call[call[name[evaluate_impl], parameter[name[self].body, name[params]]].then, parameter[<ast.Lambda object at 0x7da20c9917e0>]]] | keyword[def] identifier[call] ( identifier[self] , identifier[params] , identifier[_context] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[len] ( identifier[params] )== identifier[self] . identifier[paramCount]
keyword[return] identifier[evaluate_impl] ( identifier[self] . identifier[body] , identifier[params] ). identifier[then] ( keyword[lambda] identifier[value] : identifier[setattr] ( identifier[_context] . identifier[results] , literal[string] , identifier[value] )) | def call(self, params, _context, **kwargs):
"""Note that we're returning a Promise object here, and bypassing the
helper functionality that normally sets the results struct from the
returned object. Instead, we set _context.results directly inside of
another promise"""
assert len(params) == self.paramCount
# using setattr because '=' is not allowed inside of lambdas
return evaluate_impl(self.body, params).then(lambda value: setattr(_context.results, 'value', value)) |
def _cancelScheduledRestart(self, justification=None) -> None:
    """
    Cancels a previously scheduled restart, if one exists.

    :param justification: optional human-readable reason for the
        cancellation; when None a generic placeholder is logged instead
    """
    if not self.scheduledAction:
        return
    if justification is None:
        why_prefix = ", "
        why = "cancellation reason not specified"
    else:
        why_prefix = ": "
        why = justification
    ev_data = self.scheduledAction
    logger.info("Cancelling restart"
                " of node {}"
                " scheduled on {}"
                "{}{}"
                .format(self.nodeName,
                        ev_data.when,
                        why_prefix,
                        why))
    self._unscheduleAction()
    self._actionLog.append_cancelled(ev_data)
    # BUG FIX: the original adjacent string literals were missing a space,
    # producing "...node '<name>'has been cancelled...".
    logger.info(
        "Restart of node '{}' "
        "has been cancelled due to {}".format(
            self.nodeName, why))
constant[
Cancels scheduled restart
:param when: time restart was scheduled to
:param version: version restart scheduled for
]
if name[self].scheduledAction begin[:]
variable[why_prefix] assign[=] constant[: ]
variable[why] assign[=] name[justification]
if compare[name[justification] is constant[None]] begin[:]
variable[why_prefix] assign[=] constant[, ]
variable[why] assign[=] constant[cancellation reason not specified]
variable[ev_data] assign[=] name[self].scheduledAction
call[name[logger].info, parameter[call[constant[Cancelling restart of node {} scheduled on {}{}{}].format, parameter[name[self].nodeName, name[ev_data].when, name[why_prefix], name[why]]]]]
call[name[self]._unscheduleAction, parameter[]]
call[name[self]._actionLog.append_cancelled, parameter[name[ev_data]]]
call[name[logger].info, parameter[call[constant[Restart of node '{}'has been cancelled due to {}].format, parameter[name[self].nodeName, name[why]]]]] | keyword[def] identifier[_cancelScheduledRestart] ( identifier[self] , identifier[justification] = keyword[None] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[scheduledAction] :
identifier[why_prefix] = literal[string]
identifier[why] = identifier[justification]
keyword[if] identifier[justification] keyword[is] keyword[None] :
identifier[why_prefix] = literal[string]
identifier[why] = literal[string]
identifier[ev_data] = identifier[self] . identifier[scheduledAction]
identifier[logger] . identifier[info] ( literal[string]
literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[self] . identifier[nodeName] ,
identifier[ev_data] . identifier[when] ,
identifier[why_prefix] ,
identifier[why] ))
identifier[self] . identifier[_unscheduleAction] ()
identifier[self] . identifier[_actionLog] . identifier[append_cancelled] ( identifier[ev_data] )
identifier[logger] . identifier[info] (
literal[string]
literal[string] . identifier[format] (
identifier[self] . identifier[nodeName] , identifier[why] )) | def _cancelScheduledRestart(self, justification=None) -> None:
"""
Cancels scheduled restart
:param when: time restart was scheduled to
:param version: version restart scheduled for
"""
if self.scheduledAction:
why_prefix = ': '
why = justification
if justification is None:
why_prefix = ', '
why = 'cancellation reason not specified' # depends on [control=['if'], data=[]]
ev_data = self.scheduledAction
logger.info('Cancelling restart of node {} scheduled on {}{}{}'.format(self.nodeName, ev_data.when, why_prefix, why))
self._unscheduleAction()
self._actionLog.append_cancelled(ev_data)
logger.info("Restart of node '{}'has been cancelled due to {}".format(self.nodeName, why)) # depends on [control=['if'], data=[]] |
def do_usufy(self, query, **kwargs):
    """
    Verify a usufy query on this platform.

    This might be redefined in any class inheriting from Platform.

    Args:
    -----
        query: The element to be searched.

    Return:
    -------
        A list of elements to be appended.
    """
    test = self.check_usufy(query, **kwargs)
    if not test:
        return []

    # Attributes appear in a fixed order: URI, alias, platform name.
    attributes = []
    for attr_type, attr_value in (
            ("i3visio.uri", self.createURL(word=query, mode="usufy")),
            ("i3visio.alias", query),
            ("i3visio.platform", self.platformName)):
        attributes.append({
            "type": attr_type,
            "value": attr_value,
            "attributes": []
        })

    # Platform-specific extraction is appended after the fixed attributes.
    attributes += self.process_usufy(test)

    profile = {
        "type": "i3visio.profile",
        "value": self.platformName + " - " + query,
        "attributes": attributes
    }
    return [profile]
constant[
Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
]
variable[results] assign[=] list[[]]
variable[test] assign[=] call[name[self].check_usufy, parameter[name[query]]]
if name[test] begin[:]
variable[r] assign[=] dictionary[[<ast.Constant object at 0x7da1b23466b0>, <ast.Constant object at 0x7da1b2347370>, <ast.Constant object at 0x7da1b2346d40>], [<ast.Constant object at 0x7da1b2344df0>, <ast.BinOp object at 0x7da1b2344460>, <ast.List object at 0x7da1b2346680>]]
variable[aux] assign[=] dictionary[[], []]
call[name[aux]][constant[type]] assign[=] constant[i3visio.uri]
call[name[aux]][constant[value]] assign[=] call[name[self].createURL, parameter[]]
call[name[aux]][constant[attributes]] assign[=] list[[]]
call[call[name[r]][constant[attributes]].append, parameter[name[aux]]]
variable[aux] assign[=] dictionary[[], []]
call[name[aux]][constant[type]] assign[=] constant[i3visio.alias]
call[name[aux]][constant[value]] assign[=] name[query]
call[name[aux]][constant[attributes]] assign[=] list[[]]
call[call[name[r]][constant[attributes]].append, parameter[name[aux]]]
variable[aux] assign[=] dictionary[[], []]
call[name[aux]][constant[type]] assign[=] constant[i3visio.platform]
call[name[aux]][constant[value]] assign[=] name[self].platformName
call[name[aux]][constant[attributes]] assign[=] list[[]]
call[call[name[r]][constant[attributes]].append, parameter[name[aux]]]
<ast.AugAssign object at 0x7da1b23461d0>
call[name[results].append, parameter[name[r]]]
return[name[results]] | keyword[def] identifier[do_usufy] ( identifier[self] , identifier[query] ,** identifier[kwargs] ):
literal[string]
identifier[results] =[]
identifier[test] = identifier[self] . identifier[check_usufy] ( identifier[query] ,** identifier[kwargs] )
keyword[if] identifier[test] :
identifier[r] ={
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[platformName] + literal[string] + identifier[query] ,
literal[string] :[]
}
identifier[aux] ={}
identifier[aux] [ literal[string] ]= literal[string]
identifier[aux] [ literal[string] ]= identifier[self] . identifier[createURL] ( identifier[word] = identifier[query] , identifier[mode] = literal[string] )
identifier[aux] [ literal[string] ]=[]
identifier[r] [ literal[string] ]. identifier[append] ( identifier[aux] )
identifier[aux] ={}
identifier[aux] [ literal[string] ]= literal[string]
identifier[aux] [ literal[string] ]= identifier[query]
identifier[aux] [ literal[string] ]=[]
identifier[r] [ literal[string] ]. identifier[append] ( identifier[aux] )
identifier[aux] ={}
identifier[aux] [ literal[string] ]= literal[string]
identifier[aux] [ literal[string] ]= identifier[self] . identifier[platformName]
identifier[aux] [ literal[string] ]=[]
identifier[r] [ literal[string] ]. identifier[append] ( identifier[aux] )
identifier[r] [ literal[string] ]+= identifier[self] . identifier[process_usufy] ( identifier[test] )
identifier[results] . identifier[append] ( identifier[r] )
keyword[return] identifier[results] | def do_usufy(self, query, **kwargs):
"""
Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
"""
results = []
test = self.check_usufy(query, **kwargs)
if test:
r = {'type': 'i3visio.profile', 'value': self.platformName + ' - ' + query, 'attributes': []}
# Appending platform URI
aux = {}
aux['type'] = 'i3visio.uri'
aux['value'] = self.createURL(word=query, mode='usufy')
aux['attributes'] = []
r['attributes'].append(aux)
# Appending the alias
aux = {}
aux['type'] = 'i3visio.alias'
aux['value'] = query
aux['attributes'] = []
r['attributes'].append(aux)
# Appending platform name
aux = {}
aux['type'] = 'i3visio.platform'
aux['value'] = self.platformName
aux['attributes'] = []
r['attributes'].append(aux)
r['attributes'] += self.process_usufy(test)
results.append(r) # depends on [control=['if'], data=[]]
return results |
def split_channels(image):
    """
    Split channels of a multi-channel ANTsImage into a collection
    of scalar ANTsImage types

    Arguments
    ---------
    image : ANTsImage
        multi-channel image to split

    Returns
    -------
    list of ANTsImage types

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
    >>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
    >>> imagemerge = ants.merge_channels([image,image2])
    >>> imagemerge.components == 2
    >>> images_unmerged = ants.split_channels(imagemerge)
    >>> len(images_unmerged) == 2
    >>> images_unmerged[0].components == 1
    """
    # The C++ backend returns one ITK image pointer per channel.
    libfn = utils.get_lib_fn('splitChannels%s' % image._libsuffix)
    channel_pointers = libfn(image.pointer)
    # Each resulting image is scalar (components=1) but keeps the source
    # image's pixeltype and dimensionality.
    return [iio.ANTsImage(pixeltype=image.pixeltype,
                          dimension=image.dimension,
                          components=1,
                          pointer=ptr)
            for ptr in channel_pointers]
constant[
Split channels of a multi-channel ANTsImage into a collection
of scalar ANTsImage types
Arguments
---------
image : ANTsImage
multi-channel image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> imagemerge = ants.merge_channels([image,image2])
>>> imagemerge.components == 2
>>> images_unmerged = ants.split_channels(imagemerge)
>>> len(images_unmerged) == 2
>>> images_unmerged[0].components == 1
]
variable[inpixeltype] assign[=] name[image].pixeltype
variable[dimension] assign[=] name[image].dimension
variable[components] assign[=] constant[1]
variable[libfn] assign[=] call[name[utils].get_lib_fn, parameter[binary_operation[constant[splitChannels%s] <ast.Mod object at 0x7da2590d6920> name[image]._libsuffix]]]
variable[itkimages] assign[=] call[name[libfn], parameter[name[image].pointer]]
variable[antsimages] assign[=] <ast.ListComp object at 0x7da1b2347d90>
return[name[antsimages]] | keyword[def] identifier[split_channels] ( identifier[image] ):
literal[string]
identifier[inpixeltype] = identifier[image] . identifier[pixeltype]
identifier[dimension] = identifier[image] . identifier[dimension]
identifier[components] = literal[int]
identifier[libfn] = identifier[utils] . identifier[get_lib_fn] ( literal[string] % identifier[image] . identifier[_libsuffix] )
identifier[itkimages] = identifier[libfn] ( identifier[image] . identifier[pointer] )
identifier[antsimages] =[ identifier[iio] . identifier[ANTsImage] ( identifier[pixeltype] = identifier[inpixeltype] , identifier[dimension] = identifier[dimension] ,
identifier[components] = identifier[components] , identifier[pointer] = identifier[itkimage] ) keyword[for] identifier[itkimage] keyword[in] identifier[itkimages] ]
keyword[return] identifier[antsimages] | def split_channels(image):
"""
Split channels of a multi-channel ANTsImage into a collection
of scalar ANTsImage types
Arguments
---------
image : ANTsImage
multi-channel image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> imagemerge = ants.merge_channels([image,image2])
>>> imagemerge.components == 2
>>> images_unmerged = ants.split_channels(imagemerge)
>>> len(images_unmerged) == 2
>>> images_unmerged[0].components == 1
"""
inpixeltype = image.pixeltype
dimension = image.dimension
components = 1
libfn = utils.get_lib_fn('splitChannels%s' % image._libsuffix)
itkimages = libfn(image.pointer)
antsimages = [iio.ANTsImage(pixeltype=inpixeltype, dimension=dimension, components=components, pointer=itkimage) for itkimage in itkimages]
return antsimages |
def _get_indices(self, data):
    """ Compute indices along the temporal dimension corresponding to the sought percentile

    :param data: Input 3D array holding the reference band
    :type data: numpy array
    :return: 2D array holding the temporal index corresponding to percentile
    """
    return self._index_by_percentile(data, self.percentile)
constant[ Compute indices along temporal dimension corresponding to the sought percentile
:param data: Input 3D array holding the reference band
:type data: numpy array
:return: 2D array holding the temporal index corresponding to percentile
]
variable[indices] assign[=] call[name[self]._index_by_percentile, parameter[name[data], name[self].percentile]]
return[name[indices]] | keyword[def] identifier[_get_indices] ( identifier[self] , identifier[data] ):
literal[string]
identifier[indices] = identifier[self] . identifier[_index_by_percentile] ( identifier[data] , identifier[self] . identifier[percentile] )
keyword[return] identifier[indices] | def _get_indices(self, data):
""" Compute indices along temporal dimension corresponding to the sought percentile
:param data: Input 3D array holding the reference band
:type data: numpy array
:return: 2D array holding the temporal index corresponding to percentile
"""
indices = self._index_by_percentile(data, self.percentile)
return indices |
def takeoff(self):
    '''
    Sends the takeoff command to the drone.
    '''
    command = at.REF(at.REF.input.start)
    self.send(command)
constant[
Sends the takeoff command.
]
call[name[self].send, parameter[call[name[at].REF, parameter[name[at].REF.input.start]]]] | keyword[def] identifier[takeoff] ( identifier[self] ):
literal[string]
identifier[self] . identifier[send] ( identifier[at] . identifier[REF] ( identifier[at] . identifier[REF] . identifier[input] . identifier[start] )) | def takeoff(self):
"""
Sends the takeoff command.
"""
self.send(at.REF(at.REF.input.start)) |
def _not_reentrant(func):
    """Decorator that prevents callbacks from calling into methods that are
    not reentrant.

    The wrapped method raises :class:`RuntimeError` when invoked while the
    instance's ``_callback_lock`` reports that a callback is running.

    :param func: the method to guard
    :return: the guarded wrapper (metadata of ``func`` is preserved)
    """
    import functools

    # BUG FIX: without functools.wraps the decorated method loses its
    # __name__/__doc__, which breaks introspection and help().
    @functools.wraps(func)
    def wrap(self, *args, **kws):
        if self._callback_lock and self._callback_lock.in_callback:
            m = "Connection %s cannot be invoked from a callback!" % func
            raise RuntimeError(m)
        return func(self, *args, **kws)
    return wrap
constant[Decorator that prevents callbacks from calling into methods that are
not reentrant
]
def function[wrap, parameter[self]]:
if <ast.BoolOp object at 0x7da1b013dc60> begin[:]
variable[m] assign[=] binary_operation[constant[Connection %s cannot be invoked from a callback!] <ast.Mod object at 0x7da2590d6920> name[func]]
<ast.Raise object at 0x7da1b013cb50>
return[call[name[func], parameter[name[self], <ast.Starred object at 0x7da1b013cac0>]]]
return[name[wrap]] | keyword[def] identifier[_not_reentrant] ( identifier[func] ):
literal[string]
keyword[def] identifier[wrap] ( identifier[self] ,* identifier[args] ,** identifier[kws] ):
keyword[if] identifier[self] . identifier[_callback_lock] keyword[and] identifier[self] . identifier[_callback_lock] . identifier[in_callback] :
identifier[m] = literal[string] % identifier[func]
keyword[raise] identifier[RuntimeError] ( identifier[m] )
keyword[return] identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kws] )
keyword[return] identifier[wrap] | def _not_reentrant(func):
"""Decorator that prevents callbacks from calling into methods that are
not reentrant
"""
def wrap(self, *args, **kws):
if self._callback_lock and self._callback_lock.in_callback:
m = 'Connection %s cannot be invoked from a callback!' % func
raise RuntimeError(m) # depends on [control=['if'], data=[]]
return func(self, *args, **kws)
return wrap |
def ast_imports(file_path):
    """Collect the imports declared in a python module.

    :return: (list - tuple) (module, name, asname, level)
    """
    with open(file_path, 'r') as fp:
        source = fp.read()
    tree = ast.parse(source, file_path)
    visitor = _ImportsFinder()
    visitor.visit(tree)
    return visitor.imports
constant[get list of import from python module
:return: (list - tuple) (module, name, asname, level)
]
with call[name[open], parameter[name[file_path], constant[r]]] begin[:]
variable[text] assign[=] call[name[fp].read, parameter[]]
variable[mod_ast] assign[=] call[name[ast].parse, parameter[name[text], name[file_path]]]
variable[finder] assign[=] call[name[_ImportsFinder], parameter[]]
call[name[finder].visit, parameter[name[mod_ast]]]
return[name[finder].imports] | keyword[def] identifier[ast_imports] ( identifier[file_path] ):
literal[string]
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[fp] :
identifier[text] = identifier[fp] . identifier[read] ()
identifier[mod_ast] = identifier[ast] . identifier[parse] ( identifier[text] , identifier[file_path] )
identifier[finder] = identifier[_ImportsFinder] ()
identifier[finder] . identifier[visit] ( identifier[mod_ast] )
keyword[return] identifier[finder] . identifier[imports] | def ast_imports(file_path):
"""get list of import from python module
:return: (list - tuple) (module, name, asname, level)
"""
with open(file_path, 'r') as fp:
text = fp.read() # depends on [control=['with'], data=['fp']]
mod_ast = ast.parse(text, file_path)
finder = _ImportsFinder()
finder.visit(mod_ast)
return finder.imports |
def list_parameters(self, parameter_type=None, page_size=None):
    """Lists the parameters visible to this client.

    Parameters are returned in lexicographical order.

    :param str parameter_type: The type of parameter
    :rtype: :class:`.Parameter` iterator
    """
    # 'details' is always requested; type/limit are only sent when given.
    query = {'details': True}
    for key, value in (('type', parameter_type), ('limit', page_size)):
        if value is not None:
            query[key] = value
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/parameters'.format(self._instance),
        params=query,
        response_class=mdb_pb2.ListParametersResponse,
        items_key='parameter',
        item_mapper=Parameter,
    )
constant[Lists the parameters visible to this client.
Parameters are returned in lexicographical order.
:param str parameter_type: The type of parameter
:rtype: :class:`.Parameter` iterator
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1eef4c0>], [<ast.Constant object at 0x7da1b1eed750>]]
if compare[name[parameter_type] is_not constant[None]] begin[:]
call[name[params]][constant[type]] assign[=] name[parameter_type]
if compare[name[page_size] is_not constant[None]] begin[:]
call[name[params]][constant[limit]] assign[=] name[page_size]
return[call[name[pagination].Iterator, parameter[]]] | keyword[def] identifier[list_parameters] ( identifier[self] , identifier[parameter_type] = keyword[None] , identifier[page_size] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : keyword[True] }
keyword[if] identifier[parameter_type] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[parameter_type]
keyword[if] identifier[page_size] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[page_size]
keyword[return] identifier[pagination] . identifier[Iterator] (
identifier[client] = identifier[self] . identifier[_client] ,
identifier[path] = literal[string] . identifier[format] ( identifier[self] . identifier[_instance] ),
identifier[params] = identifier[params] ,
identifier[response_class] = identifier[mdb_pb2] . identifier[ListParametersResponse] ,
identifier[items_key] = literal[string] ,
identifier[item_mapper] = identifier[Parameter] ,
) | def list_parameters(self, parameter_type=None, page_size=None):
"""Lists the parameters visible to this client.
Parameters are returned in lexicographical order.
:param str parameter_type: The type of parameter
:rtype: :class:`.Parameter` iterator
"""
params = {'details': True}
if parameter_type is not None:
params['type'] = parameter_type # depends on [control=['if'], data=['parameter_type']]
if page_size is not None:
params['limit'] = page_size # depends on [control=['if'], data=['page_size']]
return pagination.Iterator(client=self._client, path='/mdb/{}/parameters'.format(self._instance), params=params, response_class=mdb_pb2.ListParametersResponse, items_key='parameter', item_mapper=Parameter) |
def _send_string_selection(self, string: str):
    """Use the mouse selection clipboard (X PRIMARY) to send a string."""
    # Remember the current selection content so it can be restored once
    # the paste has been performed.
    previous_content = self.clipboard.selection
    if previous_content is None:
        logger.warning("Tried to backup the X PRIMARY selection content, but got None instead of a string.")
    self.clipboard.selection = string
    self.__enqueue(self._paste_using_mouse_button_2)
    self.__enqueue(self._restore_clipboard_selection, previous_content)
constant[Use the mouse selection clipboard to send a string.]
variable[backup] assign[=] name[self].clipboard.selection
if compare[name[backup] is constant[None]] begin[:]
call[name[logger].warning, parameter[constant[Tried to backup the X PRIMARY selection content, but got None instead of a string.]]]
name[self].clipboard.selection assign[=] name[string]
call[name[self].__enqueue, parameter[name[self]._paste_using_mouse_button_2]]
call[name[self].__enqueue, parameter[name[self]._restore_clipboard_selection, name[backup]]] | keyword[def] identifier[_send_string_selection] ( identifier[self] , identifier[string] : identifier[str] ):
literal[string]
identifier[backup] = identifier[self] . identifier[clipboard] . identifier[selection]
keyword[if] identifier[backup] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[clipboard] . identifier[selection] = identifier[string]
identifier[self] . identifier[__enqueue] ( identifier[self] . identifier[_paste_using_mouse_button_2] )
identifier[self] . identifier[__enqueue] ( identifier[self] . identifier[_restore_clipboard_selection] , identifier[backup] ) | def _send_string_selection(self, string: str):
"""Use the mouse selection clipboard to send a string."""
backup = self.clipboard.selection # Keep a backup of current content, to restore the original afterwards.
if backup is None:
logger.warning('Tried to backup the X PRIMARY selection content, but got None instead of a string.') # depends on [control=['if'], data=[]]
self.clipboard.selection = string
self.__enqueue(self._paste_using_mouse_button_2)
self.__enqueue(self._restore_clipboard_selection, backup) |
def _empty_resource_attributes(self):
    '''
    small method to empty values if resource is removed or absent

    Args:
        None

    Return:
        None: empties selected resource attributes
    '''
    self.status_code = 404
    self.headers = {}
    self.exists = False

    # build RDF
    self.rdf = self._build_rdf()

    # if NonRDF, empty binary data; isinstance replaces the exact
    # type() comparison so subclasses of NonRDFSource are handled too
    if isinstance(self, NonRDFSource):
        self.binary.empty()
constant[
small method to empty values if resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes
]
name[self].status_code assign[=] constant[404]
name[self].headers assign[=] dictionary[[], []]
name[self].exists assign[=] constant[False]
name[self].rdf assign[=] call[name[self]._build_rdf, parameter[]]
if compare[call[name[type], parameter[name[self]]] equal[==] name[NonRDFSource]] begin[:]
call[name[self].binary.empty, parameter[]] | keyword[def] identifier[_empty_resource_attributes] ( identifier[self] ):
literal[string]
identifier[self] . identifier[status_code] = literal[int]
identifier[self] . identifier[headers] ={}
identifier[self] . identifier[exists] = keyword[False]
identifier[self] . identifier[rdf] = identifier[self] . identifier[_build_rdf] ()
keyword[if] identifier[type] ( identifier[self] )== identifier[NonRDFSource] :
identifier[self] . identifier[binary] . identifier[empty] () | def _empty_resource_attributes(self):
"""
small method to empty values if resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes
"""
self.status_code = 404
self.headers = {}
self.exists = False # build RDF
self.rdf = self._build_rdf() # if NonRDF, empty binary data
if type(self) == NonRDFSource:
self.binary.empty() # depends on [control=['if'], data=[]] |
def _expand_dataset_packages(dataset_label_dict):
    """Returns list of possible packages contained in dataset, in case the dataset is multi dataset, eg. 'lisa'.

    In case the param is not pointing to a multidataset, returns only that label in the list.

    :param dataset_label_dict: iterable of dataset labels (keys of ``data_urls``)
    :return: list of labels with multi-dataset entries expanded into their packages
    """
    expanded_labels = []
    for label in dataset_label_dict:
        dataset_metadata = data_urls[label]
        # isinstance instead of an exact type() comparison so dict
        # subclasses (e.g. OrderedDict) carrying "package" are expanded too
        if isinstance(dataset_metadata, dict) and "package" in dataset_metadata:
            expanded_labels.extend(dataset_metadata["package"])
        else:
            expanded_labels.append(label)
    return expanded_labels
constant[Returns list of possible packages contained in dataset, in case the dataset is multi dataset, eg. 'lisa'.
In case the param is not pointing to multidataset returns only that label in a list.
:param str dataset_label_dict: label of multi dataset
:return: list of labels
]
variable[new_dataset_label_dict] assign[=] list[[]]
for taget[name[label]] in starred[name[dataset_label_dict]] begin[:]
variable[dataset_metadata] assign[=] call[name[data_urls]][name[label]]
if <ast.BoolOp object at 0x7da2043479d0> begin[:]
call[name[new_dataset_label_dict].extend, parameter[call[name[dataset_metadata]][constant[package]]]]
return[name[new_dataset_label_dict]] | keyword[def] identifier[_expand_dataset_packages] ( identifier[dataset_label_dict] ):
literal[string]
identifier[new_dataset_label_dict] =[]
keyword[for] identifier[label] keyword[in] identifier[dataset_label_dict] :
identifier[dataset_metadata] = identifier[data_urls] [ identifier[label] ]
keyword[if] identifier[type] ( identifier[dataset_metadata] )== identifier[dict] keyword[and] literal[string] keyword[in] identifier[dataset_metadata] :
identifier[new_dataset_label_dict] . identifier[extend] ( identifier[dataset_metadata] [ literal[string] ])
keyword[else] :
identifier[new_dataset_label_dict] . identifier[append] ( identifier[label] )
keyword[return] identifier[new_dataset_label_dict] | def _expand_dataset_packages(dataset_label_dict):
"""Returns list of possible packages contained in dataset, in case the dataset is multi dataset, eg. 'lisa'.
In case the param is not pointing to multidataset returns only that label in a list.
:param str dataset_label_dict: label of multi dataset
:return: list of labels
"""
new_dataset_label_dict = []
for label in dataset_label_dict:
dataset_metadata = data_urls[label]
if type(dataset_metadata) == dict and 'package' in dataset_metadata:
new_dataset_label_dict.extend(dataset_metadata['package']) # depends on [control=['if'], data=[]]
else:
new_dataset_label_dict.append(label) # depends on [control=['for'], data=['label']]
return new_dataset_label_dict |
def astext(data):
    """
    Given a unicode/str/bytes always return str.

    We prefer to work with the 'native' string type for the version of python
    we run on, and this gets us that.
    """
    # Check order matters: native str first, then six's text/binary types.
    for kind, convert in (
            (str, lambda value: value),
            (text_type, lambda value: value.encode("utf-8", "ignore")),
            (binary_type, lambda value: value.decode("utf-8", "ignore"))):
        if isinstance(data, kind):
            return convert(data)
    raise TypeError('{!r} not a string'.format(data))
constant[
Given a unicode/str/bytes always return str.
We prefer to work with the 'native' string type for the version of python
we run on, and this gets us that.
]
if call[name[isinstance], parameter[name[data], name[str]]] begin[:]
return[name[data]]
<ast.Raise object at 0x7da18bccae30> | keyword[def] identifier[astext] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[str] ):
keyword[return] identifier[data]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[text_type] ):
keyword[return] identifier[data] . identifier[encode] ( literal[string] , literal[string] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[binary_type] ):
keyword[return] identifier[data] . identifier[decode] ( literal[string] , literal[string] )
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[data] )) | def astext(data):
"""
Given a unicode/str/bytes always return str.
We prefer to work with the 'native' string type for the version of python
we run on, and this gets us that.
"""
if isinstance(data, str):
return data # depends on [control=['if'], data=[]]
elif isinstance(data, text_type):
return data.encode('utf-8', 'ignore') # depends on [control=['if'], data=[]]
elif isinstance(data, binary_type):
return data.decode('utf-8', 'ignore') # depends on [control=['if'], data=[]]
raise TypeError('{!r} not a string'.format(data)) |
def find_application(app_id=None, app_name=None):
    """
    Find the application by id (takes priority) or by name.

    :param app_id: the application id
    :param app_name: the application name
    :return: the Application found, or None if not found
    :raises ArianeCallParametersError: if neither id nor name is provided
    """
    LOGGER.debug("ApplicationService.find_application")
    # Truthiness covers both None and empty values.
    if not app_id and not app_name:
        raise exceptions.ArianeCallParametersError('id and name')

    if app_id and app_name:
        # id takes precedence when both are supplied.
        # Logger.warn is a deprecated alias of Logger.warning.
        LOGGER.warning('ApplicationService.find_application - Both id and name are defined. '
                       'Will give you search on id.')
        app_name = None

    params = None
    if app_id:
        params = {'id': app_id}
    elif app_name:
        params = {'name': app_name}

    ret = None
    if params is not None:
        args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
        response = ApplicationService.requester.call(args)
        if response.rc == 0:
            ret = Application.json_2_application(response.response_content)
        elif response.rc != 404:  # 404 simply means "not found" -> return None
            err_msg = 'ApplicationService.find_application - Problem while finding application (id:' + \
                      str(app_id) + ', name:' + str(app_name) + '). ' + \
                      'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
                      " (" + str(response.rc) + ")"
            LOGGER.warning(
                err_msg
            )
return ret | def function[find_application, parameter[app_id, app_name]]:
constant[
find the application according application id (prioritary) or application name
:param app_id: the application id
:param app_name: the application name
:return: found application or None if not found
]
call[name[LOGGER].debug, parameter[constant[ApplicationService.find_application]]]
if <ast.BoolOp object at 0x7da2054a7e20> begin[:]
<ast.Raise object at 0x7da2054a61d0>
if <ast.BoolOp object at 0x7da2054a5960> begin[:]
call[name[LOGGER].warn, parameter[constant[ApplicationService.find_application - Both id and name are defined. Will give you search on id.]]]
variable[app_name] assign[=] constant[None]
variable[params] assign[=] constant[None]
if <ast.BoolOp object at 0x7da2054a4d00> begin[:]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2054a67d0>], [<ast.Name object at 0x7da2054a7f10>]]
variable[ret] assign[=] constant[None]
if compare[name[params] is_not constant[None]] begin[:]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da2054a7b80>, <ast.Constant object at 0x7da2054a4040>, <ast.Constant object at 0x7da2054a5870>], [<ast.Constant object at 0x7da2054a4100>, <ast.Constant object at 0x7da2054a5c60>, <ast.Name object at 0x7da2054a4820>]]
variable[response] assign[=] call[name[ApplicationService].requester.call, parameter[name[args]]]
if compare[name[response].rc equal[==] constant[0]] begin[:]
variable[ret] assign[=] call[name[Application].json_2_application, parameter[name[response].response_content]]
return[name[ret]] | keyword[def] identifier[find_application] ( identifier[app_id] = keyword[None] , identifier[app_name] = keyword[None] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[if] ( identifier[app_id] keyword[is] keyword[None] keyword[or] keyword[not] identifier[app_id] ) keyword[and] ( identifier[app_name] keyword[is] keyword[None] keyword[or] keyword[not] identifier[app_name] ):
keyword[raise] identifier[exceptions] . identifier[ArianeCallParametersError] ( literal[string] )
keyword[if] ( identifier[app_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[app_id] ) keyword[and] ( identifier[app_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[app_name] ):
identifier[LOGGER] . identifier[warn] ( literal[string]
literal[string] )
identifier[app_name] = keyword[None]
identifier[params] = keyword[None]
keyword[if] identifier[app_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[app_id] :
identifier[params] ={ literal[string] : identifier[app_id] }
keyword[elif] identifier[app_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[app_name] :
identifier[params] ={ literal[string] : identifier[app_name] }
identifier[ret] = keyword[None]
keyword[if] identifier[params] keyword[is] keyword[not] keyword[None] :
identifier[args] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[params] }
identifier[response] = identifier[ApplicationService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[if] identifier[response] . identifier[rc] == literal[int] :
identifier[ret] = identifier[Application] . identifier[json_2_application] ( identifier[response] . identifier[response_content] )
keyword[elif] identifier[response] . identifier[rc] != literal[int] :
identifier[err_msg] = literal[string] + identifier[str] ( identifier[app_id] )+ literal[string] + identifier[str] ( identifier[app_name] )+ literal[string] + literal[string] + identifier[str] ( identifier[response] . identifier[response_content] )+ literal[string] + identifier[str] ( identifier[response] . identifier[error_message] )+ literal[string] + identifier[str] ( identifier[response] . identifier[rc] )+ literal[string]
identifier[LOGGER] . identifier[warning] (
identifier[err_msg]
)
keyword[return] identifier[ret] | def find_application(app_id=None, app_name=None):
"""
find the application according application id (prioritary) or application name
:param app_id: the application id
:param app_name: the application name
:return: found application or None if not found
"""
LOGGER.debug('ApplicationService.find_application')
if (app_id is None or not app_id) and (app_name is None or not app_name):
raise exceptions.ArianeCallParametersError('id and name') # depends on [control=['if'], data=[]]
if (app_id is not None and app_id) and (app_name is not None and app_name):
LOGGER.warn('ApplicationService.find_application - Both id and name are defined. Will give you search on id.')
app_name = None # depends on [control=['if'], data=[]]
params = None
if app_id is not None and app_id:
params = {'id': app_id} # depends on [control=['if'], data=[]]
elif app_name is not None and app_name:
params = {'name': app_name} # depends on [control=['if'], data=[]]
ret = None
if params is not None:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
response = ApplicationService.requester.call(args)
if response.rc == 0:
ret = Application.json_2_application(response.response_content) # depends on [control=['if'], data=[]]
elif response.rc != 404:
err_msg = 'ApplicationService.find_application - Problem while finding application (id:' + str(app_id) + ', name:' + str(app_name) + '). ' + 'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + ' (' + str(response.rc) + ')'
LOGGER.warning(err_msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['params']]
return ret |
def narrow_to(self, featuremap):
    """
    Slice anchors to the spatial size of this featuremap.

    Args:
        featuremap: feature tensor whose dimensions from index 2 onward
            are taken as the spatial extent (h, w) -- assumes NCHW
            layout, TODO confirm with callers.

    Returns:
        RPNAnchors: the same anchor fields cropped to (h, w) spatially.
    """
    shape2d = tf.shape(featuremap)[2:]  # h,w
    # Size vectors for tf.slice; -1 means "take everything" on that axis.
    slice3d = tf.concat([shape2d, [-1]], axis=0)      # for rank-3 fields (labels)
    slice4d = tf.concat([shape2d, [-1, -1]], axis=0)  # for rank-4 fields (boxes)
    boxes = tf.slice(self.boxes, [0, 0, 0, 0], slice4d)
    gt_labels = tf.slice(self.gt_labels, [0, 0, 0], slice3d)
    gt_boxes = tf.slice(self.gt_boxes, [0, 0, 0, 0], slice4d)
return RPNAnchors(boxes, gt_labels, gt_boxes) | def function[narrow_to, parameter[self, featuremap]]:
constant[
Slice anchors to the spatial size of this featuremap.
]
variable[shape2d] assign[=] call[call[name[tf].shape, parameter[name[featuremap]]]][<ast.Slice object at 0x7da18f720040>]
variable[slice3d] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da18f722020>, <ast.List object at 0x7da18f7219f0>]]]]
variable[slice4d] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da18f721d50>, <ast.List object at 0x7da18f7229e0>]]]]
variable[boxes] assign[=] call[name[tf].slice, parameter[name[self].boxes, list[[<ast.Constant object at 0x7da18f723010>, <ast.Constant object at 0x7da18f720ac0>, <ast.Constant object at 0x7da18f7220b0>, <ast.Constant object at 0x7da18f7233d0>]], name[slice4d]]]
variable[gt_labels] assign[=] call[name[tf].slice, parameter[name[self].gt_labels, list[[<ast.Constant object at 0x7da18f723a30>, <ast.Constant object at 0x7da18f720250>, <ast.Constant object at 0x7da18f722fb0>]], name[slice3d]]]
variable[gt_boxes] assign[=] call[name[tf].slice, parameter[name[self].gt_boxes, list[[<ast.Constant object at 0x7da18f7202b0>, <ast.Constant object at 0x7da18f721900>, <ast.Constant object at 0x7da18f720e80>, <ast.Constant object at 0x7da18f722410>]], name[slice4d]]]
return[call[name[RPNAnchors], parameter[name[boxes], name[gt_labels], name[gt_boxes]]]] | keyword[def] identifier[narrow_to] ( identifier[self] , identifier[featuremap] ):
literal[string]
identifier[shape2d] = identifier[tf] . identifier[shape] ( identifier[featuremap] )[ literal[int] :]
identifier[slice3d] = identifier[tf] . identifier[concat] ([ identifier[shape2d] ,[- literal[int] ]], identifier[axis] = literal[int] )
identifier[slice4d] = identifier[tf] . identifier[concat] ([ identifier[shape2d] ,[- literal[int] ,- literal[int] ]], identifier[axis] = literal[int] )
identifier[boxes] = identifier[tf] . identifier[slice] ( identifier[self] . identifier[boxes] ,[ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[slice4d] )
identifier[gt_labels] = identifier[tf] . identifier[slice] ( identifier[self] . identifier[gt_labels] ,[ literal[int] , literal[int] , literal[int] ], identifier[slice3d] )
identifier[gt_boxes] = identifier[tf] . identifier[slice] ( identifier[self] . identifier[gt_boxes] ,[ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[slice4d] )
keyword[return] identifier[RPNAnchors] ( identifier[boxes] , identifier[gt_labels] , identifier[gt_boxes] ) | def narrow_to(self, featuremap):
"""
Slice anchors to the spatial size of this featuremap.
"""
shape2d = tf.shape(featuremap)[2:] # h,w
slice3d = tf.concat([shape2d, [-1]], axis=0)
slice4d = tf.concat([shape2d, [-1, -1]], axis=0)
boxes = tf.slice(self.boxes, [0, 0, 0, 0], slice4d)
gt_labels = tf.slice(self.gt_labels, [0, 0, 0], slice3d)
gt_boxes = tf.slice(self.gt_boxes, [0, 0, 0, 0], slice4d)
return RPNAnchors(boxes, gt_labels, gt_boxes) |
def get(self, sid):
    """
    Constructs an ExecutionContext bound to a single Execution resource.

    :param sid: Execution Sid (the unique identifier of the Execution).

    :returns: twilio.rest.studio.v1.flow.execution.ExecutionContext
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionContext
    """
return ExecutionContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, ) | def function[get, parameter[self, sid]]:
constant[
Constructs a ExecutionContext
:param sid: Execution Sid.
:returns: twilio.rest.studio.v1.flow.execution.ExecutionContext
:rtype: twilio.rest.studio.v1.flow.execution.ExecutionContext
]
return[call[name[ExecutionContext], parameter[name[self]._version]]] | keyword[def] identifier[get] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[return] identifier[ExecutionContext] ( identifier[self] . identifier[_version] , identifier[flow_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[sid] = identifier[sid] ,) | def get(self, sid):
"""
Constructs a ExecutionContext
:param sid: Execution Sid.
:returns: twilio.rest.studio.v1.flow.execution.ExecutionContext
:rtype: twilio.rest.studio.v1.flow.execution.ExecutionContext
"""
return ExecutionContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid) |
def binaryEntropyVectorized(x):
    """
    Calculate entropy for a list of binary random variables.

    :param x: (numpy array) probability of each variable being 1, in [0, 1]
    :return: (numpy array) binary entropy in bits; 0 where x is exactly 0 or 1
    """
    # log2(0) would emit RuntimeWarnings (divide-by-zero, then invalid for
    # 0 * -inf); silence them here and patch the degenerate entries below.
    with np.errstate(divide='ignore', invalid='ignore'):
        entropy = - x*np.log2(x) - (1-x)*np.log2(1-x)
    # H(0) = H(1) = 0 by convention (lim p->0 of p*log p is 0).
    entropy[x*(1 - x) == 0] = 0
    return entropy
return entropy | def function[binaryEntropyVectorized, parameter[x]]:
constant[
Calculate entropy for a list of binary random variables
:param x: (numpy array) the probability of the variable to be 1.
:return: entropy: (numpy array) entropy
]
variable[entropy] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b09004c0> * call[name[np].log2, parameter[name[x]]]] - binary_operation[binary_operation[constant[1] - name[x]] * call[name[np].log2, parameter[binary_operation[constant[1] - name[x]]]]]]
call[name[entropy]][compare[binary_operation[name[x] * binary_operation[constant[1] - name[x]]] equal[==] constant[0]]] assign[=] constant[0]
return[name[entropy]] | keyword[def] identifier[binaryEntropyVectorized] ( identifier[x] ):
literal[string]
identifier[entropy] =- identifier[x] * identifier[np] . identifier[log2] ( identifier[x] )-( literal[int] - identifier[x] )* identifier[np] . identifier[log2] ( literal[int] - identifier[x] )
identifier[entropy] [ identifier[x] *( literal[int] - identifier[x] )== literal[int] ]= literal[int]
keyword[return] identifier[entropy] | def binaryEntropyVectorized(x):
"""
Calculate entropy for a list of binary random variables
:param x: (numpy array) the probability of the variable to be 1.
:return: entropy: (numpy array) entropy
"""
entropy = -x * np.log2(x) - (1 - x) * np.log2(1 - x)
entropy[x * (1 - x) == 0] = 0
return entropy |
def _set_group(self, v, load=False):
    """
    Setter method for group, mapped from YANG variable /qos/cpu/slot/port_group/group (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_group is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_group() directly.

    :param v: new value; wrapped types are unwrapped via ``_utype`` first
    :param load: not used in this setter body (conventionally True when
        loading from stored config)
    :raises ValueError: if ``v`` cannot be coerced into the YANG list type
    """
    # Unwrap typed values to their underlying native representation before
    # handing them to the YANG type machinery.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated YANG list type; this validates
        # structure and the 'group-id' key against the data model.
        t = YANGDynClass(v,base=YANGListType("group_id",group.group, yang_name="group", rest_name="group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}), is_container='list', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """group must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("group_id",group.group, yang_name="group", rest_name="group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}), is_container='list', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True)""",
        })
    self.__group = t
    # Notify the containing object, if it supports change propagation.
    if hasattr(self, '_set'):
self._set() | def function[_set_group, parameter[self, v, load]]:
constant[
Setter method for group, mapped from YANG variable /qos/cpu/slot/port_group/group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_group() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da2046215a0>
name[self].__group assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_group] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGListType] ( literal[string] , identifier[group] . identifier[group] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[is_container] = literal[string] , identifier[user_ordered] = keyword[False] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[yang_keys] = literal[string] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}), identifier[is_container] = literal[string] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__group] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_group(self, v, load=False):
"""
Setter method for group, mapped from YANG variable /qos/cpu/slot/port_group/group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_group() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGListType('group_id', group.group, yang_name='group', rest_name='group', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}), is_container='list', yang_name='group', rest_name='group', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'group must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("group_id",group.group, yang_name="group", rest_name="group", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'group-id\', extensions={u\'tailf-common\': {u\'info\': u\'Configure CPU QoS group parameters\', u\'cli-suppress-mode\': None, u\'cli-incomplete-no\': None, u\'callpoint\': u\'QosCpuGroupConfig\', u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'alt-name\': u\'group\'}}), is_container=\'list\', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure CPU QoS group parameters\', u\'cli-suppress-mode\': None, u\'cli-incomplete-no\': None, u\'callpoint\': u\'QosCpuGroupConfig\', u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'alt-name\': u\'group\'}}, namespace=\'urn:brocade.com:mgmt:brocade-qos-cpu\', defining_module=\'brocade-qos-cpu\', yang_type=\'list\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__group = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def iso_payment_reference_validator(v: str):
    """
    Validates ISO reference number checksum.

    Applies the mod-97 check (as used by ISO 11649 / IBAN-style references):
    whitespace is stripped, the first four characters are moved to the end,
    letters are mapped to numbers, and the resulting number must be
    congruent to 1 modulo 97.

    :param v: Reference number
    :raises ValidationError: if v contains invalid characters or the
        checksum does not verify
    """
    num = ''
    v = STRIP_WHITESPACE.sub('', v)
    # Rotate the 4-character prefix to the end, then map each character to
    # digits: '0'-'9' pass through, 'A'-'Z' become 10..35.
    for ch in v[4:] + v[0:4]:
        x = ord(ch)
        if ord('0') <= x <= ord('9'):
            num += ch
        else:
            x -= 55  # ord('A') == 65 -> 10, ..., ord('Z') == 90 -> 35
            if x < 10 or x > 35:
                # Anything outside 'A'-'Z' (e.g. lowercase, punctuation) is invalid.
                raise ValidationError(_('Invalid payment reference: {}').format(v))
            num += str(x)
    # A valid reference leaves remainder 1 when divided by 97.
    res = Decimal(num) % Decimal('97')
    if res != Decimal('1'):
raise ValidationError(_('Invalid payment reference: {}').format(v)) | def function[iso_payment_reference_validator, parameter[v]]:
constant[
Validates ISO reference number checksum.
:param v: Reference number
]
variable[num] assign[=] constant[]
variable[v] assign[=] call[name[STRIP_WHITESPACE].sub, parameter[constant[], name[v]]]
for taget[name[ch]] in starred[binary_operation[call[name[v]][<ast.Slice object at 0x7da1b1053ac0>] + call[name[v]][<ast.Slice object at 0x7da1b10504c0>]]] begin[:]
variable[x] assign[=] call[name[ord], parameter[name[ch]]]
if compare[call[name[ord], parameter[constant[0]]] less_or_equal[<=] name[x]] begin[:]
<ast.AugAssign object at 0x7da1b10ad120>
variable[res] assign[=] binary_operation[call[name[Decimal], parameter[name[num]]] <ast.Mod object at 0x7da2590d6920> call[name[Decimal], parameter[constant[97]]]]
if compare[name[res] not_equal[!=] call[name[Decimal], parameter[constant[1]]]] begin[:]
<ast.Raise object at 0x7da1b10adf60> | keyword[def] identifier[iso_payment_reference_validator] ( identifier[v] : identifier[str] ):
literal[string]
identifier[num] = literal[string]
identifier[v] = identifier[STRIP_WHITESPACE] . identifier[sub] ( literal[string] , identifier[v] )
keyword[for] identifier[ch] keyword[in] identifier[v] [ literal[int] :]+ identifier[v] [ literal[int] : literal[int] ]:
identifier[x] = identifier[ord] ( identifier[ch] )
keyword[if] identifier[ord] ( literal[string] )<= identifier[x] <= identifier[ord] ( literal[string] ):
identifier[num] += identifier[ch]
keyword[else] :
identifier[x] -= literal[int]
keyword[if] identifier[x] < literal[int] keyword[or] identifier[x] > literal[int] :
keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] ). identifier[format] ( identifier[v] ))
identifier[num] += identifier[str] ( identifier[x] )
identifier[res] = identifier[Decimal] ( identifier[num] )% identifier[Decimal] ( literal[string] )
keyword[if] identifier[res] != identifier[Decimal] ( literal[string] ):
keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] ). identifier[format] ( identifier[v] )) | def iso_payment_reference_validator(v: str):
"""
Validates ISO reference number checksum.
:param v: Reference number
"""
num = ''
v = STRIP_WHITESPACE.sub('', v)
for ch in v[4:] + v[0:4]:
x = ord(ch)
if ord('0') <= x <= ord('9'):
num += ch # depends on [control=['if'], data=[]]
else:
x -= 55
if x < 10 or x > 35:
raise ValidationError(_('Invalid payment reference: {}').format(v)) # depends on [control=['if'], data=[]]
num += str(x) # depends on [control=['for'], data=['ch']]
res = Decimal(num) % Decimal('97')
if res != Decimal('1'):
raise ValidationError(_('Invalid payment reference: {}').format(v)) # depends on [control=['if'], data=[]] |
def RandomUniform(shape, dtype, seed):
    """
    Random uniform op.

    :param shape: output shape of the sampled tensor
    :param dtype: key into ``dtype_map`` selecting the numpy output dtype
    :param seed: RNG seed; seeds the global numpy RNG when not None
    """
    # ``if seed:`` would silently skip seeding for seed == 0; treat any
    # non-None seed as an explicit request for reproducibility.
    if seed is not None:
        np.random.seed(seed)
return np.random.uniform(size=shape).astype(dtype_map[dtype]), | def function[RandomUniform, parameter[shape, dtype, seed]]:
constant[
Random uniform op.
]
if name[seed] begin[:]
call[name[np].random.seed, parameter[name[seed]]]
return[tuple[[<ast.Call object at 0x7da1b05bef80>]]] | keyword[def] identifier[RandomUniform] ( identifier[shape] , identifier[dtype] , identifier[seed] ):
literal[string]
keyword[if] identifier[seed] :
identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] )
keyword[return] identifier[np] . identifier[random] . identifier[uniform] ( identifier[size] = identifier[shape] ). identifier[astype] ( identifier[dtype_map] [ identifier[dtype] ]), | def RandomUniform(shape, dtype, seed):
"""
Random uniform op.
"""
if seed:
np.random.seed(seed) # depends on [control=['if'], data=[]]
return (np.random.uniform(size=shape).astype(dtype_map[dtype]),) |
def generate(self):
    """
    Produce a fresh random token.

    Hashes 100 bytes of OS-level randomness with SHA-512 and keeps the
    first ``self.token_length`` hex characters.

    :return: A new token
    :rtype: str
    """
    entropy = os.urandom(100)
    digest = hashlib.sha512(entropy)
    return digest.hexdigest()[:self.token_length]
return hash_gen.hexdigest()[:self.token_length] | def function[generate, parameter[self]]:
constant[
:return: A new token
:rtype: str
]
variable[random_data] assign[=] call[name[os].urandom, parameter[constant[100]]]
variable[hash_gen] assign[=] call[name[hashlib].new, parameter[constant[sha512]]]
call[name[hash_gen].update, parameter[name[random_data]]]
return[call[call[name[hash_gen].hexdigest, parameter[]]][<ast.Slice object at 0x7da1b11e3040>]] | keyword[def] identifier[generate] ( identifier[self] ):
literal[string]
identifier[random_data] = identifier[os] . identifier[urandom] ( literal[int] )
identifier[hash_gen] = identifier[hashlib] . identifier[new] ( literal[string] )
identifier[hash_gen] . identifier[update] ( identifier[random_data] )
keyword[return] identifier[hash_gen] . identifier[hexdigest] ()[: identifier[self] . identifier[token_length] ] | def generate(self):
"""
:return: A new token
:rtype: str
"""
random_data = os.urandom(100)
hash_gen = hashlib.new('sha512')
hash_gen.update(random_data)
return hash_gen.hexdigest()[:self.token_length] |
def count(self, X):
    """
    Sum word frequencies column-wise over the document matrix.

    Called from the fit method; each column of ``X`` corresponds to one
    word of the corpus, so summing down axis 0 yields per-word counts.

    Parameters
    ----------
    X : ndarray or masked ndarray
        Matrix of vectorized documents; may be masked so that only a
        subset of documents contributes to the sums.

    Returns
    -------
    counts : array
        Vector containing the count of every word (column) in X.
    """
    column_sums = X.sum(axis=0)
    # Matrix-like results keep a singleton dimension; convert to a plain
    # array and squeeze it away so callers get a flat vector.
    return np.squeeze(np.asarray(column_sums))
return np.squeeze(np.asarray(X.sum(axis=0))) | def function[count, parameter[self, X]]:
constant[
Called from the fit method, this method gets all the
words from the corpus and their corresponding frequency
counts.
Parameters
----------
X : ndarray or masked ndarray
Pass in the matrix of vectorized documents, can be masked in
order to sum the word frequencies for only a subset of documents.
Returns
-------
counts : array
A vector containing the counts of all words in X (columns)
]
return[call[name[np].squeeze, parameter[call[name[np].asarray, parameter[call[name[X].sum, parameter[]]]]]]] | keyword[def] identifier[count] ( identifier[self] , identifier[X] ):
literal[string]
keyword[return] identifier[np] . identifier[squeeze] ( identifier[np] . identifier[asarray] ( identifier[X] . identifier[sum] ( identifier[axis] = literal[int] ))) | def count(self, X):
"""
Called from the fit method, this method gets all the
words from the corpus and their corresponding frequency
counts.
Parameters
----------
X : ndarray or masked ndarray
Pass in the matrix of vectorized documents, can be masked in
order to sum the word frequencies for only a subset of documents.
Returns
-------
counts : array
A vector containing the counts of all words in X (columns)
"""
# Sum on axis 0 (by columns), each column is a word
# Convert the matrix to an array
# Squeeze to remove the 1 dimension objects (like ravel)
return np.squeeze(np.asarray(X.sum(axis=0))) |
async def upload_image(self, image_file, filename=None, *,
                       return_uploaded_image=False):
    """Upload an image that can be later attached to a chat message.

    Args:
        image_file: A file-like object containing an image.
        filename (str): (optional) Custom name for the uploaded file.
        return_uploaded_image (bool): (optional) If True, return
            :class:`.UploadedImage` instead of image ID. Defaults to False.

    Raises:
        hangups.NetworkError: If the upload request failed.

    Returns:
        :class:`.UploadedImage` instance, or ID of the uploaded image.
    """
    image_filename = filename or os.path.basename(image_file.name)
    image_data = image_file.read()

    # request an upload URL
    # (two-step upload: first create a session describing the file, then
    # PUT the raw bytes to the URL the session hands back)
    res = await self._base_request(
        IMAGE_UPLOAD_URL,
        'application/x-www-form-urlencoded;charset=UTF-8', 'json',
        json.dumps({
            "protocolVersion": "0.8",
            "createSessionRequest": {
                "fields": [{
                    "external": {
                        "name": "file",
                        "filename": image_filename,
                        "put": {},
                        "size": len(image_data)
                    }
                }]
            }
        })
    )

    try:
        # The session response nominates the PUT target for the bytes.
        upload_url = self._get_upload_session_status(res)[
            'externalFieldTransfers'
        ][0]['putInfo']['url']
    except KeyError:
        raise exceptions.NetworkError(
            'image upload failed: can not acquire an upload url'
        )

    # upload the image data using the upload_url to get the upload info
    res = await self._base_request(
        upload_url, 'application/octet-stream', 'json', image_data
    )

    try:
        # Dig the photo id and public URL out of the completion payload.
        raw_info = (
            self._get_upload_session_status(res)['additionalInfo']
            ['uploader_service.GoogleRupioAdditionalInfo']
            ['completionInfo']['customerSpecificInfo']
        )
        image_id = raw_info['photoid']
        url = raw_info['url']
    except KeyError:
        raise exceptions.NetworkError(
            'image upload failed: can not fetch upload info'
        )

    result = UploadedImage(image_id=image_id, url=url)
return result if return_uploaded_image else result.image_id | <ast.AsyncFunctionDef object at 0x7da2047e9d20> | keyword[async] keyword[def] identifier[upload_image] ( identifier[self] , identifier[image_file] , identifier[filename] = keyword[None] ,*,
identifier[return_uploaded_image] = keyword[False] ):
literal[string]
identifier[image_filename] = identifier[filename] keyword[or] identifier[os] . identifier[path] . identifier[basename] ( identifier[image_file] . identifier[name] )
identifier[image_data] = identifier[image_file] . identifier[read] ()
identifier[res] = keyword[await] identifier[self] . identifier[_base_request] (
identifier[IMAGE_UPLOAD_URL] ,
literal[string] , literal[string] ,
identifier[json] . identifier[dumps] ({
literal[string] : literal[string] ,
literal[string] :{
literal[string] :[{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[image_filename] ,
literal[string] :{},
literal[string] : identifier[len] ( identifier[image_data] )
}
}]
}
})
)
keyword[try] :
identifier[upload_url] = identifier[self] . identifier[_get_upload_session_status] ( identifier[res] )[
literal[string]
][ literal[int] ][ literal[string] ][ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exceptions] . identifier[NetworkError] (
literal[string]
)
identifier[res] = keyword[await] identifier[self] . identifier[_base_request] (
identifier[upload_url] , literal[string] , literal[string] , identifier[image_data]
)
keyword[try] :
identifier[raw_info] =(
identifier[self] . identifier[_get_upload_session_status] ( identifier[res] )[ literal[string] ]
[ literal[string] ]
[ literal[string] ][ literal[string] ]
)
identifier[image_id] = identifier[raw_info] [ literal[string] ]
identifier[url] = identifier[raw_info] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exceptions] . identifier[NetworkError] (
literal[string]
)
identifier[result] = identifier[UploadedImage] ( identifier[image_id] = identifier[image_id] , identifier[url] = identifier[url] )
keyword[return] identifier[result] keyword[if] identifier[return_uploaded_image] keyword[else] identifier[result] . identifier[image_id] | async def upload_image(self, image_file, filename=None, *, return_uploaded_image=False):
"""Upload an image that can be later attached to a chat message.
Args:
image_file: A file-like object containing an image.
filename (str): (optional) Custom name for the uploaded file.
return_uploaded_image (bool): (optional) If True, return
:class:`.UploadedImage` instead of image ID. Defaults to False.
Raises:
hangups.NetworkError: If the upload request failed.
Returns:
:class:`.UploadedImage` instance, or ID of the uploaded image.
"""
image_filename = filename or os.path.basename(image_file.name)
image_data = image_file.read()
# request an upload URL
res = await self._base_request(IMAGE_UPLOAD_URL, 'application/x-www-form-urlencoded;charset=UTF-8', 'json', json.dumps({'protocolVersion': '0.8', 'createSessionRequest': {'fields': [{'external': {'name': 'file', 'filename': image_filename, 'put': {}, 'size': len(image_data)}}]}}))
try:
upload_url = self._get_upload_session_status(res)['externalFieldTransfers'][0]['putInfo']['url'] # depends on [control=['try'], data=[]]
except KeyError:
raise exceptions.NetworkError('image upload failed: can not acquire an upload url') # depends on [control=['except'], data=[]]
# upload the image data using the upload_url to get the upload info
res = await self._base_request(upload_url, 'application/octet-stream', 'json', image_data)
try:
raw_info = self._get_upload_session_status(res)['additionalInfo']['uploader_service.GoogleRupioAdditionalInfo']['completionInfo']['customerSpecificInfo']
image_id = raw_info['photoid']
url = raw_info['url'] # depends on [control=['try'], data=[]]
except KeyError:
raise exceptions.NetworkError('image upload failed: can not fetch upload info') # depends on [control=['except'], data=[]]
result = UploadedImage(image_id=image_id, url=url)
return result if return_uploaded_image else result.image_id |
def _readuntil(f, end=_TYPE_END):
    """Read single bytes from *f* until *end* is seen; return the bytes before it."""
    collected = bytearray()
    while True:
        ch = f.read(1)
        if ch == end:
            return collected
        if ch == b'':
            # Hit EOF before the terminator showed up.
            raise ValueError('File ended unexpectedly. Expected end byte {}.'.format(end))
        collected += ch
constant[Helper function to read bytes until a certain end byte is hit]
variable[buf] assign[=] call[name[bytearray], parameter[]]
variable[byte] assign[=] call[name[f].read, parameter[constant[1]]]
while compare[name[byte] not_equal[!=] name[end]] begin[:]
if compare[name[byte] equal[==] constant[b'']] begin[:]
<ast.Raise object at 0x7da18f09c940>
<ast.AugAssign object at 0x7da18f09da50>
variable[byte] assign[=] call[name[f].read, parameter[constant[1]]]
return[name[buf]] | keyword[def] identifier[_readuntil] ( identifier[f] , identifier[end] = identifier[_TYPE_END] ):
literal[string]
identifier[buf] = identifier[bytearray] ()
identifier[byte] = identifier[f] . identifier[read] ( literal[int] )
keyword[while] identifier[byte] != identifier[end] :
keyword[if] identifier[byte] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[end] ))
identifier[buf] += identifier[byte]
identifier[byte] = identifier[f] . identifier[read] ( literal[int] )
keyword[return] identifier[buf] | def _readuntil(f, end=_TYPE_END):
"""Helper function to read bytes until a certain end byte is hit"""
buf = bytearray()
byte = f.read(1)
while byte != end:
if byte == b'':
raise ValueError('File ended unexpectedly. Expected end byte {}.'.format(end)) # depends on [control=['if'], data=[]]
buf += byte
byte = f.read(1) # depends on [control=['while'], data=['byte', 'end']]
return buf |
def disable_for_loaddata(signal_handler):
    """
    Decorator that turns a 'post_save' signal handler into a no-op while
    the loaddata management command is running.
    http://code.djangoproject.com/ticket/8399
    """
    @wraps(signal_handler)
    def wrapper(*args, **kwargs):
        # Skip the handler when 'loaddata' appears anywhere on the call
        # stack, i.e. fixtures are currently being loaded.
        if any(inspect.getmodulename(frame[1]) == 'loaddata'
               for frame in inspect.stack()):
            return  # pragma: no cover
        signal_handler(*args, **kwargs)
    return wrapper
constant[
Decorator for disabling signals sent by 'post_save'
on loaddata command.
http://code.djangoproject.com/ticket/8399
]
def function[wrapper, parameter[]]:
for taget[name[fr]] in starred[call[name[inspect].stack, parameter[]]] begin[:]
if compare[call[name[inspect].getmodulename, parameter[call[name[fr]][constant[1]]]] equal[==] constant[loaddata]] begin[:]
return[None]
call[name[signal_handler], parameter[<ast.Starred object at 0x7da1b222f550>]]
return[name[wrapper]] | keyword[def] identifier[disable_for_loaddata] ( identifier[signal_handler] ):
literal[string]
@ identifier[wraps] ( identifier[signal_handler] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[for] identifier[fr] keyword[in] identifier[inspect] . identifier[stack] ():
keyword[if] identifier[inspect] . identifier[getmodulename] ( identifier[fr] [ literal[int] ])== literal[string] :
keyword[return]
identifier[signal_handler] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def disable_for_loaddata(signal_handler):
"""
Decorator for disabling signals sent by 'post_save'
on loaddata command.
http://code.djangoproject.com/ticket/8399
"""
@wraps(signal_handler)
def wrapper(*args, **kwargs):
for fr in inspect.stack():
if inspect.getmodulename(fr[1]) == 'loaddata':
return # pragma: no cover # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fr']]
signal_handler(*args, **kwargs)
return wrapper |
def check_uniqueness_constraint(m, kind=None):
    '''
    Check the model for uniqueness constraint violations and return the
    number of violations found (null identifying attributes plus
    duplicated identifier values).
    '''
    metaclasses = (m.metaclasses.values() if kind is None
                   else [m.find_metaclass(kind)])
    violations = 0
    for metaclass in metaclasses:
        # One lookup table of already-seen identifier values per index.
        seen = {identifier: {} for identifier in metaclass.indices}
        for inst in metaclass.select_many():
            # Null values in identifying attributes
            for attr_name, attr_type in metaclass.attributes:
                if attr_name not in metaclass.identifying_attributes:
                    continue
                attr_value = getattr(inst, attr_name)
                if attr_value is None or (attr_type == 'UNIQUE_ID' and
                                          not attr_value):
                    violations += 1
                    logger.warning('%s.%s is part of an identifier and is null'
                                   % (metaclass.kind, attr_name))
            # Duplicated identifier values
            for identifier in metaclass.indices:
                index_key = frozenset(
                    (attr_name, getattr(inst, attr_name))
                    for attr_name in metaclass.indices[identifier])
                if index_key in seen[identifier]:
                    violations += 1
                    id_string = pretty_unique_identifier(inst, identifier)
                    logger.warning('uniqueness constraint violation in %s, %s'
                                   % (metaclass.kind, id_string))
                seen[identifier][index_key] = inst
    return violations
constant[
Check the model for uniqueness constraint violations.
]
if compare[name[kind] is constant[None]] begin[:]
variable[metaclasses] assign[=] call[name[m].metaclasses.values, parameter[]]
variable[res] assign[=] constant[0]
for taget[name[metaclass]] in starred[name[metaclasses]] begin[:]
variable[id_map] assign[=] call[name[dict], parameter[]]
for taget[name[identifier]] in starred[name[metaclass].indices] begin[:]
call[name[id_map]][name[identifier]] assign[=] call[name[dict], parameter[]]
for taget[name[inst]] in starred[call[name[metaclass].select_many, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b021dea0>, <ast.Name object at 0x7da1b021f910>]]] in starred[name[metaclass].attributes] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[metaclass].identifying_attributes] begin[:]
continue
variable[value] assign[=] call[name[getattr], parameter[name[inst], name[name]]]
variable[isnull] assign[=] compare[name[value] is constant[None]]
<ast.AugAssign object at 0x7da1b021d720>
if name[isnull] begin[:]
<ast.AugAssign object at 0x7da1b021e0e0>
call[name[logger].warning, parameter[binary_operation[constant[%s.%s is part of an identifier and is null] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b021e350>, <ast.Name object at 0x7da1b021d8a0>]]]]]
for taget[name[identifier]] in starred[name[metaclass].indices] begin[:]
variable[kwargs] assign[=] call[name[dict], parameter[]]
for taget[name[name]] in starred[call[name[metaclass].indices][name[identifier]]] begin[:]
call[name[kwargs]][name[name]] assign[=] call[name[getattr], parameter[name[inst], name[name]]]
variable[index_key] assign[=] call[name[frozenset], parameter[call[name[kwargs].items, parameter[]]]]
if compare[name[index_key] in call[name[id_map]][name[identifier]]] begin[:]
<ast.AugAssign object at 0x7da1b021e200>
variable[id_string] assign[=] call[name[pretty_unique_identifier], parameter[name[inst], name[identifier]]]
call[name[logger].warning, parameter[binary_operation[constant[uniqueness constraint violation in %s, %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b021e4d0>, <ast.Name object at 0x7da1b021fb50>]]]]]
call[call[name[id_map]][name[identifier]]][name[index_key]] assign[=] name[inst]
return[name[res]] | keyword[def] identifier[check_uniqueness_constraint] ( identifier[m] , identifier[kind] = keyword[None] ):
literal[string]
keyword[if] identifier[kind] keyword[is] keyword[None] :
identifier[metaclasses] = identifier[m] . identifier[metaclasses] . identifier[values] ()
keyword[else] :
identifier[metaclasses] =[ identifier[m] . identifier[find_metaclass] ( identifier[kind] )]
identifier[res] = literal[int]
keyword[for] identifier[metaclass] keyword[in] identifier[metaclasses] :
identifier[id_map] = identifier[dict] ()
keyword[for] identifier[identifier] keyword[in] identifier[metaclass] . identifier[indices] :
identifier[id_map] [ identifier[identifier] ]= identifier[dict] ()
keyword[for] identifier[inst] keyword[in] identifier[metaclass] . identifier[select_many] ():
keyword[for] identifier[name] , identifier[ty] keyword[in] identifier[metaclass] . identifier[attributes] :
keyword[if] identifier[name] keyword[not] keyword[in] identifier[metaclass] . identifier[identifying_attributes] :
keyword[continue]
identifier[value] = identifier[getattr] ( identifier[inst] , identifier[name] )
identifier[isnull] = identifier[value] keyword[is] keyword[None]
identifier[isnull] |=( identifier[ty] == literal[string] keyword[and] keyword[not] identifier[value] )
keyword[if] identifier[isnull] :
identifier[res] += literal[int]
identifier[logger] . identifier[warning] ( literal[string]
%( identifier[metaclass] . identifier[kind] , identifier[name] ))
keyword[for] identifier[identifier] keyword[in] identifier[metaclass] . identifier[indices] :
identifier[kwargs] = identifier[dict] ()
keyword[for] identifier[name] keyword[in] identifier[metaclass] . identifier[indices] [ identifier[identifier] ]:
identifier[kwargs] [ identifier[name] ]= identifier[getattr] ( identifier[inst] , identifier[name] )
identifier[index_key] = identifier[frozenset] ( identifier[kwargs] . identifier[items] ())
keyword[if] identifier[index_key] keyword[in] identifier[id_map] [ identifier[identifier] ]:
identifier[res] += literal[int]
identifier[id_string] = identifier[pretty_unique_identifier] ( identifier[inst] , identifier[identifier] )
identifier[logger] . identifier[warning] ( literal[string]
%( identifier[metaclass] . identifier[kind] , identifier[id_string] ))
identifier[id_map] [ identifier[identifier] ][ identifier[index_key] ]= identifier[inst]
keyword[return] identifier[res] | def check_uniqueness_constraint(m, kind=None):
"""
Check the model for uniqueness constraint violations.
"""
if kind is None:
metaclasses = m.metaclasses.values() # depends on [control=['if'], data=[]]
else:
metaclasses = [m.find_metaclass(kind)]
res = 0
for metaclass in metaclasses:
id_map = dict()
for identifier in metaclass.indices:
id_map[identifier] = dict() # depends on [control=['for'], data=['identifier']]
for inst in metaclass.select_many():
# Check for null-values
for (name, ty) in metaclass.attributes:
if name not in metaclass.identifying_attributes:
continue # depends on [control=['if'], data=[]]
value = getattr(inst, name)
isnull = value is None
isnull |= ty == 'UNIQUE_ID' and (not value)
if isnull:
res += 1
logger.warning('%s.%s is part of an identifier and is null' % (metaclass.kind, name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Check uniqueness
for identifier in metaclass.indices:
kwargs = dict()
for name in metaclass.indices[identifier]:
kwargs[name] = getattr(inst, name) # depends on [control=['for'], data=['name']]
index_key = frozenset(kwargs.items())
if index_key in id_map[identifier]:
res += 1
id_string = pretty_unique_identifier(inst, identifier)
logger.warning('uniqueness constraint violation in %s, %s' % (metaclass.kind, id_string)) # depends on [control=['if'], data=[]]
id_map[identifier][index_key] = inst # depends on [control=['for'], data=['identifier']] # depends on [control=['for'], data=['inst']] # depends on [control=['for'], data=['metaclass']]
return res |
def acquire(self, timeout=10):
    """Acquire the lock.

    :param timeout: Maximum time to wait before returning. `None` means
                    forever, any other value equal or greater than 0 is
                    the number of seconds.
    :returns: True if the lock has been acquired, False otherwise.
    """
    # Retry forever when no timeout is given; otherwise give up after
    # `timeout` seconds overall.
    stop = (
        tenacity.stop_never
        if timeout is None else tenacity.stop_after_delay(timeout)
    )
    def wait(previous_attempt_number, delay_since_first_attempt):
        # Custom tenacity wait callback: rather than sleeping a fixed
        # interval, block on an etcd watch of the lock key so we wake up
        # as soon as the key changes.
        if timeout is None:
            remaining_timeout = None
        else:
            remaining_timeout = max(timeout - delay_since_first_attempt, 0)
        # TODO(jd): Wait for a DELETE event to happen: that'd mean the lock
        # has been released, rather than retrying on PUT events too
        try:
            self.etcd_client.watch_once(self.key, remaining_timeout)
        except exceptions.WatchTimedOut:
            pass
        # Return 0 so tenacity does not sleep any further; the watch
        # above already provided the delay.
        return 0
    @tenacity.retry(retry=tenacity.retry_never,
                    stop=stop,
                    wait=wait)
    def _acquire():
        # TODO: save the created revision so we can check it later to make
        # sure we still have the lock
        self.lease = self.etcd_client.lease(self.ttl)
        # Transaction: create the key only if it does not exist yet (its
        # create revision is 0), attaching the lease so the lock expires
        # on its own if the holder dies without releasing it.
        success, _ = self.etcd_client.transaction(
            compare=[
                self.etcd_client.transactions.create(self.key) == 0
            ],
            success=[
                self.etcd_client.transactions.put(self.key, self.uuid,
                                                  lease=self.lease)
            ],
            failure=[
                self.etcd_client.transactions.get(self.key)
            ]
        )
        if success is True:
            return True
        # Someone else holds the lock: drop our unused lease and retry.
        self.lease = None
        raise tenacity.TryAgain
    try:
        return _acquire()
    except tenacity.RetryError:
        # Stop condition hit (timeout expired) without acquiring the lock.
        return False
constant[Acquire the lock.
:params timeout: Maximum time to wait before returning. `None` means
forever, any other value equal or greater than 0 is
the number of seconds.
:returns: True if the lock has been acquired, False otherwise.
]
variable[stop] assign[=] <ast.IfExp object at 0x7da18f810160>
def function[wait, parameter[previous_attempt_number, delay_since_first_attempt]]:
if compare[name[timeout] is constant[None]] begin[:]
variable[remaining_timeout] assign[=] constant[None]
<ast.Try object at 0x7da18f8115d0>
return[constant[0]]
def function[_acquire, parameter[]]:
name[self].lease assign[=] call[name[self].etcd_client.lease, parameter[name[self].ttl]]
<ast.Tuple object at 0x7da18f8108e0> assign[=] call[name[self].etcd_client.transaction, parameter[]]
if compare[name[success] is constant[True]] begin[:]
return[constant[True]]
name[self].lease assign[=] constant[None]
<ast.Raise object at 0x7da18f8100d0>
<ast.Try object at 0x7da18f812200> | keyword[def] identifier[acquire] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
identifier[stop] =(
identifier[tenacity] . identifier[stop_never]
keyword[if] identifier[timeout] keyword[is] keyword[None] keyword[else] identifier[tenacity] . identifier[stop_after_delay] ( identifier[timeout] )
)
keyword[def] identifier[wait] ( identifier[previous_attempt_number] , identifier[delay_since_first_attempt] ):
keyword[if] identifier[timeout] keyword[is] keyword[None] :
identifier[remaining_timeout] = keyword[None]
keyword[else] :
identifier[remaining_timeout] = identifier[max] ( identifier[timeout] - identifier[delay_since_first_attempt] , literal[int] )
keyword[try] :
identifier[self] . identifier[etcd_client] . identifier[watch_once] ( identifier[self] . identifier[key] , identifier[remaining_timeout] )
keyword[except] identifier[exceptions] . identifier[WatchTimedOut] :
keyword[pass]
keyword[return] literal[int]
@ identifier[tenacity] . identifier[retry] ( identifier[retry] = identifier[tenacity] . identifier[retry_never] ,
identifier[stop] = identifier[stop] ,
identifier[wait] = identifier[wait] )
keyword[def] identifier[_acquire] ():
identifier[self] . identifier[lease] = identifier[self] . identifier[etcd_client] . identifier[lease] ( identifier[self] . identifier[ttl] )
identifier[success] , identifier[_] = identifier[self] . identifier[etcd_client] . identifier[transaction] (
identifier[compare] =[
identifier[self] . identifier[etcd_client] . identifier[transactions] . identifier[create] ( identifier[self] . identifier[key] )== literal[int]
],
identifier[success] =[
identifier[self] . identifier[etcd_client] . identifier[transactions] . identifier[put] ( identifier[self] . identifier[key] , identifier[self] . identifier[uuid] ,
identifier[lease] = identifier[self] . identifier[lease] )
],
identifier[failure] =[
identifier[self] . identifier[etcd_client] . identifier[transactions] . identifier[get] ( identifier[self] . identifier[key] )
]
)
keyword[if] identifier[success] keyword[is] keyword[True] :
keyword[return] keyword[True]
identifier[self] . identifier[lease] = keyword[None]
keyword[raise] identifier[tenacity] . identifier[TryAgain]
keyword[try] :
keyword[return] identifier[_acquire] ()
keyword[except] identifier[tenacity] . identifier[RetryError] :
keyword[return] keyword[False] | def acquire(self, timeout=10):
"""Acquire the lock.
:params timeout: Maximum time to wait before returning. `None` means
forever, any other value equal or greater than 0 is
the number of seconds.
:returns: True if the lock has been acquired, False otherwise.
"""
stop = tenacity.stop_never if timeout is None else tenacity.stop_after_delay(timeout)
def wait(previous_attempt_number, delay_since_first_attempt):
if timeout is None:
remaining_timeout = None # depends on [control=['if'], data=[]]
else:
remaining_timeout = max(timeout - delay_since_first_attempt, 0)
# TODO(jd): Wait for a DELETE event to happen: that'd mean the lock
# has been released, rather than retrying on PUT events too
try:
self.etcd_client.watch_once(self.key, remaining_timeout) # depends on [control=['try'], data=[]]
except exceptions.WatchTimedOut:
pass # depends on [control=['except'], data=[]]
return 0
@tenacity.retry(retry=tenacity.retry_never, stop=stop, wait=wait)
def _acquire():
# TODO: save the created revision so we can check it later to make
# sure we still have the lock
self.lease = self.etcd_client.lease(self.ttl)
(success, _) = self.etcd_client.transaction(compare=[self.etcd_client.transactions.create(self.key) == 0], success=[self.etcd_client.transactions.put(self.key, self.uuid, lease=self.lease)], failure=[self.etcd_client.transactions.get(self.key)])
if success is True:
return True # depends on [control=['if'], data=[]]
self.lease = None
raise tenacity.TryAgain
try:
return _acquire() # depends on [control=['try'], data=[]]
except tenacity.RetryError:
return False # depends on [control=['except'], data=[]] |
def partitioned_repertoire(self, direction, partition):
    """Delegate to the subsystem for *direction* to compute the repertoire
    over *partition*."""
    return self.system[direction].partitioned_repertoire(direction, partition)
constant[Compute the repertoire over the partition in the given direction.]
variable[system] assign[=] call[name[self].system][name[direction]]
return[call[name[system].partitioned_repertoire, parameter[name[direction], name[partition]]]] | keyword[def] identifier[partitioned_repertoire] ( identifier[self] , identifier[direction] , identifier[partition] ):
literal[string]
identifier[system] = identifier[self] . identifier[system] [ identifier[direction] ]
keyword[return] identifier[system] . identifier[partitioned_repertoire] ( identifier[direction] , identifier[partition] ) | def partitioned_repertoire(self, direction, partition):
"""Compute the repertoire over the partition in the given direction."""
system = self.system[direction]
return system.partitioned_repertoire(direction, partition) |
def _build_list_params(param_name, key, values):
"""Builds a list of POST parameters from a list or single value."""
params = {}
if hasattr(values, '__iter__'):
index = 0
for value in values:
params[str(param_name) + '[' + str(index) + '].' + str(key)] = str(value)
index += 1
else:
params[str(param_name) + '[0].' + str(key)] = str(values)
return params | def function[_build_list_params, parameter[param_name, key, values]]:
constant[Builds a list of POST parameters from a list or single value.]
variable[params] assign[=] dictionary[[], []]
if call[name[hasattr], parameter[name[values], constant[__iter__]]] begin[:]
variable[index] assign[=] constant[0]
for taget[name[value]] in starred[name[values]] begin[:]
call[name[params]][binary_operation[binary_operation[binary_operation[binary_operation[call[name[str], parameter[name[param_name]]] + constant[[]] + call[name[str], parameter[name[index]]]] + constant[].]] + call[name[str], parameter[name[key]]]]] assign[=] call[name[str], parameter[name[value]]]
<ast.AugAssign object at 0x7da1b09144f0>
return[name[params]] | keyword[def] identifier[_build_list_params] ( identifier[param_name] , identifier[key] , identifier[values] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[hasattr] ( identifier[values] , literal[string] ):
identifier[index] = literal[int]
keyword[for] identifier[value] keyword[in] identifier[values] :
identifier[params] [ identifier[str] ( identifier[param_name] )+ literal[string] + identifier[str] ( identifier[index] )+ literal[string] + identifier[str] ( identifier[key] )]= identifier[str] ( identifier[value] )
identifier[index] += literal[int]
keyword[else] :
identifier[params] [ identifier[str] ( identifier[param_name] )+ literal[string] + identifier[str] ( identifier[key] )]= identifier[str] ( identifier[values] )
keyword[return] identifier[params] | def _build_list_params(param_name, key, values):
"""Builds a list of POST parameters from a list or single value."""
params = {}
if hasattr(values, '__iter__'):
index = 0
for value in values:
params[str(param_name) + '[' + str(index) + '].' + str(key)] = str(value)
index += 1 # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]]
else:
params[str(param_name) + '[0].' + str(key)] = str(values)
return params |
def setDecel(self, vehID, decel):
    """setDecel(string, double) -> None
    Sets the preferred maximal deceleration in m/s^2 for this vehicle.
    """
    # Forward the value to the simulation as a double-typed set command.
    self._connection._sendDoubleCmd(tc.CMD_SET_VEHICLE_VARIABLE,
                                    tc.VAR_DECEL, vehID, decel)
constant[setDecel(string, double) -> None
Sets the preferred maximal deceleration in m/s^2 for this vehicle.
]
call[name[self]._connection._sendDoubleCmd, parameter[name[tc].CMD_SET_VEHICLE_VARIABLE, name[tc].VAR_DECEL, name[vehID], name[decel]]] | keyword[def] identifier[setDecel] ( identifier[self] , identifier[vehID] , identifier[decel] ):
literal[string]
identifier[self] . identifier[_connection] . identifier[_sendDoubleCmd] (
identifier[tc] . identifier[CMD_SET_VEHICLE_VARIABLE] , identifier[tc] . identifier[VAR_DECEL] , identifier[vehID] , identifier[decel] ) | def setDecel(self, vehID, decel):
"""setDecel(string, double) -> None
Sets the preferred maximal deceleration in m/s^2 for this vehicle.
"""
self._connection._sendDoubleCmd(tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_DECEL, vehID, decel) |
def post_event_discount(
        self, event_id,
        discount_code,
        discount_amount_off=None,
        discount_percent_off=None,
        discount_ticket_ids=None,
        discount_quantity_available=None,
        discount_start_date=None,
        discount_end_date=None):
    """
    Create a discount for the given event.

    POST /events/:id/discounts/
        discount_code               string   required  Code to activate discount
        discount_amount_off         unknown  optional  Fixed reduction amount
        discount_percent_off        string   optional  Percentage reduction
        discount_ticket_ids         unknown  optional  IDs of tickets to limit discount to
        discount_quantity_available integer  optional  Number of discount uses
        discount_start_date         datetime optional  Allow use from this date
        discount_end_date           datetime optional  Allow use until this date
    TODO: Consider deprecating this method
    """
    # NOTE(review): construct_namespaced_dict presumably keeps only the
    # "discount"-prefixed names from locals() -- verify. Do not introduce
    # temporaries before this call; locals() captures everything in scope.
    data = construct_namespaced_dict("discount", locals())
    return self.post("/events/{0}/discounts/".format(event_id), data=data)
constant[
POST /events/:id/discounts/
discount_code string required Code to activate discount
discount_amount_off unknown optional Fixed reduction amount
discount_percent_off string optional Percentage reduction
discount_ticket_ids unknown optional IDs of tickets to limit discount to
discount_quantity_available integer optional Number of discount uses
discount_start_date datetime optional Allow use from this date
discount_end_date datetime optional Allow use until this date
TODO: Consider deprecating this method
]
variable[data] assign[=] call[name[construct_namespaced_dict], parameter[constant[discount], call[name[locals], parameter[]]]]
return[call[name[self].post, parameter[call[constant[/events/{0}/discounts/].format, parameter[name[event_id]]]]]] | keyword[def] identifier[post_event_discount] (
identifier[self] , identifier[event_id] ,
identifier[discount_code] ,
identifier[discount_amount_off] = keyword[None] ,
identifier[discount_percent_off] = keyword[None] ,
identifier[discount_ticket_ids] = keyword[None] ,
identifier[discount_quantity_available] = keyword[None] ,
identifier[discount_start_date] = keyword[None] ,
identifier[discount_end_date] = keyword[None] ):
literal[string]
identifier[data] = identifier[construct_namespaced_dict] ( literal[string] , identifier[locals] ())
keyword[return] identifier[self] . identifier[post] ( literal[string] . identifier[format] ( identifier[event_id] ), identifier[data] = identifier[data] ) | def post_event_discount(self, event_id, discount_code, discount_amount_off=None, discount_percent_off=None, discount_ticket_ids=None, discount_quantity_available=None, discount_start_date=None, discount_end_date=None):
"""
POST /events/:id/discounts/
discount_code string required Code to activate discount
discount_amount_off unknown optional Fixed reduction amount
discount_percent_off string optional Percentage reduction
discount_ticket_ids unknown optional IDs of tickets to limit discount to
discount_quantity_available integer optional Number of discount uses
discount_start_date datetime optional Allow use from this date
discount_end_date datetime optional Allow use until this date
TODO: Consider deprecating this method
"""
data = construct_namespaced_dict('discount', locals())
return self.post('/events/{0}/discounts/'.format(event_id), data=data) |
def addEntry(self, key='', value=''):
    """
    Creates a new entry item for this widget and inserts it just before
    the last top-level item.
    :param key | <str>
           value | <variant>
    """
    item = XTreeWidgetItem()
    item.setText(1, nativestring(key))
    item.setText(2, nativestring(value))
    item.setIcon(0, QtGui.QIcon(resources.find('img/close.png')))
    item.setFixedHeight(22)
    position = self.topLevelItemCount() - 1
    self.insertTopLevelItem(position, item)
    return item
constant[
Creates a new entry item for this widget.
:param key | <str>
value | <variant>
]
variable[img] assign[=] call[name[resources].find, parameter[constant[img/close.png]]]
variable[new_item] assign[=] call[name[XTreeWidgetItem], parameter[]]
call[name[new_item].setText, parameter[constant[1], call[name[nativestring], parameter[name[key]]]]]
call[name[new_item].setText, parameter[constant[2], call[name[nativestring], parameter[name[value]]]]]
call[name[new_item].setIcon, parameter[constant[0], call[name[QtGui].QIcon, parameter[name[img]]]]]
call[name[new_item].setFixedHeight, parameter[constant[22]]]
call[name[self].insertTopLevelItem, parameter[binary_operation[call[name[self].topLevelItemCount, parameter[]] - constant[1]], name[new_item]]]
return[name[new_item]] | keyword[def] identifier[addEntry] ( identifier[self] , identifier[key] = literal[string] , identifier[value] = literal[string] ):
literal[string]
identifier[img] = identifier[resources] . identifier[find] ( literal[string] )
identifier[new_item] = identifier[XTreeWidgetItem] ()
identifier[new_item] . identifier[setText] ( literal[int] , identifier[nativestring] ( identifier[key] ))
identifier[new_item] . identifier[setText] ( literal[int] , identifier[nativestring] ( identifier[value] ))
identifier[new_item] . identifier[setIcon] ( literal[int] , identifier[QtGui] . identifier[QIcon] ( identifier[img] ))
identifier[new_item] . identifier[setFixedHeight] ( literal[int] )
identifier[self] . identifier[insertTopLevelItem] ( identifier[self] . identifier[topLevelItemCount] ()- literal[int] , identifier[new_item] )
keyword[return] identifier[new_item] | def addEntry(self, key='', value=''):
"""
Creates a new entry item for this widget.
:param key | <str>
value | <variant>
"""
img = resources.find('img/close.png')
new_item = XTreeWidgetItem()
new_item.setText(1, nativestring(key))
new_item.setText(2, nativestring(value))
new_item.setIcon(0, QtGui.QIcon(img))
new_item.setFixedHeight(22)
self.insertTopLevelItem(self.topLevelItemCount() - 1, new_item)
return new_item |
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    nsg = __salt__['azurearm_network.network_security_group_get'](
        name, resource_group, azurearm_log_level='info', **connection_auth)

    # Already absent: nothing to do.
    if 'error' in nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    # Test mode: report what would happen without deleting anything.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['changes'] = {'old': nsg, 'new': {}}
        return ret

    if __salt__['azurearm_network.network_security_group_delete'](
            name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {'old': nsg, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
constant[
.. versionadded:: 2019.2.0
Ensure a network security group does not exist in the resource group.
:param name:
Name of the network security group.
:param resource_group:
The resource group assigned to the network security group.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c312a0>, <ast.Constant object at 0x7da1b1c339a0>, <ast.Constant object at 0x7da1b1c32770>, <ast.Constant object at 0x7da1b1c33d00>], [<ast.Name object at 0x7da1b1c33a90>, <ast.Constant object at 0x7da1b1c31db0>, <ast.Constant object at 0x7da1b1c330a0>, <ast.Dict object at 0x7da1b1c31300>]]
if <ast.UnaryOp object at 0x7da1b1c30bb0> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[Connection information must be specified via connection_auth dictionary!]
return[name[ret]]
variable[nsg] assign[=] call[call[name[__salt__]][constant[azurearm_network.network_security_group_get]], parameter[name[name], name[resource_group]]]
if compare[constant[error] in name[nsg]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Network security group {0} was not found.].format, parameter[name[name]]]
return[name[ret]]
variable[deleted] assign[=] call[call[name[__salt__]][constant[azurearm_network.network_security_group_delete]], parameter[name[name], name[resource_group]]]
if name[deleted] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Network security group {0} has been deleted.].format, parameter[name[name]]]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c153f0>, <ast.Constant object at 0x7da1b1c15ab0>], [<ast.Name object at 0x7da1b1c175e0>, <ast.Dict object at 0x7da1b21a9e70>]]
return[name[ret]]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to delete network security group {0}!].format, parameter[name[name]]]
return[name[ret]] | keyword[def] identifier[network_security_group_absent] ( identifier[name] , identifier[resource_group] , identifier[connection_auth] = keyword[None] ):
literal[string]
identifier[ret] ={
literal[string] : identifier[name] ,
literal[string] : keyword[False] ,
literal[string] : literal[string] ,
literal[string] :{}
}
keyword[if] keyword[not] identifier[isinstance] ( identifier[connection_auth] , identifier[dict] ):
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[nsg] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[resource_group] ,
identifier[azurearm_log_level] = literal[string] ,
** identifier[connection_auth]
)
keyword[if] literal[string] keyword[in] identifier[nsg] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[elif] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]={
literal[string] : identifier[nsg] ,
literal[string] :{},
}
keyword[return] identifier[ret]
identifier[deleted] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[resource_group] ,** identifier[connection_auth] )
keyword[if] identifier[deleted] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]={
literal[string] : identifier[nsg] ,
literal[string] :{}
}
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def network_security_group_absent(name, resource_group, connection_auth=None):
"""
.. versionadded:: 2019.2.0
Ensure a network security group does not exist in the resource group.
:param name:
Name of the network security group.
:param resource_group:
The resource group assigned to the network security group.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
"""
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret # depends on [control=['if'], data=[]]
nsg = __salt__['azurearm_network.network_security_group_get'](name, resource_group, azurearm_log_level='info', **connection_auth)
if 'error' in nsg:
ret['result'] = True
ret['comment'] = 'Network security group {0} was not found.'.format(name)
return ret # depends on [control=['if'], data=[]]
elif __opts__['test']:
ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {'old': nsg, 'new': {}}
return ret # depends on [control=['if'], data=[]]
deleted = __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
ret['changes'] = {'old': nsg, 'new': {}}
return ret # depends on [control=['if'], data=[]]
ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
return ret |
def handle_log_message(msg):  # pylint: disable=useless-return
    """Process an internal log message."""
    # Receiving a log message proves the gateway is capable of logging.
    msg.gateway.can_log = True
    # Lazy %-style args so formatting only happens if DEBUG is enabled.
    fields = (msg.node_id, msg.child_id, msg.type, msg.sub_type, msg.payload)
    _LOGGER.debug('n:%s c:%s t:%s s:%s p:%s', *fields)
    # Message handlers share a "return the reply or None" contract.
    return None
constant[Process an internal log message.]
name[msg].gateway.can_log assign[=] constant[True]
call[name[_LOGGER].debug, parameter[constant[n:%s c:%s t:%s s:%s p:%s], name[msg].node_id, name[msg].child_id, name[msg].type, name[msg].sub_type, name[msg].payload]]
return[constant[None]] | keyword[def] identifier[handle_log_message] ( identifier[msg] ):
literal[string]
identifier[msg] . identifier[gateway] . identifier[can_log] = keyword[True]
identifier[_LOGGER] . identifier[debug] (
literal[string] , identifier[msg] . identifier[node_id] , identifier[msg] . identifier[child_id] , identifier[msg] . identifier[type] ,
identifier[msg] . identifier[sub_type] , identifier[msg] . identifier[payload] )
keyword[return] keyword[None] | def handle_log_message(msg): # pylint: disable=useless-return
'Process an internal log message.'
msg.gateway.can_log = True
_LOGGER.debug('n:%s c:%s t:%s s:%s p:%s', msg.node_id, msg.child_id, msg.type, msg.sub_type, msg.payload)
return None |
def set_full_screen(self, keybind="<Escape>"):
    """Make this window full screen and bind the Escape key (or given key) to exit full screen mode"""
    # Put the underlying Tk window into fullscreen and remember the state.
    self.tk.attributes("-fullscreen", True)
    self._full_screen = True
    # The chosen key (Escape by default) drops the window back out of fullscreen.
    self.events.set_event(
        "<FullScreen.Escape>", keybind, self.exit_full_screen)
constant[Make this window full screen and bind the Escape key (or given key) to exit full screen mode]
call[name[self].tk.attributes, parameter[constant[-fullscreen], constant[True]]]
name[self]._full_screen assign[=] constant[True]
call[name[self].events.set_event, parameter[constant[<FullScreen.Escape>], name[keybind], name[self].exit_full_screen]] | keyword[def] identifier[set_full_screen] ( identifier[self] , identifier[keybind] = literal[string] ):
literal[string]
identifier[self] . identifier[tk] . identifier[attributes] ( literal[string] , keyword[True] )
identifier[self] . identifier[_full_screen] = keyword[True]
identifier[self] . identifier[events] . identifier[set_event] ( literal[string] , identifier[keybind] , identifier[self] . identifier[exit_full_screen] ) | def set_full_screen(self, keybind='<Escape>'):
"""Make this window full screen and bind the Escape key (or given key) to exit full screen mode"""
self.tk.attributes('-fullscreen', True)
self._full_screen = True
self.events.set_event('<FullScreen.Escape>', keybind, self.exit_full_screen) |
def are_symmetrically_related(self, point_a, point_b, tol=0.001):
    """
    Checks if two points are symmetrically related.
    Args:
        point_a (3x1 array): First point.
        point_b (3x1 array): Second point.
        tol (float): Absolute tolerance for checking distance.
    Returns:
        True if self.operate(point_a) == point_b or vice versa.
    """
    # The relation is symmetric, so try mapping each point onto the other.
    # Checking (a -> b) first preserves the original short-circuit order.
    for source, target in ((point_a, point_b), (point_b, point_a)):
        if np.allclose(self.operate(source), target, atol=tol):
            return True
    return False
constant[
Checks if two points are symmetrically related.
Args:
point_a (3x1 array): First point.
point_b (3x1 array): Second point.
tol (float): Absolute tolerance for checking distance.
Returns:
True if self.operate(point_a) == point_b or vice versa.
]
if call[name[np].allclose, parameter[call[name[self].operate, parameter[name[point_a]]], name[point_b]]] begin[:]
return[constant[True]]
if call[name[np].allclose, parameter[call[name[self].operate, parameter[name[point_b]]], name[point_a]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[are_symmetrically_related] ( identifier[self] , identifier[point_a] , identifier[point_b] , identifier[tol] = literal[int] ):
literal[string]
keyword[if] identifier[np] . identifier[allclose] ( identifier[self] . identifier[operate] ( identifier[point_a] ), identifier[point_b] , identifier[atol] = identifier[tol] ):
keyword[return] keyword[True]
keyword[if] identifier[np] . identifier[allclose] ( identifier[self] . identifier[operate] ( identifier[point_b] ), identifier[point_a] , identifier[atol] = identifier[tol] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def are_symmetrically_related(self, point_a, point_b, tol=0.001):
"""
Checks if two points are symmetrically related.
Args:
point_a (3x1 array): First point.
point_b (3x1 array): Second point.
tol (float): Absolute tolerance for checking distance.
Returns:
True if self.operate(point_a) == point_b or vice versa.
"""
if np.allclose(self.operate(point_a), point_b, atol=tol):
return True # depends on [control=['if'], data=[]]
if np.allclose(self.operate(point_b), point_a, atol=tol):
return True # depends on [control=['if'], data=[]]
return False |
def setdefaults(d, d2):
    """
    Copy each key/value pair from ``d2`` into ``d`` unless the key is
    already present, then return the mutated ``d``.
    :type d: dict
    :type d2: dict
    :rtype: dict
    """
    for key, value in d2.items():
        # setdefault only inserts when the key is absent; existing
        # entries in ``d`` (even falsy ones) are left untouched.
        d.setdefault(key, value)
    return d
constant[
:type d: dict
:type d2: dict
:rtype: dict
]
for taget[tuple[[<ast.Name object at 0x7da18f720520>, <ast.Name object at 0x7da18f720a30>]]] in starred[call[name[d2].items, parameter[]]] begin[:]
call[name[d].setdefault, parameter[name[k], name[v]]]
return[name[d]] | keyword[def] identifier[setdefaults] ( identifier[d] , identifier[d2] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d2] . identifier[items] ():
identifier[d] . identifier[setdefault] ( identifier[k] , identifier[v] )
keyword[return] identifier[d] | def setdefaults(d, d2):
"""
:type d: dict
:type d2: dict
:rtype: dict
"""
for (k, v) in d2.items():
d.setdefault(k, v) # depends on [control=['for'], data=[]]
return d |
def _render_batch(self,
                  non_fluents: NonFluents,
                  states: Fluents, actions: Fluents, interms: Fluents,
                  rewards: np.array,
                  horizon: Optional[int] = None) -> None:
    '''Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`
    for given `horizon`.
    Args:
        non_fluents (Sequence[Tuple[str, np.array]]): Non-fluent values.
        states (Sequence[Tuple[str, np.array]]): A state trajectory.
        actions (Sequence[Tuple[str, np.array]]): An action trajectory.
        interms (Sequence[Tuple[str, np.array]]): An interm state trajectory.
        rewards (np.array): Sequence of rewards (1-dimensional array).
        horizon (Optional[int]): Number of timesteps.
    '''
    # Default horizon: the time dimension of the first state fluent.
    if horizon is None:
        horizon = len(states[0][1])
    self._render_round_init(horizon, non_fluents)
    for step in range(horizon):
        # Slice every trajectory at this timestep, keeping fluent names.
        state_t = [(name, values[step]) for name, values in states]
        interm_t = [(name, values[step]) for name, values in interms]
        action_t = [(name, values[step]) for name, values in actions]
        self._render_timestep(step, state_t, action_t, interm_t, rewards[step])
    self._render_round_end(rewards)
constant[Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`
for given `horizon`.
Args:
states (Sequence[Tuple[str, np.array]]): A state trajectory.
actions (Sequence[Tuple[str, np.array]]): An action trajectory.
interms (Sequence[Tuple[str, np.array]]): An interm state trajectory.
rewards (np.array): Sequence of rewards (1-dimensional array).
horizon (Optional[int]): Number of timesteps.
]
if compare[name[horizon] is constant[None]] begin[:]
variable[horizon] assign[=] call[name[len], parameter[call[call[name[states]][constant[0]]][constant[1]]]]
call[name[self]._render_round_init, parameter[name[horizon], name[non_fluents]]]
for taget[name[t]] in starred[call[name[range], parameter[name[horizon]]]] begin[:]
variable[s] assign[=] <ast.ListComp object at 0x7da1b2360b50>
variable[f] assign[=] <ast.ListComp object at 0x7da1b24e1960>
variable[a] assign[=] <ast.ListComp object at 0x7da1b24e21a0>
variable[r] assign[=] call[name[rewards]][name[t]]
call[name[self]._render_timestep, parameter[name[t], name[s], name[a], name[f], name[r]]]
call[name[self]._render_round_end, parameter[name[rewards]]] | keyword[def] identifier[_render_batch] ( identifier[self] ,
identifier[non_fluents] : identifier[NonFluents] ,
identifier[states] : identifier[Fluents] , identifier[actions] : identifier[Fluents] , identifier[interms] : identifier[Fluents] ,
identifier[rewards] : identifier[np] . identifier[array] ,
identifier[horizon] : identifier[Optional] [ identifier[int] ]= keyword[None] )-> keyword[None] :
literal[string]
keyword[if] identifier[horizon] keyword[is] keyword[None] :
identifier[horizon] = identifier[len] ( identifier[states] [ literal[int] ][ literal[int] ])
identifier[self] . identifier[_render_round_init] ( identifier[horizon] , identifier[non_fluents] )
keyword[for] identifier[t] keyword[in] identifier[range] ( identifier[horizon] ):
identifier[s] =[( identifier[s] [ literal[int] ], identifier[s] [ literal[int] ][ identifier[t] ]) keyword[for] identifier[s] keyword[in] identifier[states] ]
identifier[f] =[( identifier[f] [ literal[int] ], identifier[f] [ literal[int] ][ identifier[t] ]) keyword[for] identifier[f] keyword[in] identifier[interms] ]
identifier[a] =[( identifier[a] [ literal[int] ], identifier[a] [ literal[int] ][ identifier[t] ]) keyword[for] identifier[a] keyword[in] identifier[actions] ]
identifier[r] = identifier[rewards] [ identifier[t] ]
identifier[self] . identifier[_render_timestep] ( identifier[t] , identifier[s] , identifier[a] , identifier[f] , identifier[r] )
identifier[self] . identifier[_render_round_end] ( identifier[rewards] ) | def _render_batch(self, non_fluents: NonFluents, states: Fluents, actions: Fluents, interms: Fluents, rewards: np.array, horizon: Optional[int]=None) -> None:
"""Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`
for given `horizon`.
Args:
states (Sequence[Tuple[str, np.array]]): A state trajectory.
actions (Sequence[Tuple[str, np.array]]): An action trajectory.
interms (Sequence[Tuple[str, np.array]]): An interm state trajectory.
rewards (np.array): Sequence of rewards (1-dimensional array).
horizon (Optional[int]): Number of timesteps.
"""
if horizon is None:
horizon = len(states[0][1])
self._render_round_init(horizon, non_fluents)
for t in range(horizon):
s = [(s[0], s[1][t]) for s in states]
f = [(f[0], f[1][t]) for f in interms]
a = [(a[0], a[1][t]) for a in actions]
r = rewards[t]
self._render_timestep(t, s, a, f, r) # depends on [control=['for'], data=['t']]
self._render_round_end(rewards) # depends on [control=['if'], data=['horizon']] |
def get_iterator_type(script_settings, subscripts=None):
    """
    Figures out the iterator type based on the script settings and
    (optionally) subscripts.
    Args:
        script_settings: settings dict; may contain an explicit
            'iterator_type' key, or 'sweep_param' / 'num_loops' keys
            from which the type is inferred
        subscripts: subscripts (currently unused by this function)
    Returns:
        'loop' or 'sweep'
    Raises:
        TypeError: if the iterator type cannot be determined
    """
    # Default is None instead of a mutable {} default to avoid the
    # shared-mutable-default pitfall; the argument is never read here,
    # so this is behavior-identical for all callers.
    if subscripts is None:
        subscripts = {}
    if 'iterator_type' in script_settings:
        # An explicit iterator type takes precedence.
        if script_settings['iterator_type'] == 'Loop':
            iterator_type = 'loop'
        elif script_settings['iterator_type'] == 'Parameter Sweep':
            iterator_type = 'sweep'
        else:
            raise TypeError('unknown iterator type')
    else:
        # Otherwise infer the iterator type from the settings keys.
        if 'sweep_param' in script_settings:
            iterator_type = 'sweep'
        elif 'num_loops' in script_settings:
            iterator_type = 'loop'
        else:
            raise TypeError('unknown iterator type')
    return iterator_type
constant[
figures out the iterator type based on the script settings and (optionally) subscripts
Args:
script_settings: iterator_type
subscripts: subscripts
Returns:
]
if compare[constant[iterator_type] in name[script_settings]] begin[:]
if compare[call[name[script_settings]][constant[iterator_type]] equal[==] constant[Loop]] begin[:]
variable[iterator_type] assign[=] constant[loop]
return[name[iterator_type]] | keyword[def] identifier[get_iterator_type] ( identifier[script_settings] , identifier[subscripts] ={}):
literal[string]
keyword[if] literal[string] keyword[in] identifier[script_settings] :
keyword[if] identifier[script_settings] [ literal[string] ]== literal[string] :
identifier[iterator_type] = literal[string]
keyword[elif] identifier[script_settings] [ literal[string] ]== literal[string] :
identifier[iterator_type] = literal[string]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[script_settings] :
identifier[iterator_type] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[script_settings] :
identifier[iterator_type] = literal[string]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[iterator_type] | def get_iterator_type(script_settings, subscripts={}):
"""
figures out the iterator type based on the script settings and (optionally) subscripts
Args:
script_settings: iterator_type
subscripts: subscripts
Returns:
"""
if 'iterator_type' in script_settings:
# figure out the iterator type
if script_settings['iterator_type'] == 'Loop':
iterator_type = 'loop' # depends on [control=['if'], data=[]]
elif script_settings['iterator_type'] == 'Parameter Sweep':
iterator_type = 'sweep' # depends on [control=['if'], data=[]]
else:
raise TypeError('unknown iterator type') # depends on [control=['if'], data=['script_settings']]
# asign the correct iterator script type
elif 'sweep_param' in script_settings:
iterator_type = 'sweep' # depends on [control=['if'], data=[]]
elif 'num_loops' in script_settings:
iterator_type = 'loop' # depends on [control=['if'], data=[]]
else:
raise TypeError('unknown iterator type')
return iterator_type |
def baseglob(pat, base):
    """Given a pattern and a base, return files that match the glob pattern
    and whose paths start with the base."""
    matches = []
    for path in glob(pat):
        if path.startswith(base):
            matches.append(path)
    return matches
constant[Given a pattern and a base, return files that match the glob pattern
and also contain the base.]
return[<ast.ListComp object at 0x7da1b0a05180>] | keyword[def] identifier[baseglob] ( identifier[pat] , identifier[base] ):
literal[string]
keyword[return] [ identifier[f] keyword[for] identifier[f] keyword[in] identifier[glob] ( identifier[pat] ) keyword[if] identifier[f] . identifier[startswith] ( identifier[base] )] | def baseglob(pat, base):
"""Given a pattern and a base, return files that match the glob pattern
and also contain the base."""
return [f for f in glob(pat) if f.startswith(base)] |
def process_class_docstrings(app, what, name, obj, options, lines):
    """
    For those classes for which we use ::
    :template: autosummary/class_without_autosummary.rst
    the documented attributes/methods have to be listed in the class
    docstring. However, if one of those lists is empty, we use 'None',
    which then generates warnings in sphinx / ugly html output.
    This "autodoc-process-docstring" event connector removes that part
    from the processed docstring.
    """
    # Only class docstrings carry the autosummary Attributes/Methods blocks.
    if what != "class":
        return
    doc = '\n'.join(lines)
    # The placeholder blocks emitted when a member list is empty.
    empty_sections = (
        """.. rubric:: Attributes

.. autosummary::
   :toctree:

   None
""",
        """.. rubric:: Methods

.. autosummary::
   :toctree:

   None
""",
    )
    for section in empty_sections:
        # str.replace is a no-op when the section is absent, so no
        # containment check is needed.
        doc = doc.replace(section, '')
    # Sphinx expects the list to be mutated in place.
    lines[:] = doc.split('\n')
constant[
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
]
if compare[name[what] equal[==] constant[class]] begin[:]
variable[joined] assign[=] call[constant[
].join, parameter[name[lines]]]
variable[templates] assign[=] list[[<ast.Constant object at 0x7da20c6c7cd0>, <ast.Constant object at 0x7da20c6c4a90>]]
for taget[name[template]] in starred[name[templates]] begin[:]
if compare[name[template] in name[joined]] begin[:]
variable[joined] assign[=] call[name[joined].replace, parameter[name[template], constant[]]]
call[name[lines]][<ast.Slice object at 0x7da18fe939d0>] assign[=] call[name[joined].split, parameter[constant[
]]] | keyword[def] identifier[process_class_docstrings] ( identifier[app] , identifier[what] , identifier[name] , identifier[obj] , identifier[options] , identifier[lines] ):
literal[string]
keyword[if] identifier[what] == literal[string] :
identifier[joined] = literal[string] . identifier[join] ( identifier[lines] )
identifier[templates] =[
literal[string] ,
literal[string]
]
keyword[for] identifier[template] keyword[in] identifier[templates] :
keyword[if] identifier[template] keyword[in] identifier[joined] :
identifier[joined] = identifier[joined] . identifier[replace] ( identifier[template] , literal[string] )
identifier[lines] [:]= identifier[joined] . identifier[split] ( literal[string] ) | def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == 'class':
joined = '\n'.join(lines)
templates = ['.. rubric:: Attributes\n\n.. autosummary::\n :toctree:\n\n None\n', '.. rubric:: Methods\n\n.. autosummary::\n :toctree:\n\n None\n']
for template in templates:
if template in joined:
joined = joined.replace(template, '') # depends on [control=['if'], data=['template', 'joined']] # depends on [control=['for'], data=['template']]
lines[:] = joined.split('\n') # depends on [control=['if'], data=[]] |
def forward(self, agent_qs, states):
    """Forward pass for the mixer.
    Arguments:
        agent_qs: Tensor of shape [B, T, n_agents, n_actions]
        states: Tensor of shape [B, T, state_dim]
    """
    batch_size = agent_qs.size(0)
    # Flatten batch and time so each timestep is mixed independently.
    flat_states = states.reshape(-1, self.state_dim)
    flat_qs = agent_qs.view(-1, 1, self.n_agents)
    # First mixing layer. abs() forces the hypernetwork weights to be
    # non-negative (QMIX-style monotonicity constraint on agent Qs).
    w1 = th.abs(self.hyper_w_1(flat_states)).view(
        -1, self.n_agents, self.embed_dim)
    b1 = self.hyper_b_1(flat_states).view(-1, 1, self.embed_dim)
    hidden = F.elu(th.bmm(flat_qs, w1) + b1)
    # Second mixing layer, again with non-negative weights.
    w_final = th.abs(self.hyper_w_final(flat_states)).view(
        -1, self.embed_dim, 1)
    # State-dependent bias.
    bias = self.V(flat_states).view(-1, 1, 1)
    # Mix and restore the [B, T, 1] shape.
    mixed = th.bmm(hidden, w_final) + bias
    return mixed.view(batch_size, -1, 1)
constant[Forward pass for the mixer.
Arguments:
agent_qs: Tensor of shape [B, T, n_agents, n_actions]
states: Tensor of shape [B, T, state_dim]
]
variable[bs] assign[=] call[name[agent_qs].size, parameter[constant[0]]]
variable[states] assign[=] call[name[states].reshape, parameter[<ast.UnaryOp object at 0x7da2041d9db0>, name[self].state_dim]]
variable[agent_qs] assign[=] call[name[agent_qs].view, parameter[<ast.UnaryOp object at 0x7da2041d8df0>, constant[1], name[self].n_agents]]
variable[w1] assign[=] call[name[th].abs, parameter[call[name[self].hyper_w_1, parameter[name[states]]]]]
variable[b1] assign[=] call[name[self].hyper_b_1, parameter[name[states]]]
variable[w1] assign[=] call[name[w1].view, parameter[<ast.UnaryOp object at 0x7da2041d92a0>, name[self].n_agents, name[self].embed_dim]]
variable[b1] assign[=] call[name[b1].view, parameter[<ast.UnaryOp object at 0x7da2041d8250>, constant[1], name[self].embed_dim]]
variable[hidden] assign[=] call[name[F].elu, parameter[binary_operation[call[name[th].bmm, parameter[name[agent_qs], name[w1]]] + name[b1]]]]
variable[w_final] assign[=] call[name[th].abs, parameter[call[name[self].hyper_w_final, parameter[name[states]]]]]
variable[w_final] assign[=] call[name[w_final].view, parameter[<ast.UnaryOp object at 0x7da2041d83d0>, name[self].embed_dim, constant[1]]]
variable[v] assign[=] call[call[name[self].V, parameter[name[states]]].view, parameter[<ast.UnaryOp object at 0x7da2041d8e20>, constant[1], constant[1]]]
variable[y] assign[=] binary_operation[call[name[th].bmm, parameter[name[hidden], name[w_final]]] + name[v]]
variable[q_tot] assign[=] call[name[y].view, parameter[name[bs], <ast.UnaryOp object at 0x7da2041d9450>, constant[1]]]
return[name[q_tot]] | keyword[def] identifier[forward] ( identifier[self] , identifier[agent_qs] , identifier[states] ):
literal[string]
identifier[bs] = identifier[agent_qs] . identifier[size] ( literal[int] )
identifier[states] = identifier[states] . identifier[reshape] (- literal[int] , identifier[self] . identifier[state_dim] )
identifier[agent_qs] = identifier[agent_qs] . identifier[view] (- literal[int] , literal[int] , identifier[self] . identifier[n_agents] )
identifier[w1] = identifier[th] . identifier[abs] ( identifier[self] . identifier[hyper_w_1] ( identifier[states] ))
identifier[b1] = identifier[self] . identifier[hyper_b_1] ( identifier[states] )
identifier[w1] = identifier[w1] . identifier[view] (- literal[int] , identifier[self] . identifier[n_agents] , identifier[self] . identifier[embed_dim] )
identifier[b1] = identifier[b1] . identifier[view] (- literal[int] , literal[int] , identifier[self] . identifier[embed_dim] )
identifier[hidden] = identifier[F] . identifier[elu] ( identifier[th] . identifier[bmm] ( identifier[agent_qs] , identifier[w1] )+ identifier[b1] )
identifier[w_final] = identifier[th] . identifier[abs] ( identifier[self] . identifier[hyper_w_final] ( identifier[states] ))
identifier[w_final] = identifier[w_final] . identifier[view] (- literal[int] , identifier[self] . identifier[embed_dim] , literal[int] )
identifier[v] = identifier[self] . identifier[V] ( identifier[states] ). identifier[view] (- literal[int] , literal[int] , literal[int] )
identifier[y] = identifier[th] . identifier[bmm] ( identifier[hidden] , identifier[w_final] )+ identifier[v]
identifier[q_tot] = identifier[y] . identifier[view] ( identifier[bs] ,- literal[int] , literal[int] )
keyword[return] identifier[q_tot] | def forward(self, agent_qs, states):
"""Forward pass for the mixer.
Arguments:
agent_qs: Tensor of shape [B, T, n_agents, n_actions]
states: Tensor of shape [B, T, state_dim]
"""
bs = agent_qs.size(0)
states = states.reshape(-1, self.state_dim)
agent_qs = agent_qs.view(-1, 1, self.n_agents)
# First layer
w1 = th.abs(self.hyper_w_1(states))
b1 = self.hyper_b_1(states)
w1 = w1.view(-1, self.n_agents, self.embed_dim)
b1 = b1.view(-1, 1, self.embed_dim)
hidden = F.elu(th.bmm(agent_qs, w1) + b1)
# Second layer
w_final = th.abs(self.hyper_w_final(states))
w_final = w_final.view(-1, self.embed_dim, 1)
# State-dependent bias
v = self.V(states).view(-1, 1, 1)
# Compute final output
y = th.bmm(hidden, w_final) + v
# Reshape and return
q_tot = y.view(bs, -1, 1)
return q_tot |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.