code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout) | def function[register_watcher, parameter[self, event_type, callback, register_timeout]]:
constant[
Register a callback for a given event type.
]
call[call[name[self]._push_watchers][name[event_type]].add, parameter[name[callback]]]
call[name[self].wait_for_response, parameter[call[name[RegisterMessage], parameter[]]]] | keyword[def] identifier[register_watcher] ( identifier[self] , identifier[event_type] , identifier[callback] , identifier[register_timeout] = keyword[None] ):
literal[string]
identifier[self] . identifier[_push_watchers] [ identifier[event_type] ]. identifier[add] ( identifier[callback] )
identifier[self] . identifier[wait_for_response] (
identifier[RegisterMessage] ( identifier[event_list] =[ identifier[event_type] ]),
identifier[timeout] = identifier[register_timeout] ) | def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(RegisterMessage(event_list=[event_type]), timeout=register_timeout) |
def ligolw_add(xmldoc, urls, non_lsc_tables_ok = False, verbose = False, contenthandler = DefaultContentHandler):
"""
An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added.
"""
# Input
for n, url in enumerate(urls):
if verbose:
print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)),
utils.load_url(url, verbose = verbose, xmldoc = xmldoc, contenthandler = contenthandler)
# ID reassignment
if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc):
raise ValueError("non-LSC tables found. Use --non-lsc-tables-ok to force")
reassign_ids(xmldoc, verbose = verbose)
# Document merge
if verbose:
print >>sys.stderr, "merging elements ..."
merge_ligolws(xmldoc)
merge_compatible_tables(xmldoc)
return xmldoc | def function[ligolw_add, parameter[xmldoc, urls, non_lsc_tables_ok, verbose, contenthandler]]:
constant[
An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added.
]
for taget[tuple[[<ast.Name object at 0x7da1b0b56ad0>, <ast.Name object at 0x7da1b0b570a0>]]] in starred[call[name[enumerate], parameter[name[urls]]]] begin[:]
if name[verbose] begin[:]
tuple[[<ast.BinOp object at 0x7da1b0b55270>, <ast.BinOp object at 0x7da1b0b55f60>]]
call[name[utils].load_url, parameter[name[url]]]
if <ast.BoolOp object at 0x7da2047ea560> begin[:]
<ast.Raise object at 0x7da2047ebbe0>
call[name[reassign_ids], parameter[name[xmldoc]]]
if name[verbose] begin[:]
tuple[[<ast.BinOp object at 0x7da2047e8880>, <ast.Constant object at 0x7da2047e8850>]]
call[name[merge_ligolws], parameter[name[xmldoc]]]
call[name[merge_compatible_tables], parameter[name[xmldoc]]]
return[name[xmldoc]] | keyword[def] identifier[ligolw_add] ( identifier[xmldoc] , identifier[urls] , identifier[non_lsc_tables_ok] = keyword[False] , identifier[verbose] = keyword[False] , identifier[contenthandler] = identifier[DefaultContentHandler] ):
literal[string]
keyword[for] identifier[n] , identifier[url] keyword[in] identifier[enumerate] ( identifier[urls] ):
keyword[if] identifier[verbose] :
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] %( identifier[n] + literal[int] , identifier[len] ( identifier[urls] )),
identifier[utils] . identifier[load_url] ( identifier[url] , identifier[verbose] = identifier[verbose] , identifier[xmldoc] = identifier[xmldoc] , identifier[contenthandler] = identifier[contenthandler] )
keyword[if] keyword[not] identifier[non_lsc_tables_ok] keyword[and] identifier[lsctables] . identifier[HasNonLSCTables] ( identifier[xmldoc] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[reassign_ids] ( identifier[xmldoc] , identifier[verbose] = identifier[verbose] )
keyword[if] identifier[verbose] :
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string]
identifier[merge_ligolws] ( identifier[xmldoc] )
identifier[merge_compatible_tables] ( identifier[xmldoc] )
keyword[return] identifier[xmldoc] | def ligolw_add(xmldoc, urls, non_lsc_tables_ok=False, verbose=False, contenthandler=DefaultContentHandler):
"""
An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added.
""" # Input
for (n, url) in enumerate(urls):
if verbose:
(print >> sys.stderr, '%d/%d:' % (n + 1, len(urls))) # depends on [control=['if'], data=[]]
utils.load_url(url, verbose=verbose, xmldoc=xmldoc, contenthandler=contenthandler) # depends on [control=['for'], data=[]] # ID reassignment
if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc):
raise ValueError('non-LSC tables found. Use --non-lsc-tables-ok to force') # depends on [control=['if'], data=[]]
reassign_ids(xmldoc, verbose=verbose) # Document merge
if verbose:
(print >> sys.stderr, 'merging elements ...') # depends on [control=['if'], data=[]]
merge_ligolws(xmldoc)
merge_compatible_tables(xmldoc)
return xmldoc |
def get_node(self, node_name):
"""Retrieve node with passed name"""
for node in self.nodes:
if node.__name__ == node_name:
return node | def function[get_node, parameter[self, node_name]]:
constant[Retrieve node with passed name]
for taget[name[node]] in starred[name[self].nodes] begin[:]
if compare[name[node].__name__ equal[==] name[node_name]] begin[:]
return[name[node]] | keyword[def] identifier[get_node] ( identifier[self] , identifier[node_name] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] :
keyword[if] identifier[node] . identifier[__name__] == identifier[node_name] :
keyword[return] identifier[node] | def get_node(self, node_name):
"""Retrieve node with passed name"""
for node in self.nodes:
if node.__name__ == node_name:
return node # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] |
def slaveof(name, sentinel_host=None, sentinel_port=None, sentinel_password=None, **connection_args):
'''
Set this redis instance as a slave.
.. versionadded: 2016.3.0
name
Master to make this a slave of
sentinel_host
Ip of the sentinel to check for the master
sentinel_port
Port of the sentinel to check for the master
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': 'Failed to setup slave'}
kwargs = copy.copy(connection_args)
sentinel_master = __salt__['redis.sentinel_get_master_ip'](name, sentinel_host, sentinel_port, sentinel_password)
if sentinel_master['master_host'] in __salt__['network.ip_addrs']():
ret['result'] = True
ret['comment'] = 'Minion is the master: {0}'.format(name)
return ret
first_master = __salt__['redis.get_master_ip'](**connection_args)
if first_master == sentinel_master:
ret['result'] = True
ret['comment'] = 'Minion already slave of master: {0}'.format(name)
return ret
if __opts__['test'] is True:
ret['comment'] = 'Minion will be made a slave of {0}: {1}'.format(name, sentinel_master['host'])
ret['result'] = None
return ret
kwargs.update(**sentinel_master)
__salt__['redis.slaveof'](**kwargs)
current_master = __salt__['redis.get_master_ip'](**connection_args)
if current_master != sentinel_master:
return ret
ret['result'] = True
ret['changes'] = {
'old': first_master,
'new': current_master,
}
ret['comment'] = 'Minion successfully connected to master: {0}'.format(name)
return ret | def function[slaveof, parameter[name, sentinel_host, sentinel_port, sentinel_password]]:
constant[
Set this redis instance as a slave.
.. versionadded: 2016.3.0
name
Master to make this a slave of
sentinel_host
Ip of the sentinel to check for the master
sentinel_port
Port of the sentinel to check for the master
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2197850>, <ast.Constant object at 0x7da1b2196bc0>, <ast.Constant object at 0x7da1b2196320>, <ast.Constant object at 0x7da1b21960b0>], [<ast.Name object at 0x7da1b2197bb0>, <ast.Dict object at 0x7da1b21962c0>, <ast.Constant object at 0x7da1b2194100>, <ast.Constant object at 0x7da1b2195d80>]]
variable[kwargs] assign[=] call[name[copy].copy, parameter[name[connection_args]]]
variable[sentinel_master] assign[=] call[call[name[__salt__]][constant[redis.sentinel_get_master_ip]], parameter[name[name], name[sentinel_host], name[sentinel_port], name[sentinel_password]]]
if compare[call[name[sentinel_master]][constant[master_host]] in call[call[name[__salt__]][constant[network.ip_addrs]], parameter[]]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Minion is the master: {0}].format, parameter[name[name]]]
return[name[ret]]
variable[first_master] assign[=] call[call[name[__salt__]][constant[redis.get_master_ip]], parameter[]]
if compare[name[first_master] equal[==] name[sentinel_master]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Minion already slave of master: {0}].format, parameter[name[name]]]
return[name[ret]]
if compare[call[name[__opts__]][constant[test]] is constant[True]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Minion will be made a slave of {0}: {1}].format, parameter[name[name], call[name[sentinel_master]][constant[host]]]]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
call[name[kwargs].update, parameter[]]
call[call[name[__salt__]][constant[redis.slaveof]], parameter[]]
variable[current_master] assign[=] call[call[name[__salt__]][constant[redis.get_master_ip]], parameter[]]
if compare[name[current_master] not_equal[!=] name[sentinel_master]] begin[:]
return[name[ret]]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2195b10>, <ast.Constant object at 0x7da1b2195db0>], [<ast.Name object at 0x7da1b2195e10>, <ast.Name object at 0x7da1b21954b0>]]
call[name[ret]][constant[comment]] assign[=] call[constant[Minion successfully connected to master: {0}].format, parameter[name[name]]]
return[name[ret]] | keyword[def] identifier[slaveof] ( identifier[name] , identifier[sentinel_host] = keyword[None] , identifier[sentinel_port] = keyword[None] , identifier[sentinel_password] = keyword[None] ,** identifier[connection_args] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] }
identifier[kwargs] = identifier[copy] . identifier[copy] ( identifier[connection_args] )
identifier[sentinel_master] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[sentinel_host] , identifier[sentinel_port] , identifier[sentinel_password] )
keyword[if] identifier[sentinel_master] [ literal[string] ] keyword[in] identifier[__salt__] [ literal[string] ]():
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
identifier[first_master] = identifier[__salt__] [ literal[string] ](** identifier[connection_args] )
keyword[if] identifier[first_master] == identifier[sentinel_master] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] , identifier[sentinel_master] [ literal[string] ])
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[kwargs] . identifier[update] (** identifier[sentinel_master] )
identifier[__salt__] [ literal[string] ](** identifier[kwargs] )
identifier[current_master] = identifier[__salt__] [ literal[string] ](** identifier[connection_args] )
keyword[if] identifier[current_master] != identifier[sentinel_master] :
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]={
literal[string] : identifier[first_master] ,
literal[string] : identifier[current_master] ,
}
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def slaveof(name, sentinel_host=None, sentinel_port=None, sentinel_password=None, **connection_args):
"""
Set this redis instance as a slave.
.. versionadded: 2016.3.0
name
Master to make this a slave of
sentinel_host
Ip of the sentinel to check for the master
sentinel_port
Port of the sentinel to check for the master
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to setup slave'}
kwargs = copy.copy(connection_args)
sentinel_master = __salt__['redis.sentinel_get_master_ip'](name, sentinel_host, sentinel_port, sentinel_password)
if sentinel_master['master_host'] in __salt__['network.ip_addrs']():
ret['result'] = True
ret['comment'] = 'Minion is the master: {0}'.format(name)
return ret # depends on [control=['if'], data=[]]
first_master = __salt__['redis.get_master_ip'](**connection_args)
if first_master == sentinel_master:
ret['result'] = True
ret['comment'] = 'Minion already slave of master: {0}'.format(name)
return ret # depends on [control=['if'], data=[]]
if __opts__['test'] is True:
ret['comment'] = 'Minion will be made a slave of {0}: {1}'.format(name, sentinel_master['host'])
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
kwargs.update(**sentinel_master)
__salt__['redis.slaveof'](**kwargs)
current_master = __salt__['redis.get_master_ip'](**connection_args)
if current_master != sentinel_master:
return ret # depends on [control=['if'], data=[]]
ret['result'] = True
ret['changes'] = {'old': first_master, 'new': current_master}
ret['comment'] = 'Minion successfully connected to master: {0}'.format(name)
return ret |
def get_vars(self):
""" Parse request path and return GET-vars
:return: None or dictionary of names and tuples of values
"""
if self.method() != 'GET':
raise RuntimeError('Unable to return get vars for non-get method')
re_search = WWebRequestProto.get_vars_re.search(self.path())
if re_search is not None:
return urllib.parse.parse_qs(re_search.group(1), keep_blank_values=1) | def function[get_vars, parameter[self]]:
constant[ Parse request path and return GET-vars
:return: None or dictionary of names and tuples of values
]
if compare[call[name[self].method, parameter[]] not_equal[!=] constant[GET]] begin[:]
<ast.Raise object at 0x7da18eb57b20>
variable[re_search] assign[=] call[name[WWebRequestProto].get_vars_re.search, parameter[call[name[self].path, parameter[]]]]
if compare[name[re_search] is_not constant[None]] begin[:]
return[call[name[urllib].parse.parse_qs, parameter[call[name[re_search].group, parameter[constant[1]]]]]] | keyword[def] identifier[get_vars] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[method] ()!= literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[re_search] = identifier[WWebRequestProto] . identifier[get_vars_re] . identifier[search] ( identifier[self] . identifier[path] ())
keyword[if] identifier[re_search] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[urllib] . identifier[parse] . identifier[parse_qs] ( identifier[re_search] . identifier[group] ( literal[int] ), identifier[keep_blank_values] = literal[int] ) | def get_vars(self):
""" Parse request path and return GET-vars
:return: None or dictionary of names and tuples of values
"""
if self.method() != 'GET':
raise RuntimeError('Unable to return get vars for non-get method') # depends on [control=['if'], data=[]]
re_search = WWebRequestProto.get_vars_re.search(self.path())
if re_search is not None:
return urllib.parse.parse_qs(re_search.group(1), keep_blank_values=1) # depends on [control=['if'], data=['re_search']] |
def mean(self, only_valid=True) -> ErrorValue:
"""Calculate the mean of the pixels, not counting the masked ones if only_valid is True."""
if not only_valid:
intensity = self.intensity
error = self.error
else:
intensity = self.intensity[self.mask]
error = self.error[self.mask]
return ErrorValue(intensity.mean(),
(error ** 2).mean() ** 0.5) | def function[mean, parameter[self, only_valid]]:
constant[Calculate the mean of the pixels, not counting the masked ones if only_valid is True.]
if <ast.UnaryOp object at 0x7da1b1075930> begin[:]
variable[intensity] assign[=] name[self].intensity
variable[error] assign[=] name[self].error
return[call[name[ErrorValue], parameter[call[name[intensity].mean, parameter[]], binary_operation[call[binary_operation[name[error] ** constant[2]].mean, parameter[]] ** constant[0.5]]]]] | keyword[def] identifier[mean] ( identifier[self] , identifier[only_valid] = keyword[True] )-> identifier[ErrorValue] :
literal[string]
keyword[if] keyword[not] identifier[only_valid] :
identifier[intensity] = identifier[self] . identifier[intensity]
identifier[error] = identifier[self] . identifier[error]
keyword[else] :
identifier[intensity] = identifier[self] . identifier[intensity] [ identifier[self] . identifier[mask] ]
identifier[error] = identifier[self] . identifier[error] [ identifier[self] . identifier[mask] ]
keyword[return] identifier[ErrorValue] ( identifier[intensity] . identifier[mean] (),
( identifier[error] ** literal[int] ). identifier[mean] ()** literal[int] ) | def mean(self, only_valid=True) -> ErrorValue:
"""Calculate the mean of the pixels, not counting the masked ones if only_valid is True."""
if not only_valid:
intensity = self.intensity
error = self.error # depends on [control=['if'], data=[]]
else:
intensity = self.intensity[self.mask]
error = self.error[self.mask]
return ErrorValue(intensity.mean(), (error ** 2).mean() ** 0.5) |
def get_jobs(self, id=None, params=None):
"""
`<>`_
:arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
blank for all jobs
"""
return self.transport.perform_request(
"GET", _make_path("_rollup", "job", id), params=params
) | def function[get_jobs, parameter[self, id, params]]:
constant[
`<>`_
:arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
blank for all jobs
]
return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[constant[_rollup], constant[job], name[id]]]]]] | keyword[def] identifier[get_jobs] ( identifier[self] , identifier[id] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( literal[string] , literal[string] , identifier[id] ), identifier[params] = identifier[params]
) | def get_jobs(self, id=None, params=None):
"""
`<>`_
:arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
blank for all jobs
"""
return self.transport.perform_request('GET', _make_path('_rollup', 'job', id), params=params) |
def save(self, acl=None, client=None):
"""Save this ACL for the current bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
:param acl: The ACL object to save. If left blank, this will save
current entries.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
"""
if acl is None:
acl = self
save_to_backend = acl.loaded
else:
save_to_backend = True
if save_to_backend:
self._save(acl, None, client) | def function[save, parameter[self, acl, client]]:
constant[Save this ACL for the current bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
:param acl: The ACL object to save. If left blank, this will save
current entries.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
]
if compare[name[acl] is constant[None]] begin[:]
variable[acl] assign[=] name[self]
variable[save_to_backend] assign[=] name[acl].loaded
if name[save_to_backend] begin[:]
call[name[self]._save, parameter[name[acl], constant[None], name[client]]] | keyword[def] identifier[save] ( identifier[self] , identifier[acl] = keyword[None] , identifier[client] = keyword[None] ):
literal[string]
keyword[if] identifier[acl] keyword[is] keyword[None] :
identifier[acl] = identifier[self]
identifier[save_to_backend] = identifier[acl] . identifier[loaded]
keyword[else] :
identifier[save_to_backend] = keyword[True]
keyword[if] identifier[save_to_backend] :
identifier[self] . identifier[_save] ( identifier[acl] , keyword[None] , identifier[client] ) | def save(self, acl=None, client=None):
"""Save this ACL for the current bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
:param acl: The ACL object to save. If left blank, this will save
current entries.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
"""
if acl is None:
acl = self
save_to_backend = acl.loaded # depends on [control=['if'], data=['acl']]
else:
save_to_backend = True
if save_to_backend:
self._save(acl, None, client) # depends on [control=['if'], data=[]] |
def Tz(self,**kwargs): #pragma: no cover
"""
NAME:
Tz
PURPOSE:
Calculate the vertical period
INPUT:
+scipy.integrate.quad keywords
OUTPUT:
T_z(z,vz)*vc/ro + estimate of the error
HISTORY:
2012-06-01 - Written - Bovy (IAS)
"""
if hasattr(self,'_Tz'):
return self._Tz
zmax= self.calczmax()
Ez= calcEz(self._z,self._vz,self._verticalpot)
self._Tz= 4.*integrate.quad(_TzIntegrand,0.,zmax,
args=(Ez,self._verticalpot),
**kwargs)[0]
return self._Tz | def function[Tz, parameter[self]]:
constant[
NAME:
Tz
PURPOSE:
Calculate the vertical period
INPUT:
+scipy.integrate.quad keywords
OUTPUT:
T_z(z,vz)*vc/ro + estimate of the error
HISTORY:
2012-06-01 - Written - Bovy (IAS)
]
if call[name[hasattr], parameter[name[self], constant[_Tz]]] begin[:]
return[name[self]._Tz]
variable[zmax] assign[=] call[name[self].calczmax, parameter[]]
variable[Ez] assign[=] call[name[calcEz], parameter[name[self]._z, name[self]._vz, name[self]._verticalpot]]
name[self]._Tz assign[=] binary_operation[constant[4.0] * call[call[name[integrate].quad, parameter[name[_TzIntegrand], constant[0.0], name[zmax]]]][constant[0]]]
return[name[self]._Tz] | keyword[def] identifier[Tz] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[self] . identifier[_Tz]
identifier[zmax] = identifier[self] . identifier[calczmax] ()
identifier[Ez] = identifier[calcEz] ( identifier[self] . identifier[_z] , identifier[self] . identifier[_vz] , identifier[self] . identifier[_verticalpot] )
identifier[self] . identifier[_Tz] = literal[int] * identifier[integrate] . identifier[quad] ( identifier[_TzIntegrand] , literal[int] , identifier[zmax] ,
identifier[args] =( identifier[Ez] , identifier[self] . identifier[_verticalpot] ),
** identifier[kwargs] )[ literal[int] ]
keyword[return] identifier[self] . identifier[_Tz] | def Tz(self, **kwargs): #pragma: no cover
'\n NAME:\n Tz\n PURPOSE:\n Calculate the vertical period\n INPUT:\n +scipy.integrate.quad keywords\n OUTPUT:\n T_z(z,vz)*vc/ro + estimate of the error\n HISTORY:\n 2012-06-01 - Written - Bovy (IAS)\n '
if hasattr(self, '_Tz'):
return self._Tz # depends on [control=['if'], data=[]]
zmax = self.calczmax()
Ez = calcEz(self._z, self._vz, self._verticalpot)
self._Tz = 4.0 * integrate.quad(_TzIntegrand, 0.0, zmax, args=(Ez, self._verticalpot), **kwargs)[0]
return self._Tz |
def get_portchannel_info_by_intf_output_lacp_partner_oper_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
partner_oper_priority = ET.SubElement(lacp, "partner-oper-priority")
partner_oper_priority.text = kwargs.pop('partner_oper_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_portchannel_info_by_intf_output_lacp_partner_oper_priority, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_portchannel_info_by_intf] assign[=] call[name[ET].Element, parameter[constant[get_portchannel_info_by_intf]]]
variable[config] assign[=] name[get_portchannel_info_by_intf]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_portchannel_info_by_intf], constant[output]]]
variable[lacp] assign[=] call[name[ET].SubElement, parameter[name[output], constant[lacp]]]
variable[partner_oper_priority] assign[=] call[name[ET].SubElement, parameter[name[lacp], constant[partner-oper-priority]]]
name[partner_oper_priority].text assign[=] call[name[kwargs].pop, parameter[constant[partner_oper_priority]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_portchannel_info_by_intf_output_lacp_partner_oper_priority] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_portchannel_info_by_intf] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_portchannel_info_by_intf]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_portchannel_info_by_intf] , literal[string] )
identifier[lacp] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[partner_oper_priority] = identifier[ET] . identifier[SubElement] ( identifier[lacp] , literal[string] )
identifier[partner_oper_priority] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_portchannel_info_by_intf_output_lacp_partner_oper_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_portchannel_info_by_intf = ET.Element('get_portchannel_info_by_intf')
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, 'output')
lacp = ET.SubElement(output, 'lacp')
partner_oper_priority = ET.SubElement(lacp, 'partner-oper-priority')
partner_oper_priority.text = kwargs.pop('partner_oper_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def keyReleaseEvent(self, event):
"""Reimplement Qt method.
Handle "most recent used" tab behavior,
When ctrl is released and tab_switcher is visible, tab will be changed.
"""
if self.isVisible():
qsc = get_shortcut(context='Editor', name='Go to next file')
for key in qsc.split('+'):
key = key.lower()
if ((key == 'ctrl' and event.key() == Qt.Key_Control) or
(key == 'alt' and event.key() == Qt.Key_Alt)):
self.item_selected()
event.accept() | def function[keyReleaseEvent, parameter[self, event]]:
constant[Reimplement Qt method.
Handle "most recent used" tab behavior,
When ctrl is released and tab_switcher is visible, tab will be changed.
]
if call[name[self].isVisible, parameter[]] begin[:]
variable[qsc] assign[=] call[name[get_shortcut], parameter[]]
for taget[name[key]] in starred[call[name[qsc].split, parameter[constant[+]]]] begin[:]
variable[key] assign[=] call[name[key].lower, parameter[]]
if <ast.BoolOp object at 0x7da20c6c7eb0> begin[:]
call[name[self].item_selected, parameter[]]
call[name[event].accept, parameter[]] | keyword[def] identifier[keyReleaseEvent] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[self] . identifier[isVisible] ():
identifier[qsc] = identifier[get_shortcut] ( identifier[context] = literal[string] , identifier[name] = literal[string] )
keyword[for] identifier[key] keyword[in] identifier[qsc] . identifier[split] ( literal[string] ):
identifier[key] = identifier[key] . identifier[lower] ()
keyword[if] (( identifier[key] == literal[string] keyword[and] identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Control] ) keyword[or]
( identifier[key] == literal[string] keyword[and] identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Alt] )):
identifier[self] . identifier[item_selected] ()
identifier[event] . identifier[accept] () | def keyReleaseEvent(self, event):
"""Reimplement Qt method.
Handle "most recent used" tab behavior,
When ctrl is released and tab_switcher is visible, tab will be changed.
"""
if self.isVisible():
qsc = get_shortcut(context='Editor', name='Go to next file')
for key in qsc.split('+'):
key = key.lower()
if key == 'ctrl' and event.key() == Qt.Key_Control or (key == 'alt' and event.key() == Qt.Key_Alt):
self.item_selected() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
event.accept() |
def forward(data, model, mlm_loss, nsp_loss, vocab_size, dtype):
    """forward computation for evaluation

    Runs the model on one batch and returns the combined MLM + NSP loss
    together with the intermediate tensors needed for metric updates.
    """
    (input_id, masked_id, masked_position, masked_weight,
     next_sentence_label, segment_id, valid_length) = data
    # epsilon keeps the division well-defined when nothing is masked
    mask_count = masked_weight.sum() + 1e-8
    valid_length = valid_length.reshape(-1)
    masked_id = masked_id.reshape(-1)
    _, _, classified, decoded = model(input_id, segment_id,
                                      valid_length.astype(dtype, copy=False),
                                      masked_position)
    decoded = decoded.reshape((-1, vocab_size))
    # losses are computed in float32 regardless of the model dtype
    mlm_ls = mlm_loss(decoded.astype('float32', copy=False),
                      masked_id, masked_weight.reshape((-1, 1)))
    nsp_ls = nsp_loss(classified.astype('float32', copy=False),
                      next_sentence_label)
    mlm_ls = mlm_ls.sum() / mask_count
    nsp_ls = nsp_ls.mean()
    total_ls = mlm_ls + nsp_ls
    return (total_ls, next_sentence_label, classified, masked_id, decoded,
            masked_weight, mlm_ls, nsp_ls,
            valid_length.astype('float32', copy=False))
constant[forward computation for evaluation]
<ast.Tuple object at 0x7da1b26acee0> assign[=] name[data]
variable[num_masks] assign[=] binary_operation[call[name[masked_weight].sum, parameter[]] + constant[1e-08]]
variable[valid_length] assign[=] call[name[valid_length].reshape, parameter[<ast.UnaryOp object at 0x7da1b26af130>]]
variable[masked_id] assign[=] call[name[masked_id].reshape, parameter[<ast.UnaryOp object at 0x7da1b26ac2b0>]]
variable[valid_length_typed] assign[=] call[name[valid_length].astype, parameter[name[dtype]]]
<ast.Tuple object at 0x7da1b26ac0a0> assign[=] call[name[model], parameter[name[input_id], name[segment_id], name[valid_length_typed], name[masked_position]]]
variable[decoded] assign[=] call[name[decoded].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b26ad450>, <ast.Name object at 0x7da1b26af460>]]]]
variable[ls1] assign[=] call[name[mlm_loss], parameter[call[name[decoded].astype, parameter[constant[float32]]], name[masked_id], call[name[masked_weight].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b26af670>, <ast.Constant object at 0x7da1b26acca0>]]]]]]
variable[ls2] assign[=] call[name[nsp_loss], parameter[call[name[classified].astype, parameter[constant[float32]]], name[next_sentence_label]]]
variable[ls1] assign[=] binary_operation[call[name[ls1].sum, parameter[]] / name[num_masks]]
variable[ls2] assign[=] call[name[ls2].mean, parameter[]]
variable[ls] assign[=] binary_operation[name[ls1] + name[ls2]]
return[tuple[[<ast.Name object at 0x7da2041db1c0>, <ast.Name object at 0x7da2041d8760>, <ast.Name object at 0x7da2041d9960>, <ast.Name object at 0x7da2041d80d0>, <ast.Name object at 0x7da2041d8550>, <ast.Name object at 0x7da2041d8610>, <ast.Name object at 0x7da2041d9c90>, <ast.Name object at 0x7da2041d9600>, <ast.Call object at 0x7da2041d9a50>]]] | keyword[def] identifier[forward] ( identifier[data] , identifier[model] , identifier[mlm_loss] , identifier[nsp_loss] , identifier[vocab_size] , identifier[dtype] ):
literal[string]
( identifier[input_id] , identifier[masked_id] , identifier[masked_position] , identifier[masked_weight] , identifier[next_sentence_label] , identifier[segment_id] , identifier[valid_length] )= identifier[data]
identifier[num_masks] = identifier[masked_weight] . identifier[sum] ()+ literal[int]
identifier[valid_length] = identifier[valid_length] . identifier[reshape] (- literal[int] )
identifier[masked_id] = identifier[masked_id] . identifier[reshape] (- literal[int] )
identifier[valid_length_typed] = identifier[valid_length] . identifier[astype] ( identifier[dtype] , identifier[copy] = keyword[False] )
identifier[_] , identifier[_] , identifier[classified] , identifier[decoded] = identifier[model] ( identifier[input_id] , identifier[segment_id] , identifier[valid_length_typed] ,
identifier[masked_position] )
identifier[decoded] = identifier[decoded] . identifier[reshape] ((- literal[int] , identifier[vocab_size] ))
identifier[ls1] = identifier[mlm_loss] ( identifier[decoded] . identifier[astype] ( literal[string] , identifier[copy] = keyword[False] ),
identifier[masked_id] , identifier[masked_weight] . identifier[reshape] ((- literal[int] , literal[int] )))
identifier[ls2] = identifier[nsp_loss] ( identifier[classified] . identifier[astype] ( literal[string] , identifier[copy] = keyword[False] ), identifier[next_sentence_label] )
identifier[ls1] = identifier[ls1] . identifier[sum] ()/ identifier[num_masks]
identifier[ls2] = identifier[ls2] . identifier[mean] ()
identifier[ls] = identifier[ls1] + identifier[ls2]
keyword[return] identifier[ls] , identifier[next_sentence_label] , identifier[classified] , identifier[masked_id] , identifier[decoded] , identifier[masked_weight] , identifier[ls1] , identifier[ls2] , identifier[valid_length] . identifier[astype] ( literal[string] , identifier[copy] = keyword[False] ) | def forward(data, model, mlm_loss, nsp_loss, vocab_size, dtype):
"""forward computation for evaluation"""
(input_id, masked_id, masked_position, masked_weight, next_sentence_label, segment_id, valid_length) = data
num_masks = masked_weight.sum() + 1e-08
valid_length = valid_length.reshape(-1)
masked_id = masked_id.reshape(-1)
valid_length_typed = valid_length.astype(dtype, copy=False)
(_, _, classified, decoded) = model(input_id, segment_id, valid_length_typed, masked_position)
decoded = decoded.reshape((-1, vocab_size))
ls1 = mlm_loss(decoded.astype('float32', copy=False), masked_id, masked_weight.reshape((-1, 1)))
ls2 = nsp_loss(classified.astype('float32', copy=False), next_sentence_label)
ls1 = ls1.sum() / num_masks
ls2 = ls2.mean()
ls = ls1 + ls2
return (ls, next_sentence_label, classified, masked_id, decoded, masked_weight, ls1, ls2, valid_length.astype('float32', copy=False)) |
def make_aware(value, timezone):
    """
    Makes a naive datetime.datetime in a given time zone aware.
    """
    # pytz zones expose localize(); use it except for the sentinel
    # min/max datetimes, which pytz cannot localize safely.
    localize = getattr(timezone, 'localize', None)
    if localize is not None and value not in (datetime.datetime.min,
                                              datetime.datetime.max):
        return localize(value, is_dst=None)
    # fallback: attach tzinfo directly (may be wrong around DST changes)
    return value.replace(tzinfo=timezone)
constant[
Makes a naive datetime.datetime in a given time zone aware.
]
if <ast.BoolOp object at 0x7da1aff6ed10> begin[:]
return[call[name[timezone].localize, parameter[name[value]]]] | keyword[def] identifier[make_aware] ( identifier[value] , identifier[timezone] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[timezone] , literal[string] ) keyword[and] identifier[value] keyword[not] keyword[in] ( identifier[datetime] . identifier[datetime] . identifier[min] , identifier[datetime] . identifier[datetime] . identifier[max] ):
keyword[return] identifier[timezone] . identifier[localize] ( identifier[value] , identifier[is_dst] = keyword[None] )
keyword[else] :
keyword[return] identifier[value] . identifier[replace] ( identifier[tzinfo] = identifier[timezone] ) | def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize') and value not in (datetime.datetime.min, datetime.datetime.max):
# available for pytz time zones
return timezone.localize(value, is_dst=None) # depends on [control=['if'], data=[]]
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone) |
def setup(gandi):
""" Initialize Gandi CLI configuration.
Create global configuration directory with API credentials
"""
intro = """Welcome to GandiCLI, let's configure a few things before we \
start.
"""
outro = """
Setup completed. You can now:
* use 'gandi' to see all command.
* use 'gandi vm create' to create and access a Virtual Machine.
* use 'gandi paas create' to create and access a SimpleHosting instance.
"""
gandi.echo(intro)
gandi.init_config()
gandi.echo(outro) | def function[setup, parameter[gandi]]:
constant[ Initialize Gandi CLI configuration.
Create global configuration directory with API credentials
]
variable[intro] assign[=] constant[Welcome to GandiCLI, let's configure a few things before we start.
]
variable[outro] assign[=] constant[
Setup completed. You can now:
* use 'gandi' to see all command.
* use 'gandi vm create' to create and access a Virtual Machine.
* use 'gandi paas create' to create and access a SimpleHosting instance.
]
call[name[gandi].echo, parameter[name[intro]]]
call[name[gandi].init_config, parameter[]]
call[name[gandi].echo, parameter[name[outro]]] | keyword[def] identifier[setup] ( identifier[gandi] ):
literal[string]
identifier[intro] = literal[string]
identifier[outro] = literal[string]
identifier[gandi] . identifier[echo] ( identifier[intro] )
identifier[gandi] . identifier[init_config] ()
identifier[gandi] . identifier[echo] ( identifier[outro] ) | def setup(gandi):
""" Initialize Gandi CLI configuration.
Create global configuration directory with API credentials
"""
intro = "Welcome to GandiCLI, let's configure a few things before we start.\n"
outro = "\nSetup completed. You can now:\n* use 'gandi' to see all command.\n* use 'gandi vm create' to create and access a Virtual Machine.\n* use 'gandi paas create' to create and access a SimpleHosting instance.\n"
gandi.echo(intro)
gandi.init_config()
gandi.echo(outro) |
def is_filtered(self, process):
    """Return True if *process* (a dict) matches the current filter."""
    if self.filter is None:
        # no filter configured: nothing is filtered
        return False
    if self.filter_key is not None:
        # a specific key was configured: match against that key only
        return self._is_process_filtered(process)
    # default: match against the process name or its command line
    return (self._is_process_filtered(process, key='name')
            or self._is_process_filtered(process, key='cmdline'))
constant[Return True if the process item match the current filter
The proces item is a dict.
]
if compare[name[self].filter is constant[None]] begin[:]
return[constant[False]]
if compare[name[self].filter_key is constant[None]] begin[:]
return[<ast.BoolOp object at 0x7da18bc717e0>] | keyword[def] identifier[is_filtered] ( identifier[self] , identifier[process] ):
literal[string]
keyword[if] identifier[self] . identifier[filter] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[filter_key] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_is_process_filtered] ( identifier[process] , identifier[key] = literal[string] ) keyword[or] identifier[self] . identifier[_is_process_filtered] ( identifier[process] , identifier[key] = literal[string] )
keyword[else] :
keyword[return] identifier[self] . identifier[_is_process_filtered] ( identifier[process] ) | def is_filtered(self, process):
"""Return True if the process item match the current filter
The proces item is a dict.
"""
if self.filter is None:
# No filter => Not filtered
return False # depends on [control=['if'], data=[]]
if self.filter_key is None:
# Apply filter on command line and process name
return self._is_process_filtered(process, key='name') or self._is_process_filtered(process, key='cmdline') # depends on [control=['if'], data=[]]
else:
# Apply filter on <key>
return self._is_process_filtered(process) |
def compare(ver1, ver2):
    # type: (Union[str, unicode], Union[str, unicode]) -> int
    """Compares two version string, returning {-1|0|1} just as cmp().
    (-1: ver1 < ver2, 0: ver1==ver2, 1: ver1 > ver2)
    >>> compare("0.1.1", "0.1.2")
    -1
    >>> compare("0.1.2", "0.1.1")
    1
    >>> compare("0.1", "0.1.1")
    0
    >>> compare("0.1.1rc1", "0.1.1a")
    1
    >>> compare("0.1.1rc1", "0.1.1")
    -1
    """
    chunks1 = parse(str(ver1))
    chunks2 = parse(str(ver2))
    # compare chunk-by-chunk over the common prefix
    for left, right in zip(chunks1, chunks2):
        if left != right:
            return 1 if left > right else -1
    # equal prefix: a trailing string chunk (pre-release tag) sorts lower
    common = min(len(chunks1), len(chunks2))
    if len(chunks1) > common and isinstance(chunks1[common], str):
        return -1
    if len(chunks2) > common and isinstance(chunks2[common], str):
        return 1
    return 0
constant[Compares two version string, returning {-1|0|1} just as cmp().
(-1: ver1 < ver2, 0: ver1==ver2, 1: ver1 > ver2)
>>> compare("0.1.1", "0.1.2")
-1
>>> compare("0.1.2", "0.1.1")
1
>>> compare("0.1", "0.1.1")
0
>>> compare("0.1.1rc1", "0.1.1a")
1
>>> compare("0.1.1rc1", "0.1.1")
-1
]
variable[chunks1] assign[=] call[name[parse], parameter[call[name[str], parameter[name[ver1]]]]]
variable[chunks2] assign[=] call[name[parse], parameter[call[name[str], parameter[name[ver2]]]]]
variable[min_len] assign[=] call[name[min], parameter[call[name[len], parameter[name[chunks1]]], call[name[len], parameter[name[chunks2]]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[min_len]]]] begin[:]
if compare[call[name[chunks1]][name[i]] greater[>] call[name[chunks2]][name[i]]] begin[:]
return[constant[1]]
if <ast.BoolOp object at 0x7da1b287e740> begin[:]
return[<ast.UnaryOp object at 0x7da1b287f550>]
if <ast.BoolOp object at 0x7da1b287e890> begin[:]
return[constant[1]]
return[constant[0]] | keyword[def] identifier[compare] ( identifier[ver1] , identifier[ver2] ):
literal[string]
identifier[chunks1] = identifier[parse] ( identifier[str] ( identifier[ver1] ))
identifier[chunks2] = identifier[parse] ( identifier[str] ( identifier[ver2] ))
identifier[min_len] = identifier[min] ( identifier[len] ( identifier[chunks1] ), identifier[len] ( identifier[chunks2] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[min_len] ):
keyword[if] identifier[chunks1] [ identifier[i] ]> identifier[chunks2] [ identifier[i] ]:
keyword[return] literal[int]
keyword[elif] identifier[chunks1] [ identifier[i] ]< identifier[chunks2] [ identifier[i] ]:
keyword[return] - literal[int]
keyword[if] identifier[len] ( identifier[chunks1] )> identifier[min_len] keyword[and] identifier[isinstance] ( identifier[chunks1] [ identifier[min_len] ], identifier[str] ):
keyword[return] - literal[int]
keyword[if] identifier[len] ( identifier[chunks2] )> identifier[min_len] keyword[and] identifier[isinstance] ( identifier[chunks2] [ identifier[min_len] ], identifier[str] ):
keyword[return] literal[int]
keyword[return] literal[int] | def compare(ver1, ver2):
# type: (Union[str, unicode], Union[str, unicode]) -> int
'Compares two version string, returning {-1|0|1} just as cmp().\n (-1: ver1 < ver2, 0: ver1==ver2, 1: ver1 > ver2)\n\n >>> compare("0.1.1", "0.1.2")\n -1\n >>> compare("0.1.2", "0.1.1")\n 1\n >>> compare("0.1", "0.1.1")\n 0\n >>> compare("0.1.1rc1", "0.1.1a")\n 1\n >>> compare("0.1.1rc1", "0.1.1")\n -1\n '
chunks1 = parse(str(ver1))
chunks2 = parse(str(ver2))
min_len = min(len(chunks1), len(chunks2))
for i in range(min_len):
if chunks1[i] > chunks2[i]:
return 1 # depends on [control=['if'], data=[]]
elif chunks1[i] < chunks2[i]:
return -1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if len(chunks1) > min_len and isinstance(chunks1[min_len], str):
return -1 # depends on [control=['if'], data=[]]
if len(chunks2) > min_len and isinstance(chunks2[min_len], str):
return 1 # depends on [control=['if'], data=[]]
return 0 |
def multiply_and_add(n):
    '''Multiply the given number n by some configured multiplier, and
    then add a configured offset.'''
    # both values are resolved from the DI container keyed by this function
    factor, shift = di.resolver.unpack(multiply_and_add)
    return factor * n + shift
constant[Multiply the given number n by some configured multiplier, and
then add a configured offset.]
<ast.Tuple object at 0x7da1b24aeaa0> assign[=] call[name[di].resolver.unpack, parameter[name[multiply_and_add]]]
return[binary_operation[binary_operation[name[multiplier] * name[n]] + name[offset]]] | keyword[def] identifier[multiply_and_add] ( identifier[n] ):
literal[string]
identifier[multiplier] , identifier[offset] = identifier[di] . identifier[resolver] . identifier[unpack] ( identifier[multiply_and_add] )
keyword[return] ( identifier[multiplier] * identifier[n] )+ identifier[offset] | def multiply_and_add(n):
"""Multiply the given number n by some configured multiplier, and
then add a configured offset."""
(multiplier, offset) = di.resolver.unpack(multiply_and_add)
return multiplier * n + offset |
def dbmax_stddev(self, value=None):
    """ Corresponds to IDD Field `dbmax_stddev`
    Standard deviation of extreme annual maximum dry-bulb temperature
    Args:
        value (float): value for IDD Field `dbmax_stddev`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value
    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None encodes a missing value and is stored as-is
        self._dbmax_stddev = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmax_stddev`'.format(value))
    self._dbmax_stddev = converted
constant[ Corresponds to IDD Field `dbmax_stddev`
Standard deviation of extreme annual maximum dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax_stddev`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0fee470>
name[self]._dbmax_stddev assign[=] name[value] | keyword[def] identifier[dbmax_stddev] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[value] ))
identifier[self] . identifier[_dbmax_stddev] = identifier[value] | def dbmax_stddev(self, value=None):
""" Corresponds to IDD Field `dbmax_stddev`
Standard deviation of extreme annual maximum dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax_stddev`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `dbmax_stddev`'.format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['value']]
self._dbmax_stddev = value |
def get_raw_request_token(self, method='GET', **kwargs):
    '''
    Returns a Requests' response over the
    :attr:`rauth.OAuth1Service.request_token_url`.
    Use this if your endpoint if you need the full `Response` object.
    :param method: A string representation of the HTTP method to be used,
        defaults to `GET`.
    :type method: str
    :param \*\*kwargs: Optional arguments. Same as Requests.
    :type \*\*kwargs: dict
    '''
    # a request-token endpoint must be configured before we can hit it
    if self.request_token_url is None:
        raise TypeError('request_token_url must not be None')
    response = self.get_session().request(method,
                                          self.request_token_url,
                                          **kwargs)
    # cache the response on the instance for later inspection
    self.request_token_response = response
    return response
constant[
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.request_token_url`.
Use this if your endpoint if you need the full `Response` object.
:param method: A string representation of the HTTP method to be used,
defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
]
if compare[name[self].request_token_url is constant[None]] begin[:]
<ast.Raise object at 0x7da1b07bdc90>
variable[session] assign[=] call[name[self].get_session, parameter[]]
name[self].request_token_response assign[=] call[name[session].request, parameter[name[method], name[self].request_token_url]]
return[name[self].request_token_response] | keyword[def] identifier[get_raw_request_token] ( identifier[self] , identifier[method] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[request_token_url] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[session] = identifier[self] . identifier[get_session] ()
identifier[self] . identifier[request_token_response] = identifier[session] . identifier[request] ( identifier[method] ,
identifier[self] . identifier[request_token_url] ,
** identifier[kwargs] )
keyword[return] identifier[self] . identifier[request_token_response] | def get_raw_request_token(self, method='GET', **kwargs):
"""
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.request_token_url`.
Use this if your endpoint if you need the full `Response` object.
:param method: A string representation of the HTTP method to be used,
defaults to `GET`.
:type method: str
:param \\*\\*kwargs: Optional arguments. Same as Requests.
:type \\*\\*kwargs: dict
"""
# ensure we've set the request_token_url
if self.request_token_url is None:
raise TypeError('request_token_url must not be None') # depends on [control=['if'], data=[]]
session = self.get_session()
self.request_token_response = session.request(method, self.request_token_url, **kwargs)
return self.request_token_response |
def zcount(self, name, min, max):
    """
    Returns the number of elements in the sorted set at key ``name`` with
    a score between ``min`` and ``max``.
    """
    command = ('ZCOUNT', name, min, max)
    return self.execute_command(*command)
constant[
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
]
return[call[name[self].execute_command, parameter[constant[ZCOUNT], name[name], name[min], name[max]]]] | keyword[def] identifier[zcount] ( identifier[self] , identifier[name] , identifier[min] , identifier[max] ):
literal[string]
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[min] , identifier[max] ) | def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max) |
def main(path, pid, queue):
    """
    Standalone PSQ worker.
    The queue argument must be the full importable path to a psq.Queue
    instance.
    Example usage:
    psqworker config.q
    psqworker --path /opt/app queues.fast
    """
    setup_logging()
    if pid:
        # record our PID so a supervisor can signal/monitor this worker
        with open(os.path.expanduser(pid), "w") as handle:
            handle.write(str(os.getpid()))
    # make the application importable from the given (or current) directory
    sys.path.insert(0, path or os.getcwd())
    queue_instance = import_queue(queue)
    # psq is imported lazily so the CLI can start without it on PYTHONPATH
    import psq
    psq.Worker(queue=queue_instance).listen()
constant[
Standalone PSQ worker.
The queue argument must be the full importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
]
call[name[setup_logging], parameter[]]
if name[pid] begin[:]
with call[name[open], parameter[call[name[os].path.expanduser, parameter[name[pid]]], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[str], parameter[call[name[os].getpid, parameter[]]]]]]
if <ast.UnaryOp object at 0x7da1b008bbb0> begin[:]
variable[path] assign[=] call[name[os].getcwd, parameter[]]
call[name[sys].path.insert, parameter[constant[0], name[path]]]
variable[queue] assign[=] call[name[import_queue], parameter[name[queue]]]
import module[psq]
variable[worker] assign[=] call[name[psq].Worker, parameter[]]
call[name[worker].listen, parameter[]] | keyword[def] identifier[main] ( identifier[path] , identifier[pid] , identifier[queue] ):
literal[string]
identifier[setup_logging] ()
keyword[if] identifier[pid] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[pid] ), literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[str] ( identifier[os] . identifier[getpid] ()))
keyword[if] keyword[not] identifier[path] :
identifier[path] = identifier[os] . identifier[getcwd] ()
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[path] )
identifier[queue] = identifier[import_queue] ( identifier[queue] )
keyword[import] identifier[psq]
identifier[worker] = identifier[psq] . identifier[Worker] ( identifier[queue] = identifier[queue] )
identifier[worker] . identifier[listen] () | def main(path, pid, queue):
"""
Standalone PSQ worker.
The queue argument must be the full importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
"""
setup_logging()
if pid:
with open(os.path.expanduser(pid), 'w') as f:
f.write(str(os.getpid())) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
if not path:
path = os.getcwd() # depends on [control=['if'], data=[]]
sys.path.insert(0, path)
queue = import_queue(queue)
import psq
worker = psq.Worker(queue=queue)
worker.listen() |
def sqllab(self):
    """SQL Editor"""
    # payload embedded into the page for the frontend bootstrap
    bootstrap = {
        'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),
        'common': self.common_bootsrap_payload(),
    }
    return self.render_template(
        'superset/basic.html',
        entry='sqllab',
        bootstrap_data=json.dumps(bootstrap, default=utils.json_iso_dttm_ser),
    )
constant[SQL Editor]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e11660>, <ast.Constant object at 0x7da1b1e110c0>], [<ast.Call object at 0x7da1b1e10d60>, <ast.Call object at 0x7da1b1e12830>]]
return[call[name[self].render_template, parameter[constant[superset/basic.html]]]] | keyword[def] identifier[sqllab] ( identifier[self] ):
literal[string]
identifier[d] ={
literal[string] : identifier[config] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[common_bootsrap_payload] (),
}
keyword[return] identifier[self] . identifier[render_template] (
literal[string] ,
identifier[entry] = literal[string] ,
identifier[bootstrap_data] = identifier[json] . identifier[dumps] ( identifier[d] , identifier[default] = identifier[utils] . identifier[json_iso_dttm_ser] ),
) | def sqllab(self):
"""SQL Editor"""
d = {'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'), 'common': self.common_bootsrap_payload()}
return self.render_template('superset/basic.html', entry='sqllab', bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser)) |
def from_group(cls, group):
    """
    Construct tags from the regex group
    """
    # an empty/absent group yields no tags at all (None, not [])
    if not group:
        return None
    return [cls.parse(item) for item in group.split(";")]
constant[
Construct tags from the regex group
]
if <ast.UnaryOp object at 0x7da1b0b457b0> begin[:]
return[None]
variable[tag_items] assign[=] call[name[group].split, parameter[constant[;]]]
return[call[name[list], parameter[call[name[map], parameter[name[cls].parse, name[tag_items]]]]]] | keyword[def] identifier[from_group] ( identifier[cls] , identifier[group] ):
literal[string]
keyword[if] keyword[not] identifier[group] :
keyword[return]
identifier[tag_items] = identifier[group] . identifier[split] ( literal[string] )
keyword[return] identifier[list] ( identifier[map] ( identifier[cls] . identifier[parse] , identifier[tag_items] )) | def from_group(cls, group):
"""
Construct tags from the regex group
"""
if not group:
return # depends on [control=['if'], data=[]]
tag_items = group.split(';')
return list(map(cls.parse, tag_items)) |
def p_non_empty_array_pair_list_item(p):
    '''non_empty_array_pair_list : non_empty_array_pair_list COMMA AND variable
                                 | non_empty_array_pair_list COMMA expr
                                 | AND variable
                                 | expr'''
    # NOTE: the docstring above is the PLY grammar rule for this production
    # and must not be reworded.  The AND token appears to mark a
    # by-reference element (PHP "&$var" array syntax) -- reflected in
    # ArrayElement's third (is_ref) argument; confirm against the ast module.
    if len(p) == 5:
        # non_empty_array_pair_list COMMA AND variable: append by-ref element
        p[0] = p[1] + [ast.ArrayElement(None, p[4], True, lineno=p.lineno(2))]
    elif len(p) == 4:
        # non_empty_array_pair_list COMMA expr: append by-value element
        p[0] = p[1] + [ast.ArrayElement(None, p[3], False, lineno=p.lineno(2))]
    elif len(p) == 3:
        # AND variable: start a new list with a by-ref element
        p[0] = [ast.ArrayElement(None, p[2], True, lineno=p.lineno(1))]
    else:
        # expr: start a new list with a by-value element
        p[0] = [ast.ArrayElement(None, p[1], False, lineno=p.lineno(1))]
constant[non_empty_array_pair_list : non_empty_array_pair_list COMMA AND variable
| non_empty_array_pair_list COMMA expr
| AND variable
| expr]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[5]] begin[:]
call[name[p]][constant[0]] assign[=] binary_operation[call[name[p]][constant[1]] + list[[<ast.Call object at 0x7da1b0b39030>]]] | keyword[def] identifier[p_non_empty_array_pair_list_item] ( identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]+[ identifier[ast] . identifier[ArrayElement] ( keyword[None] , identifier[p] [ literal[int] ], keyword[True] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))]
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]+[ identifier[ast] . identifier[ArrayElement] ( keyword[None] , identifier[p] [ literal[int] ], keyword[False] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))]
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=[ identifier[ast] . identifier[ArrayElement] ( keyword[None] , identifier[p] [ literal[int] ], keyword[True] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))]
keyword[else] :
identifier[p] [ literal[int] ]=[ identifier[ast] . identifier[ArrayElement] ( keyword[None] , identifier[p] [ literal[int] ], keyword[False] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))] | def p_non_empty_array_pair_list_item(p):
"""non_empty_array_pair_list : non_empty_array_pair_list COMMA AND variable
| non_empty_array_pair_list COMMA expr
| AND variable
| expr"""
if len(p) == 5:
p[0] = p[1] + [ast.ArrayElement(None, p[4], True, lineno=p.lineno(2))] # depends on [control=['if'], data=[]]
elif len(p) == 4:
p[0] = p[1] + [ast.ArrayElement(None, p[3], False, lineno=p.lineno(2))] # depends on [control=['if'], data=[]]
elif len(p) == 3:
p[0] = [ast.ArrayElement(None, p[2], True, lineno=p.lineno(1))] # depends on [control=['if'], data=[]]
else:
p[0] = [ast.ArrayElement(None, p[1], False, lineno=p.lineno(1))] |
def _separate_header_and_content(self, text_lines):
"""
From a given Org text, return the header separate from the content.
The given text must be separate line by line and be a list.
The return is a list of two items: header and content.
Theses two items are text separate line by line in format of a list
Keyword Arguments:
text_lines -- A list, each item is a line of the texte
Return:
[
header -- A list, each item is a line of the texte
content -- A list, each item is a line of the texte
]
"""
no_more_header = False
expr_metadata = re.compile(r'^#\+[a-zA-Z]+:.*')
header = []
content = []
for line in text_lines:
metadata = expr_metadata.match(line)
if metadata and not no_more_header:
header.append(line)
else:
no_more_header = True
content.append(line)
return header, content | def function[_separate_header_and_content, parameter[self, text_lines]]:
constant[
From a given Org text, return the header separate from the content.
The given text must be separate line by line and be a list.
The return is a list of two items: header and content.
Theses two items are text separate line by line in format of a list
Keyword Arguments:
text_lines -- A list, each item is a line of the texte
Return:
[
header -- A list, each item is a line of the texte
content -- A list, each item is a line of the texte
]
]
variable[no_more_header] assign[=] constant[False]
variable[expr_metadata] assign[=] call[name[re].compile, parameter[constant[^#\+[a-zA-Z]+:.*]]]
variable[header] assign[=] list[[]]
variable[content] assign[=] list[[]]
for taget[name[line]] in starred[name[text_lines]] begin[:]
variable[metadata] assign[=] call[name[expr_metadata].match, parameter[name[line]]]
if <ast.BoolOp object at 0x7da1b1d767a0> begin[:]
call[name[header].append, parameter[name[line]]]
return[tuple[[<ast.Name object at 0x7da1b1d757b0>, <ast.Name object at 0x7da1b1d74bb0>]]] | keyword[def] identifier[_separate_header_and_content] ( identifier[self] , identifier[text_lines] ):
literal[string]
identifier[no_more_header] = keyword[False]
identifier[expr_metadata] = identifier[re] . identifier[compile] ( literal[string] )
identifier[header] =[]
identifier[content] =[]
keyword[for] identifier[line] keyword[in] identifier[text_lines] :
identifier[metadata] = identifier[expr_metadata] . identifier[match] ( identifier[line] )
keyword[if] identifier[metadata] keyword[and] keyword[not] identifier[no_more_header] :
identifier[header] . identifier[append] ( identifier[line] )
keyword[else] :
identifier[no_more_header] = keyword[True]
identifier[content] . identifier[append] ( identifier[line] )
keyword[return] identifier[header] , identifier[content] | def _separate_header_and_content(self, text_lines):
"""
From a given Org text, return the header separate from the content.
The given text must be separate line by line and be a list.
The return is a list of two items: header and content.
Theses two items are text separate line by line in format of a list
Keyword Arguments:
text_lines -- A list, each item is a line of the texte
Return:
[
header -- A list, each item is a line of the texte
content -- A list, each item is a line of the texte
]
"""
no_more_header = False
expr_metadata = re.compile('^#\\+[a-zA-Z]+:.*')
header = []
content = []
for line in text_lines:
metadata = expr_metadata.match(line)
if metadata and (not no_more_header):
header.append(line) # depends on [control=['if'], data=[]]
else:
no_more_header = True
content.append(line) # depends on [control=['for'], data=['line']]
return (header, content) |
def connect(self, object, callback):
"""Subscribe to the signal."""
return subscription(self.map.setdefault(object, []), callback) | def function[connect, parameter[self, object, callback]]:
constant[Subscribe to the signal.]
return[call[name[subscription], parameter[call[name[self].map.setdefault, parameter[name[object], list[[]]]], name[callback]]]] | keyword[def] identifier[connect] ( identifier[self] , identifier[object] , identifier[callback] ):
literal[string]
keyword[return] identifier[subscription] ( identifier[self] . identifier[map] . identifier[setdefault] ( identifier[object] ,[]), identifier[callback] ) | def connect(self, object, callback):
"""Subscribe to the signal."""
return subscription(self.map.setdefault(object, []), callback) |
def with_context(cls, setup_phases, teardown_phases):
"""Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases.
"""
setup = flatten_phases_and_groups(setup_phases)
teardown = flatten_phases_and_groups(teardown_phases)
def _context_wrapper(*phases):
return cls(setup=setup,
main=flatten_phases_and_groups(phases),
teardown=teardown)
return _context_wrapper | def function[with_context, parameter[cls, setup_phases, teardown_phases]]:
constant[Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases.
]
variable[setup] assign[=] call[name[flatten_phases_and_groups], parameter[name[setup_phases]]]
variable[teardown] assign[=] call[name[flatten_phases_and_groups], parameter[name[teardown_phases]]]
def function[_context_wrapper, parameter[]]:
return[call[name[cls], parameter[]]]
return[name[_context_wrapper]] | keyword[def] identifier[with_context] ( identifier[cls] , identifier[setup_phases] , identifier[teardown_phases] ):
literal[string]
identifier[setup] = identifier[flatten_phases_and_groups] ( identifier[setup_phases] )
identifier[teardown] = identifier[flatten_phases_and_groups] ( identifier[teardown_phases] )
keyword[def] identifier[_context_wrapper] (* identifier[phases] ):
keyword[return] identifier[cls] ( identifier[setup] = identifier[setup] ,
identifier[main] = identifier[flatten_phases_and_groups] ( identifier[phases] ),
identifier[teardown] = identifier[teardown] )
keyword[return] identifier[_context_wrapper] | def with_context(cls, setup_phases, teardown_phases):
"""Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases.
"""
setup = flatten_phases_and_groups(setup_phases)
teardown = flatten_phases_and_groups(teardown_phases)
def _context_wrapper(*phases):
return cls(setup=setup, main=flatten_phases_and_groups(phases), teardown=teardown)
return _context_wrapper |
def report(self, reporter):
"""
Adds the problems that have been found so far to the given Reporter
instance.
"""
for symbol in sorted(self.unk_symbols.keys()):
err = '{} ({}) is not part of IPA'.format(symbol.char, symbol.name)
if symbol.char in self.common_err:
repl = self.common_err[symbol.char]
err += ', suggested replacement is {}'.format(repl)
if len(repl) == 1:
err += ' ({})'.format(unicodedata.name(repl))
reporter.add(self.unk_symbols[symbol], err) | def function[report, parameter[self, reporter]]:
constant[
Adds the problems that have been found so far to the given Reporter
instance.
]
for taget[name[symbol]] in starred[call[name[sorted], parameter[call[name[self].unk_symbols.keys, parameter[]]]]] begin[:]
variable[err] assign[=] call[constant[{} ({}) is not part of IPA].format, parameter[name[symbol].char, name[symbol].name]]
if compare[name[symbol].char in name[self].common_err] begin[:]
variable[repl] assign[=] call[name[self].common_err][name[symbol].char]
<ast.AugAssign object at 0x7da20c6aa8f0>
if compare[call[name[len], parameter[name[repl]]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da20c6ab2b0>
call[name[reporter].add, parameter[call[name[self].unk_symbols][name[symbol]], name[err]]] | keyword[def] identifier[report] ( identifier[self] , identifier[reporter] ):
literal[string]
keyword[for] identifier[symbol] keyword[in] identifier[sorted] ( identifier[self] . identifier[unk_symbols] . identifier[keys] ()):
identifier[err] = literal[string] . identifier[format] ( identifier[symbol] . identifier[char] , identifier[symbol] . identifier[name] )
keyword[if] identifier[symbol] . identifier[char] keyword[in] identifier[self] . identifier[common_err] :
identifier[repl] = identifier[self] . identifier[common_err] [ identifier[symbol] . identifier[char] ]
identifier[err] += literal[string] . identifier[format] ( identifier[repl] )
keyword[if] identifier[len] ( identifier[repl] )== literal[int] :
identifier[err] += literal[string] . identifier[format] ( identifier[unicodedata] . identifier[name] ( identifier[repl] ))
identifier[reporter] . identifier[add] ( identifier[self] . identifier[unk_symbols] [ identifier[symbol] ], identifier[err] ) | def report(self, reporter):
"""
Adds the problems that have been found so far to the given Reporter
instance.
"""
for symbol in sorted(self.unk_symbols.keys()):
err = '{} ({}) is not part of IPA'.format(symbol.char, symbol.name)
if symbol.char in self.common_err:
repl = self.common_err[symbol.char]
err += ', suggested replacement is {}'.format(repl)
if len(repl) == 1:
err += ' ({})'.format(unicodedata.name(repl)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
reporter.add(self.unk_symbols[symbol], err) # depends on [control=['for'], data=['symbol']] |
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
"""
if not isinstance(output_module, mysql_4n6time.MySQL4n6TimeOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of MySQL4n6TimeOutputModule')
MySQL4n6TimeDatabaseArgumentsHelper.ParseOptions(options, output_module)
shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(
options, output_module) | def function[ParseOptions, parameter[cls, options, output_module]]:
constant[Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
]
if <ast.UnaryOp object at 0x7da18fe937f0> begin[:]
<ast.Raise object at 0x7da18fe902e0>
call[name[MySQL4n6TimeDatabaseArgumentsHelper].ParseOptions, parameter[name[options], name[output_module]]]
call[name[shared_4n6time_output].Shared4n6TimeOutputArgumentsHelper.ParseOptions, parameter[name[options], name[output_module]]] | keyword[def] identifier[ParseOptions] ( identifier[cls] , identifier[options] , identifier[output_module] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[output_module] , identifier[mysql_4n6time] . identifier[MySQL4n6TimeOutputModule] ):
keyword[raise] identifier[errors] . identifier[BadConfigObject] (
literal[string] )
identifier[MySQL4n6TimeDatabaseArgumentsHelper] . identifier[ParseOptions] ( identifier[options] , identifier[output_module] )
identifier[shared_4n6time_output] . identifier[Shared4n6TimeOutputArgumentsHelper] . identifier[ParseOptions] (
identifier[options] , identifier[output_module] ) | def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
"""
if not isinstance(output_module, mysql_4n6time.MySQL4n6TimeOutputModule):
raise errors.BadConfigObject('Output module is not an instance of MySQL4n6TimeOutputModule') # depends on [control=['if'], data=[]]
MySQL4n6TimeDatabaseArgumentsHelper.ParseOptions(options, output_module)
shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(options, output_module) |
def listConstraints(self, login, tableName):
"""
Parameters:
- login
- tableName
"""
self.send_listConstraints(login, tableName)
return self.recv_listConstraints() | def function[listConstraints, parameter[self, login, tableName]]:
constant[
Parameters:
- login
- tableName
]
call[name[self].send_listConstraints, parameter[name[login], name[tableName]]]
return[call[name[self].recv_listConstraints, parameter[]]] | keyword[def] identifier[listConstraints] ( identifier[self] , identifier[login] , identifier[tableName] ):
literal[string]
identifier[self] . identifier[send_listConstraints] ( identifier[login] , identifier[tableName] )
keyword[return] identifier[self] . identifier[recv_listConstraints] () | def listConstraints(self, login, tableName):
"""
Parameters:
- login
- tableName
"""
self.send_listConstraints(login, tableName)
return self.recv_listConstraints() |
def get_object(self):
"""
Returns the row the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
"""
dataframe = self.filter_dataframe(self.get_dataframe())
assert self.lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, self.lookup_url_kwarg)
)
try:
obj = self.index_row(dataframe)
except (IndexError, KeyError, ValueError):
raise Http404
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj | def function[get_object, parameter[self]]:
constant[
Returns the row the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
]
variable[dataframe] assign[=] call[name[self].filter_dataframe, parameter[call[name[self].get_dataframe, parameter[]]]]
assert[compare[name[self].lookup_url_kwarg in name[self].kwargs]]
<ast.Try object at 0x7da20c6a9d80>
call[name[self].check_object_permissions, parameter[name[self].request, name[obj]]]
return[name[obj]] | keyword[def] identifier[get_object] ( identifier[self] ):
literal[string]
identifier[dataframe] = identifier[self] . identifier[filter_dataframe] ( identifier[self] . identifier[get_dataframe] ())
keyword[assert] identifier[self] . identifier[lookup_url_kwarg] keyword[in] identifier[self] . identifier[kwargs] ,(
literal[string]
literal[string]
literal[string] %
( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[self] . identifier[lookup_url_kwarg] )
)
keyword[try] :
identifier[obj] = identifier[self] . identifier[index_row] ( identifier[dataframe] )
keyword[except] ( identifier[IndexError] , identifier[KeyError] , identifier[ValueError] ):
keyword[raise] identifier[Http404]
identifier[self] . identifier[check_object_permissions] ( identifier[self] . identifier[request] , identifier[obj] )
keyword[return] identifier[obj] | def get_object(self):
"""
Returns the row the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
"""
dataframe = self.filter_dataframe(self.get_dataframe())
assert self.lookup_url_kwarg in self.kwargs, 'Expected view %s to be called with a URL keyword argument named "%s". Fix your URL conf, or set the `.lookup_field` attribute on the view correctly.' % (self.__class__.__name__, self.lookup_url_kwarg)
try:
obj = self.index_row(dataframe) # depends on [control=['try'], data=[]]
except (IndexError, KeyError, ValueError):
raise Http404 # depends on [control=['except'], data=[]]
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj |
def use_double_hash(self, password_hash=None):
"""
Return a bool indicating whether a password should be hashed twice.
"""
single_hash = current_app.config.SECURITY_PASSWORD_SINGLE_HASH
if single_hash and self.security.password_salt:
raise RuntimeError('You may not specify a salt with '
'SECURITY_PASSWORD_SINGLE_HASH')
if password_hash is None:
is_plaintext = self.security.password_hash == 'plaintext'
else:
is_plaintext = \
self.security.pwd_context.identify(password_hash) == 'plaintext'
return not (is_plaintext or single_hash) | def function[use_double_hash, parameter[self, password_hash]]:
constant[
Return a bool indicating whether a password should be hashed twice.
]
variable[single_hash] assign[=] name[current_app].config.SECURITY_PASSWORD_SINGLE_HASH
if <ast.BoolOp object at 0x7da207f99690> begin[:]
<ast.Raise object at 0x7da207f9afe0>
if compare[name[password_hash] is constant[None]] begin[:]
variable[is_plaintext] assign[=] compare[name[self].security.password_hash equal[==] constant[plaintext]]
return[<ast.UnaryOp object at 0x7da207f98b20>] | keyword[def] identifier[use_double_hash] ( identifier[self] , identifier[password_hash] = keyword[None] ):
literal[string]
identifier[single_hash] = identifier[current_app] . identifier[config] . identifier[SECURITY_PASSWORD_SINGLE_HASH]
keyword[if] identifier[single_hash] keyword[and] identifier[self] . identifier[security] . identifier[password_salt] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] )
keyword[if] identifier[password_hash] keyword[is] keyword[None] :
identifier[is_plaintext] = identifier[self] . identifier[security] . identifier[password_hash] == literal[string]
keyword[else] :
identifier[is_plaintext] = identifier[self] . identifier[security] . identifier[pwd_context] . identifier[identify] ( identifier[password_hash] )== literal[string]
keyword[return] keyword[not] ( identifier[is_plaintext] keyword[or] identifier[single_hash] ) | def use_double_hash(self, password_hash=None):
"""
Return a bool indicating whether a password should be hashed twice.
"""
single_hash = current_app.config.SECURITY_PASSWORD_SINGLE_HASH
if single_hash and self.security.password_salt:
raise RuntimeError('You may not specify a salt with SECURITY_PASSWORD_SINGLE_HASH') # depends on [control=['if'], data=[]]
if password_hash is None:
is_plaintext = self.security.password_hash == 'plaintext' # depends on [control=['if'], data=[]]
else:
is_plaintext = self.security.pwd_context.identify(password_hash) == 'plaintext'
return not (is_plaintext or single_hash) |
def timestamp(value):
"""
Return the timestamp of a datetime.datetime object.
:param value: a datetime object
:type value: datetime.datetime
:return: the timestamp
:rtype: str
"""
value = value if timezone.is_naive(value) else timezone.localtime(value)
return value.strftime(settings.DATE_FORMAT) | def function[timestamp, parameter[value]]:
constant[
Return the timestamp of a datetime.datetime object.
:param value: a datetime object
:type value: datetime.datetime
:return: the timestamp
:rtype: str
]
variable[value] assign[=] <ast.IfExp object at 0x7da1b11a63b0>
return[call[name[value].strftime, parameter[name[settings].DATE_FORMAT]]] | keyword[def] identifier[timestamp] ( identifier[value] ):
literal[string]
identifier[value] = identifier[value] keyword[if] identifier[timezone] . identifier[is_naive] ( identifier[value] ) keyword[else] identifier[timezone] . identifier[localtime] ( identifier[value] )
keyword[return] identifier[value] . identifier[strftime] ( identifier[settings] . identifier[DATE_FORMAT] ) | def timestamp(value):
"""
Return the timestamp of a datetime.datetime object.
:param value: a datetime object
:type value: datetime.datetime
:return: the timestamp
:rtype: str
"""
value = value if timezone.is_naive(value) else timezone.localtime(value)
return value.strftime(settings.DATE_FORMAT) |
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
conncetion pool.
"""
# TODO: Add optional support for socket.gethostbyname checking.
return (url.startswith('/') or
get_host(url) == (self.scheme, self.host, self.port)) | def function[is_same_host, parameter[self, url]]:
constant[
Check if the given ``url`` is a member of the same host as this
conncetion pool.
]
return[<ast.BoolOp object at 0x7da1b25db070>] | keyword[def] identifier[is_same_host] ( identifier[self] , identifier[url] ):
literal[string]
keyword[return] ( identifier[url] . identifier[startswith] ( literal[string] ) keyword[or]
identifier[get_host] ( identifier[url] )==( identifier[self] . identifier[scheme] , identifier[self] . identifier[host] , identifier[self] . identifier[port] )) | def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
conncetion pool.
"""
# TODO: Add optional support for socket.gethostbyname checking.
return url.startswith('/') or get_host(url) == (self.scheme, self.host, self.port) |
def tag(self):
"""
Return the tag's name (or id number) for this task.
:returns: An int (tag id) or string (tag name, eg "foo-build").
This seems to depend on the task method. For example,
buildArch, tagBuild, and tagNotification tasks always return
a tag ID here.
If you do get an int back here, you'll have to make a
separate getTag RPC to get the tag's name.
"""
if self.method == 'buildArch':
# Note: buildArch tag will be an int here.
return self.params[1]
if self.method in ('createdistrepo', 'distRepo', 'newRepo', 'runroot',
'tagBuild', 'waitrepo'):
return self.params[0]
if self.method == 'tagNotification':
return self.params[2]
if self.method == 'buildMaven':
return self.params[1]['name'] | def function[tag, parameter[self]]:
constant[
Return the tag's name (or id number) for this task.
:returns: An int (tag id) or string (tag name, eg "foo-build").
This seems to depend on the task method. For example,
buildArch, tagBuild, and tagNotification tasks always return
a tag ID here.
If you do get an int back here, you'll have to make a
separate getTag RPC to get the tag's name.
]
if compare[name[self].method equal[==] constant[buildArch]] begin[:]
return[call[name[self].params][constant[1]]]
if compare[name[self].method in tuple[[<ast.Constant object at 0x7da1b1f35f60>, <ast.Constant object at 0x7da1b1f36b90>, <ast.Constant object at 0x7da1b1f36c50>, <ast.Constant object at 0x7da1b1f36bc0>, <ast.Constant object at 0x7da1b1f36980>, <ast.Constant object at 0x7da1b1f365f0>]]] begin[:]
return[call[name[self].params][constant[0]]]
if compare[name[self].method equal[==] constant[tagNotification]] begin[:]
return[call[name[self].params][constant[2]]]
if compare[name[self].method equal[==] constant[buildMaven]] begin[:]
return[call[call[name[self].params][constant[1]]][constant[name]]] | keyword[def] identifier[tag] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[method] == literal[string] :
keyword[return] identifier[self] . identifier[params] [ literal[int] ]
keyword[if] identifier[self] . identifier[method] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ):
keyword[return] identifier[self] . identifier[params] [ literal[int] ]
keyword[if] identifier[self] . identifier[method] == literal[string] :
keyword[return] identifier[self] . identifier[params] [ literal[int] ]
keyword[if] identifier[self] . identifier[method] == literal[string] :
keyword[return] identifier[self] . identifier[params] [ literal[int] ][ literal[string] ] | def tag(self):
"""
Return the tag's name (or id number) for this task.
:returns: An int (tag id) or string (tag name, eg "foo-build").
This seems to depend on the task method. For example,
buildArch, tagBuild, and tagNotification tasks always return
a tag ID here.
If you do get an int back here, you'll have to make a
separate getTag RPC to get the tag's name.
"""
if self.method == 'buildArch':
# Note: buildArch tag will be an int here.
return self.params[1] # depends on [control=['if'], data=[]]
if self.method in ('createdistrepo', 'distRepo', 'newRepo', 'runroot', 'tagBuild', 'waitrepo'):
return self.params[0] # depends on [control=['if'], data=[]]
if self.method == 'tagNotification':
return self.params[2] # depends on [control=['if'], data=[]]
if self.method == 'buildMaven':
return self.params[1]['name'] # depends on [control=['if'], data=[]] |
def get_users(self, fetch=True):
"""Return this Applications's users object, populating it if fetch
is True."""
return Users(self.resource.users, self.client, populate=fetch) | def function[get_users, parameter[self, fetch]]:
constant[Return this Applications's users object, populating it if fetch
is True.]
return[call[name[Users], parameter[name[self].resource.users, name[self].client]]] | keyword[def] identifier[get_users] ( identifier[self] , identifier[fetch] = keyword[True] ):
literal[string]
keyword[return] identifier[Users] ( identifier[self] . identifier[resource] . identifier[users] , identifier[self] . identifier[client] , identifier[populate] = identifier[fetch] ) | def get_users(self, fetch=True):
"""Return this Applications's users object, populating it if fetch
is True."""
return Users(self.resource.users, self.client, populate=fetch) |
def find_all_models(models):
""" Yield all models and their parents. """
for model in models:
yield model
# noinspection PyProtectedMember
for parent in model._meta.parents.keys():
for parent_model in find_all_models((parent,)):
yield parent_model | def function[find_all_models, parameter[models]]:
constant[ Yield all models and their parents. ]
for taget[name[model]] in starred[name[models]] begin[:]
<ast.Yield object at 0x7da1b0791000>
for taget[name[parent]] in starred[call[name[model]._meta.parents.keys, parameter[]]] begin[:]
for taget[name[parent_model]] in starred[call[name[find_all_models], parameter[tuple[[<ast.Name object at 0x7da1b07918d0>]]]]] begin[:]
<ast.Yield object at 0x7da1b0791ba0> | keyword[def] identifier[find_all_models] ( identifier[models] ):
literal[string]
keyword[for] identifier[model] keyword[in] identifier[models] :
keyword[yield] identifier[model]
keyword[for] identifier[parent] keyword[in] identifier[model] . identifier[_meta] . identifier[parents] . identifier[keys] ():
keyword[for] identifier[parent_model] keyword[in] identifier[find_all_models] (( identifier[parent] ,)):
keyword[yield] identifier[parent_model] | def find_all_models(models):
""" Yield all models and their parents. """
for model in models:
yield model
# noinspection PyProtectedMember
for parent in model._meta.parents.keys():
for parent_model in find_all_models((parent,)):
yield parent_model # depends on [control=['for'], data=['parent_model']] # depends on [control=['for'], data=['parent']] # depends on [control=['for'], data=['model']] |
def simplify(self, chord):
'''Simplify a chord string down to the vocabulary space'''
# Drop inversions
chord = re.sub(r'/.*$', r'', chord)
# Drop any additional or suppressed tones
chord = re.sub(r'\(.*?\)', r'', chord)
# Drop dangling : indicators
chord = re.sub(r':$', r'', chord)
# Encode the chord
root, pitches, _ = mir_eval.chord.encode(chord)
# Build the query
# To map the binary vector pitches down to bit masked integer,
# we just dot against powers of 2
P = 2**np.arange(12, dtype=int)
query = self.mask_ & pitches[::-1].dot(P)
if root < 0 and chord[0].upper() == 'N':
return 'N'
if query not in QUALITIES:
return 'X'
return '{}:{}'.format(PITCHES[root], QUALITIES[query]) | def function[simplify, parameter[self, chord]]:
constant[Simplify a chord string down to the vocabulary space]
variable[chord] assign[=] call[name[re].sub, parameter[constant[/.*$], constant[], name[chord]]]
variable[chord] assign[=] call[name[re].sub, parameter[constant[\(.*?\)], constant[], name[chord]]]
variable[chord] assign[=] call[name[re].sub, parameter[constant[:$], constant[], name[chord]]]
<ast.Tuple object at 0x7da1b10edd80> assign[=] call[name[mir_eval].chord.encode, parameter[name[chord]]]
variable[P] assign[=] binary_operation[constant[2] ** call[name[np].arange, parameter[constant[12]]]]
variable[query] assign[=] binary_operation[name[self].mask_ <ast.BitAnd object at 0x7da2590d6b60> call[call[name[pitches]][<ast.Slice object at 0x7da1b10eda20>].dot, parameter[name[P]]]]
if <ast.BoolOp object at 0x7da1b10ef010> begin[:]
return[constant[N]]
if compare[name[query] <ast.NotIn object at 0x7da2590d7190> name[QUALITIES]] begin[:]
return[constant[X]]
return[call[constant[{}:{}].format, parameter[call[name[PITCHES]][name[root]], call[name[QUALITIES]][name[query]]]]] | keyword[def] identifier[simplify] ( identifier[self] , identifier[chord] ):
literal[string]
identifier[chord] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[chord] )
identifier[chord] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[chord] )
identifier[chord] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[chord] )
identifier[root] , identifier[pitches] , identifier[_] = identifier[mir_eval] . identifier[chord] . identifier[encode] ( identifier[chord] )
identifier[P] = literal[int] ** identifier[np] . identifier[arange] ( literal[int] , identifier[dtype] = identifier[int] )
identifier[query] = identifier[self] . identifier[mask_] & identifier[pitches] [::- literal[int] ]. identifier[dot] ( identifier[P] )
keyword[if] identifier[root] < literal[int] keyword[and] identifier[chord] [ literal[int] ]. identifier[upper] ()== literal[string] :
keyword[return] literal[string]
keyword[if] identifier[query] keyword[not] keyword[in] identifier[QUALITIES] :
keyword[return] literal[string]
keyword[return] literal[string] . identifier[format] ( identifier[PITCHES] [ identifier[root] ], identifier[QUALITIES] [ identifier[query] ]) | def simplify(self, chord):
"""Simplify a chord string down to the vocabulary space"""
# Drop inversions
chord = re.sub('/.*$', '', chord)
# Drop any additional or suppressed tones
chord = re.sub('\\(.*?\\)', '', chord)
# Drop dangling : indicators
chord = re.sub(':$', '', chord)
# Encode the chord
(root, pitches, _) = mir_eval.chord.encode(chord)
# Build the query
# To map the binary vector pitches down to bit masked integer,
# we just dot against powers of 2
P = 2 ** np.arange(12, dtype=int)
query = self.mask_ & pitches[::-1].dot(P)
if root < 0 and chord[0].upper() == 'N':
return 'N' # depends on [control=['if'], data=[]]
if query not in QUALITIES:
return 'X' # depends on [control=['if'], data=[]]
return '{}:{}'.format(PITCHES[root], QUALITIES[query]) |
def choice(choices):
"""Test that the data items are members of the set `choices`."""
def decorator(function):
"""Decorate a function with args."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""Wrap the function."""
series = function(*args, **kwargs)
return series.isin(set(choices))
return wrapper
return decorator | def function[choice, parameter[choices]]:
constant[Test that the data items are members of the set `choices`.]
def function[decorator, parameter[function]]:
constant[Decorate a function with args.]
def function[wrapper, parameter[]]:
constant[Wrap the function.]
variable[series] assign[=] call[name[function], parameter[<ast.Starred object at 0x7da204620f70>]]
return[call[name[series].isin, parameter[call[name[set], parameter[name[choices]]]]]]
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[choice] ( identifier[choices] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[function] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[function] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[series] = identifier[function] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[series] . identifier[isin] ( identifier[set] ( identifier[choices] ))
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def choice(choices):
"""Test that the data items are members of the set `choices`."""
def decorator(function):
"""Decorate a function with args."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""Wrap the function."""
series = function(*args, **kwargs)
return series.isin(set(choices))
return wrapper
return decorator |
def print_block(self, section_key, f=sys.stdout, file_format="mwtab"):
    """Print `mwtab` section into a file or stdout.
    :param str section_key: Section name.
    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `mwtab` or `json`.
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "mwtab":
        for key, value in self[section_key].items():
            # In the header section only VERSION and CREATED_ON are emitted
            # by this loop; the other header keys are skipped.
            if section_key == "METABOLOMICS WORKBENCH" and key not in ("VERSION", "CREATED_ON"):
                continue
            # ``cw`` = padding spaces so values start at a fixed column
            # (20 / 33 / 30 characters depending on the key family).
            if key in ("VERSION", "CREATED_ON"):
                cw = 20 - len(key)
            elif key in ("SUBJECT_SAMPLE_FACTORS", ):
                cw = 33 - len(key)
            else:
                cw = 30 - len(key)
            if "\n" in value:
                # Multi-line value: repeat the (section-prefixed) key on
                # every line of the value.
                for line in value.split("\n"):
                    print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", line), file=f)
            elif key == "SUBJECT_SAMPLE_FACTORS":
                # One tab-separated line per factor record.
                for factor in value:
                    print("{}{}\t{}".format(key, cw * " ", "\t".join(factor.values())), file=f)
            elif key.endswith(":UNITS"):
                print("{}\t{}".format(key, value), file=f)
            elif key.endswith("_RESULTS_FILE"):
                # dict form: items are flattened pairwise into the
                # positional fields of the format string.
                if isinstance(value, dict):
                    print("{}{} \t{}\t{}:{}".format(self.prefixes.get(section_key, ""),
                                                    *[i for pair in value.items() for i in pair]), file=f)
                else:
                    print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
            elif key.endswith("_START"):
                # Tabular sub-block delimited by *_START / *_END markers.
                start_key = key
                end_key = "{}{}".format(start_key[:-5], "END")
                print(start_key, file=f)
                for data_key in value:
                    if data_key in ("Samples", "Factors"):
                        print("{}\t{}".format(data_key, "\t".join(self[section_key][key][data_key])), file=f)
                    elif data_key in ("Fields", ):
                        print("{}".format("\t".join(self[section_key][key][data_key])), file=f)
                    elif data_key == "DATA":
                        # One tab-separated row per data record.
                        for data in self[section_key][key][data_key]:
                            print("\t".join(data.values()), file=f)
                print(end_key, file=f)
            else:
                print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
    elif file_format == "json":
        print(json.dumps(self[section_key], sort_keys=False, indent=4), file=f)
constant[Print `mwtab` section into a file or stdout.
:param str section_key: Section name.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `mwtab` or `json`.
:return: None
:rtype: :py:obj:`None`
]
if compare[name[file_format] equal[==] constant[mwtab]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2380670>, <ast.Name object at 0x7da1b2587070>]]] in starred[call[call[name[self]][name[section_key]].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b2585690> begin[:]
continue
if compare[name[key] in tuple[[<ast.Constant object at 0x7da1b25864d0>, <ast.Constant object at 0x7da1b2585990>]]] begin[:]
variable[cw] assign[=] binary_operation[constant[20] - call[name[len], parameter[name[key]]]]
if compare[constant[
] in name[value]] begin[:]
for taget[name[line]] in starred[call[name[value].split, parameter[constant[
]]]] begin[:]
call[name[print], parameter[call[constant[{}{}{} {}].format, parameter[call[name[self].prefixes.get, parameter[name[section_key], constant[]]], name[key], binary_operation[name[cw] * constant[ ]], name[line]]]]] | keyword[def] identifier[print_block] ( identifier[self] , identifier[section_key] , identifier[f] = identifier[sys] . identifier[stdout] , identifier[file_format] = literal[string] ):
literal[string]
keyword[if] identifier[file_format] == literal[string] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] [ identifier[section_key] ]. identifier[items] ():
keyword[if] identifier[section_key] == literal[string] keyword[and] identifier[key] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[continue]
keyword[if] identifier[key] keyword[in] ( literal[string] , literal[string] ):
identifier[cw] = literal[int] - identifier[len] ( identifier[key] )
keyword[elif] identifier[key] keyword[in] ( literal[string] ,):
identifier[cw] = literal[int] - identifier[len] ( identifier[key] )
keyword[else] :
identifier[cw] = literal[int] - identifier[len] ( identifier[key] )
keyword[if] literal[string] keyword[in] identifier[value] :
keyword[for] identifier[line] keyword[in] identifier[value] . identifier[split] ( literal[string] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[prefixes] . identifier[get] ( identifier[section_key] , literal[string] ), identifier[key] , identifier[cw] * literal[string] , identifier[line] ), identifier[file] = identifier[f] )
keyword[elif] identifier[key] == literal[string] :
keyword[for] identifier[factor] keyword[in] identifier[value] :
identifier[print] ( literal[string] . identifier[format] ( identifier[key] , identifier[cw] * literal[string] , literal[string] . identifier[join] ( identifier[factor] . identifier[values] ())), identifier[file] = identifier[f] )
keyword[elif] identifier[key] . identifier[endswith] ( literal[string] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[key] , identifier[value] ), identifier[file] = identifier[f] )
keyword[elif] identifier[key] . identifier[endswith] ( literal[string] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[prefixes] . identifier[get] ( identifier[section_key] , literal[string] ),
*[ identifier[i] keyword[for] identifier[pair] keyword[in] identifier[value] . identifier[items] () keyword[for] identifier[i] keyword[in] identifier[pair] ]), identifier[file] = identifier[f] )
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[prefixes] . identifier[get] ( identifier[section_key] , literal[string] ), identifier[key] , identifier[cw] * literal[string] , identifier[value] ), identifier[file] = identifier[f] )
keyword[elif] identifier[key] . identifier[endswith] ( literal[string] ):
identifier[start_key] = identifier[key]
identifier[end_key] = literal[string] . identifier[format] ( identifier[start_key] [:- literal[int] ], literal[string] )
identifier[print] ( identifier[start_key] , identifier[file] = identifier[f] )
keyword[for] identifier[data_key] keyword[in] identifier[value] :
keyword[if] identifier[data_key] keyword[in] ( literal[string] , literal[string] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[data_key] , literal[string] . identifier[join] ( identifier[self] [ identifier[section_key] ][ identifier[key] ][ identifier[data_key] ])), identifier[file] = identifier[f] )
keyword[elif] identifier[data_key] keyword[in] ( literal[string] ,):
identifier[print] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[self] [ identifier[section_key] ][ identifier[key] ][ identifier[data_key] ])), identifier[file] = identifier[f] )
keyword[elif] identifier[data_key] == literal[string] :
keyword[for] identifier[data] keyword[in] identifier[self] [ identifier[section_key] ][ identifier[key] ][ identifier[data_key] ]:
identifier[print] ( literal[string] . identifier[join] ( identifier[data] . identifier[values] ()), identifier[file] = identifier[f] )
identifier[print] ( identifier[end_key] , identifier[file] = identifier[f] )
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[prefixes] . identifier[get] ( identifier[section_key] , literal[string] ), identifier[key] , identifier[cw] * literal[string] , identifier[value] ), identifier[file] = identifier[f] )
keyword[elif] identifier[file_format] == literal[string] :
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[self] [ identifier[section_key] ], identifier[sort_keys] = keyword[False] , identifier[indent] = literal[int] ), identifier[file] = identifier[f] ) | def print_block(self, section_key, f=sys.stdout, file_format='mwtab'):
"""Print `mwtab` section into a file or stdout.
:param str section_key: Section name.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `mwtab` or `json`.
:return: None
:rtype: :py:obj:`None`
"""
if file_format == 'mwtab':
for (key, value) in self[section_key].items():
if section_key == 'METABOLOMICS WORKBENCH' and key not in ('VERSION', 'CREATED_ON'):
continue # depends on [control=['if'], data=[]]
if key in ('VERSION', 'CREATED_ON'):
cw = 20 - len(key) # depends on [control=['if'], data=['key']]
elif key in ('SUBJECT_SAMPLE_FACTORS',):
cw = 33 - len(key) # depends on [control=['if'], data=['key']]
else:
cw = 30 - len(key)
if '\n' in value:
for line in value.split('\n'):
print('{}{}{}\t{}'.format(self.prefixes.get(section_key, ''), key, cw * ' ', line), file=f) # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=['value']]
elif key == 'SUBJECT_SAMPLE_FACTORS':
for factor in value:
print('{}{}\t{}'.format(key, cw * ' ', '\t'.join(factor.values())), file=f) # depends on [control=['for'], data=['factor']] # depends on [control=['if'], data=['key']]
elif key.endswith(':UNITS'):
print('{}\t{}'.format(key, value), file=f) # depends on [control=['if'], data=[]]
elif key.endswith('_RESULTS_FILE'):
if isinstance(value, dict):
print('{}{} \t{}\t{}:{}'.format(self.prefixes.get(section_key, ''), *[i for pair in value.items() for i in pair]), file=f) # depends on [control=['if'], data=[]]
else:
print('{}{}{}\t{}'.format(self.prefixes.get(section_key, ''), key, cw * ' ', value), file=f) # depends on [control=['if'], data=[]]
elif key.endswith('_START'):
start_key = key
end_key = '{}{}'.format(start_key[:-5], 'END')
print(start_key, file=f)
for data_key in value:
if data_key in ('Samples', 'Factors'):
print('{}\t{}'.format(data_key, '\t'.join(self[section_key][key][data_key])), file=f) # depends on [control=['if'], data=['data_key']]
elif data_key in ('Fields',):
print('{}'.format('\t'.join(self[section_key][key][data_key])), file=f) # depends on [control=['if'], data=['data_key']]
elif data_key == 'DATA':
for data in self[section_key][key][data_key]:
print('\t'.join(data.values()), file=f) # depends on [control=['for'], data=['data']] # depends on [control=['if'], data=['data_key']] # depends on [control=['for'], data=['data_key']]
print(end_key, file=f) # depends on [control=['if'], data=[]]
else:
print('{}{}{}\t{}'.format(self.prefixes.get(section_key, ''), key, cw * ' ', value), file=f) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif file_format == 'json':
print(json.dumps(self[section_key], sort_keys=False, indent=4), file=f) # depends on [control=['if'], data=[]] |
def restore(self, removals):
    """Undo a supposition and all inferences from it.

    Each (variable, value) pair in *removals* is appended back onto the
    current domain of that variable.
    """
    for variable, value in removals:
        self.curr_domains[variable].append(value)
constant[Undo a supposition and all inferences from it.]
for taget[tuple[[<ast.Name object at 0x7da204564bb0>, <ast.Name object at 0x7da2045667a0>]]] in starred[name[removals]] begin[:]
call[call[name[self].curr_domains][name[B]].append, parameter[name[b]]] | keyword[def] identifier[restore] ( identifier[self] , identifier[removals] ):
literal[string]
keyword[for] identifier[B] , identifier[b] keyword[in] identifier[removals] :
identifier[self] . identifier[curr_domains] [ identifier[B] ]. identifier[append] ( identifier[b] ) | def restore(self, removals):
"""Undo a supposition and all inferences from it."""
for (B, b) in removals:
self.curr_domains[B].append(b) # depends on [control=['for'], data=[]] |
def validate_format(self, obj, pointer=None):
    """
    Validate ``obj`` against this schema's ``format`` attribute, if any.

    ================= ============
    Expected draft04  Alias of
    ----------------- ------------
    date-time         rfc3339.datetime
    email             email
    hostname          hostname
    ipv4              ipv4
    ipv6              ipv6
    uri               uri
    ================= ============
    """
    # No format declared: nothing to check.
    if 'format' not in self.attrs:
        return obj
    requested = self.attrs['format']
    aliases = {
        'date-time': 'rfc3339.datetime',
        'email': 'email',
        'hostname': 'hostname',
        'ipv4': 'ipv4',
        'ipv6': 'ipv6',
        'uri': 'uri',
    }
    substituted = aliases.get(requested, requested)
    logger.debug('use %s', substituted)
    try:
        return self.formats[substituted](obj)
    except ValidationError as error:
        logger.error(error)
        self.fail('Forbidden value', obj, pointer)
    return obj
constant[
================= ============
Expected draft04 Alias of
----------------- ------------
date-time rfc3339.datetime
email email
hostname hostname
ipv4 ipv4
ipv6 ipv6
uri uri
================= ============
]
if compare[constant[format] in name[self].attrs] begin[:]
variable[substituted] assign[=] call[dictionary[[<ast.Constant object at 0x7da1b2557160>, <ast.Constant object at 0x7da1b2557190>, <ast.Constant object at 0x7da1b25571c0>, <ast.Constant object at 0x7da1b2449d20>, <ast.Constant object at 0x7da1b244ad70>, <ast.Constant object at 0x7da1b24492a0>], [<ast.Constant object at 0x7da1b244a110>, <ast.Constant object at 0x7da1b2448d60>, <ast.Constant object at 0x7da1b2449090>, <ast.Constant object at 0x7da1b244b610>, <ast.Constant object at 0x7da1b2448dc0>, <ast.Constant object at 0x7da1b2449a50>]].get, parameter[call[name[self].attrs][constant[format]], call[name[self].attrs][constant[format]]]]
call[name[logger].debug, parameter[constant[use %s], name[substituted]]]
<ast.Try object at 0x7da1b244bc10>
return[name[obj]] | keyword[def] identifier[validate_format] ( identifier[self] , identifier[obj] , identifier[pointer] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[attrs] :
identifier[substituted] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}. identifier[get] ( identifier[self] . identifier[attrs] [ literal[string] ], identifier[self] . identifier[attrs] [ literal[string] ])
identifier[logger] . identifier[debug] ( literal[string] , identifier[substituted] )
keyword[try] :
keyword[return] identifier[self] . identifier[formats] [ identifier[substituted] ]( identifier[obj] )
keyword[except] identifier[ValidationError] keyword[as] identifier[error] :
identifier[logger] . identifier[error] ( identifier[error] )
identifier[self] . identifier[fail] ( literal[string] , identifier[obj] , identifier[pointer] )
keyword[return] identifier[obj] | def validate_format(self, obj, pointer=None):
"""
================= ============
Expected draft04 Alias of
----------------- ------------
date-time rfc3339.datetime
email email
hostname hostname
ipv4 ipv4
ipv6 ipv6
uri uri
================= ============
"""
if 'format' in self.attrs:
substituted = {'date-time': 'rfc3339.datetime', 'email': 'email', 'hostname': 'hostname', 'ipv4': 'ipv4', 'ipv6': 'ipv6', 'uri': 'uri'}.get(self.attrs['format'], self.attrs['format'])
logger.debug('use %s', substituted)
try:
return self.formats[substituted](obj) # depends on [control=['try'], data=[]]
except ValidationError as error:
logger.error(error)
self.fail('Forbidden value', obj, pointer) # depends on [control=['except'], data=['error']] # depends on [control=['if'], data=[]]
return obj |
def do_until(lambda_expr, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5, message=None):
    '''
    A retry wrapper that'll keep performing the action until it succeeds.
    (main difference between do_until and wait_until is do_until will keep trying
    until a value is returned, while wait_until will wait until the function
    evaluates True.)

    Args:
        lambda_expr (lambda) : Expression to evaluate.

    Kwargs:
        timeout (number): Timeout period in seconds.
        sleep (number) : Sleep time to wait between iterations.
        message (str) : Provide a message for the OperationTimeoutError raised.

    Returns:
        The value of the evaluated lambda expression.

    Raises:
        OperationTimeoutError: if the expression never succeeds within
        the timeout; the last exception raised by the expression is
        attached.

    Usage::
        do_until(lambda: driver.find_element_by_id("save").click(),
                 timeout=30,
                 sleep=0.5)
    '''
    __check_condition_parameter_is_function(lambda_expr)
    # Use a monotonic clock so wall-clock adjustments (NTP, DST) cannot
    # shorten or stretch the timeout window.
    deadline = time.monotonic() + timeout
    last_exception = None
    while True:
        try:
            return lambda_expr()
        except Exception as e:
            last_exception = e
        # BUG FIX: the original checked the clock *before* the first
        # attempt, so a zero/negative timeout never ran the action at
        # all (and raised with last_exception=None). Attempt-then-check
        # guarantees at least one try.
        if time.monotonic() >= deadline:
            break
        time.sleep(sleep)
    raise OperationTimeoutError(message or "Operation timed out.", last_exception)
constant[
A retry wrapper that'll keep performing the action until it succeeds.
(main differnce between do_until and wait_until is do_until will keep trying
until a value is returned, while wait until will wait until the function
evaluates True.)
Args:
lambda_expr (lambda) : Expression to evaluate.
Kwargs:
timeout (number): Timeout period in seconds.
sleep (number) : Sleep time to wait between iterations
message (str) : Provide a message for TimeoutError raised.
Returns:
The value of the evaluated lambda expression.
Usage::
do_until(lambda: driver.find_element_by_id("save").click(),
timeout=30,
sleep=0.5)
Is equivalent to:
end_time = datetime.now() + timedelta(seconds=30)
while datetime.now() < end_time:
try:
return driver.find_element_by_id("save").click()
except:
pass
time.sleep(0.5)
raise OperationTimeoutError()
]
call[name[__check_condition_parameter_is_function], parameter[name[lambda_expr]]]
variable[end_time] assign[=] binary_operation[call[name[datetime].now, parameter[]] + call[name[timedelta], parameter[]]]
variable[last_exception] assign[=] constant[None]
while compare[call[name[datetime].now, parameter[]] less[<] name[end_time]] begin[:]
<ast.Try object at 0x7da1b11da200>
if name[message] begin[:]
<ast.Raise object at 0x7da1b11d9ab0> | keyword[def] identifier[do_until] ( identifier[lambda_expr] , identifier[timeout] = identifier[WTF_TIMEOUT_MANAGER] . identifier[NORMAL] , identifier[sleep] = literal[int] , identifier[message] = keyword[None] ):
literal[string]
identifier[__check_condition_parameter_is_function] ( identifier[lambda_expr] )
identifier[end_time] = identifier[datetime] . identifier[now] ()+ identifier[timedelta] ( identifier[seconds] = identifier[timeout] )
identifier[last_exception] = keyword[None]
keyword[while] identifier[datetime] . identifier[now] ()< identifier[end_time] :
keyword[try] :
keyword[return] identifier[lambda_expr] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[last_exception] = identifier[e]
identifier[time] . identifier[sleep] ( identifier[sleep] )
keyword[if] identifier[message] :
keyword[raise] identifier[OperationTimeoutError] ( identifier[message] , identifier[last_exception] )
keyword[else] :
keyword[raise] identifier[OperationTimeoutError] ( literal[string] , identifier[last_exception] ) | def do_until(lambda_expr, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5, message=None):
"""
A retry wrapper that'll keep performing the action until it succeeds.
(main differnce between do_until and wait_until is do_until will keep trying
until a value is returned, while wait until will wait until the function
evaluates True.)
Args:
lambda_expr (lambda) : Expression to evaluate.
Kwargs:
timeout (number): Timeout period in seconds.
sleep (number) : Sleep time to wait between iterations
message (str) : Provide a message for TimeoutError raised.
Returns:
The value of the evaluated lambda expression.
Usage::
do_until(lambda: driver.find_element_by_id("save").click(),
timeout=30,
sleep=0.5)
Is equivalent to:
end_time = datetime.now() + timedelta(seconds=30)
while datetime.now() < end_time:
try:
return driver.find_element_by_id("save").click()
except:
pass
time.sleep(0.5)
raise OperationTimeoutError()
"""
__check_condition_parameter_is_function(lambda_expr)
end_time = datetime.now() + timedelta(seconds=timeout)
last_exception = None
while datetime.now() < end_time:
try:
return lambda_expr() # depends on [control=['try'], data=[]]
except Exception as e:
last_exception = e
time.sleep(sleep) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
if message:
raise OperationTimeoutError(message, last_exception) # depends on [control=['if'], data=[]]
else:
raise OperationTimeoutError('Operation timed out.', last_exception) |
def rgb2termhex(r: int, g: int, b: int) -> str:
    """ Convert an rgb value to the nearest hex value that matches a term code.
    The hex value will be one in `hex2term_map`.

    :param r,g,b: channel values, each 0-255.
    :raises ValueError: if any channel is outside 0-255.
    """
    # xterm 256-color cube channel levels.
    incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
    res = []
    # NOTE: the original reused ``b`` as a loop temporary, shadowing the
    # blue parameter; iterating a separate tuple avoids that hazard.
    for part in (r, g, b):
        if (part < 0) or (part > 255):
            raise ValueError(
                'Expecting 0-255 for RGB code, got: {!r}'.format((r, g, b))
            )
        # Nearest cube level; on an exact midpoint prefer the *larger*
        # level, matching the original interval walk's tie-break.
        res.append(min(incs, key=lambda level: (abs(level - part), -level)))
    # Convert back into nearest hex value.
    return rgb2hex(*res)
constant[ Convert an rgb value to the nearest hex value that matches a term code.
The hex value will be one in `hex2term_map`.
]
variable[incs] assign[=] list[[<ast.Constant object at 0x7da1b0277e50>, <ast.Constant object at 0x7da1b0275600>, <ast.Constant object at 0x7da1b0274af0>, <ast.Constant object at 0x7da1b02746d0>, <ast.Constant object at 0x7da1b0275420>, <ast.Constant object at 0x7da1b0274c70>]]
variable[res] assign[=] list[[]]
variable[parts] assign[=] tuple[[<ast.Name object at 0x7da1b0275000>, <ast.Name object at 0x7da1b0274d00>, <ast.Name object at 0x7da1b02756f0>]]
for taget[name[part]] in starred[name[parts]] begin[:]
if <ast.BoolOp object at 0x7da1b0275780> begin[:]
<ast.Raise object at 0x7da1b02754b0>
variable[i] assign[=] constant[0]
while compare[name[i] less[<] binary_operation[call[name[len], parameter[name[incs]]] - constant[1]]] begin[:]
<ast.Tuple object at 0x7da1b0277dc0> assign[=] tuple[[<ast.Subscript object at 0x7da1b0274f40>, <ast.Subscript object at 0x7da1b0274c40>]]
if compare[name[s] less_or_equal[<=] name[part]] begin[:]
variable[s1] assign[=] call[name[abs], parameter[binary_operation[name[s] - name[part]]]]
variable[b1] assign[=] call[name[abs], parameter[binary_operation[name[b] - name[part]]]]
if compare[name[s1] less[<] name[b1]] begin[:]
variable[closest] assign[=] name[s]
call[name[res].append, parameter[name[closest]]]
break
<ast.AugAssign object at 0x7da18ede53f0>
return[call[name[rgb2hex], parameter[<ast.Starred object at 0x7da18ede62f0>]]] | keyword[def] identifier[rgb2termhex] ( identifier[r] : identifier[int] , identifier[g] : identifier[int] , identifier[b] : identifier[int] )-> identifier[str] :
literal[string]
identifier[incs] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[res] =[]
identifier[parts] = identifier[r] , identifier[g] , identifier[b]
keyword[for] identifier[part] keyword[in] identifier[parts] :
keyword[if] ( identifier[part] < literal[int] ) keyword[or] ( identifier[part] > literal[int] ):
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[parts] )
)
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[incs] )- literal[int] :
identifier[s] , identifier[b] = identifier[incs] [ identifier[i] ], identifier[incs] [ identifier[i] + literal[int] ]
keyword[if] identifier[s] <= identifier[part] <= identifier[b] :
identifier[s1] = identifier[abs] ( identifier[s] - identifier[part] )
identifier[b1] = identifier[abs] ( identifier[b] - identifier[part] )
keyword[if] identifier[s1] < identifier[b1] :
identifier[closest] = identifier[s]
keyword[else] :
identifier[closest] = identifier[b]
identifier[res] . identifier[append] ( identifier[closest] )
keyword[break]
identifier[i] += literal[int]
keyword[return] identifier[rgb2hex] (* identifier[res] ) | def rgb2termhex(r: int, g: int, b: int) -> str:
""" Convert an rgb value to the nearest hex value that matches a term code.
The hex value will be one in `hex2term_map`.
"""
incs = [0, 95, 135, 175, 215, 255]
res = []
parts = (r, g, b)
for part in parts:
if part < 0 or part > 255:
raise ValueError('Expecting 0-255 for RGB code, got: {!r}'.format(parts)) # depends on [control=['if'], data=[]]
i = 0
while i < len(incs) - 1:
(s, b) = (incs[i], incs[i + 1]) # smaller, bigger
if s <= part <= b:
s1 = abs(s - part)
b1 = abs(b - part)
if s1 < b1:
closest = s # depends on [control=['if'], data=[]]
else:
closest = b
res.append(closest)
break # depends on [control=['if'], data=['s', 'part']]
i += 1 # depends on [control=['while'], data=['i']] # depends on [control=['for'], data=['part']]
# Convert back into nearest hex value.
return rgb2hex(*res) |
def set_pixel(self, x, y, *args):
    """
    Set a single [R, G, B] pixel at position x, y on the LED matrix.

    Top left = 0,0  Bottom right = 7,7

    Accepts either three separate channel values::

        ap.set_pixel(x, y, r, g, b)

    or a single (r, g, b) sequence::

        ap.set_pixel(x, y, (r, g, b))
    """
    bad_pixel = 'Pixel arguments must be given as (r, g, b) or r, g, b'
    if len(args) == 1:
        rgb = args[0]
        if len(rgb) != 3:
            raise ValueError(bad_pixel)
    elif len(args) == 3:
        rgb = args
    else:
        raise ValueError(bad_pixel)
    if not 0 <= x <= 7:
        raise ValueError('X position must be between 0 and 7')
    if not 0 <= y <= 7:
        raise ValueError('Y position must be between 0 and 7')
    if any(channel < 0 or channel > 255 for channel in rgb):
        raise ValueError('Pixel elements must be between 0 and 255')
    with open(self._fb_device, 'wb') as framebuffer:
        layout = self._pix_map[self._rotation]
        # Two bytes per pixel in fb memory, 16 bit RGB565.
        framebuffer.seek(layout[y][x] * 2)  # row, column
        framebuffer.write(self._pack_bin(rgb))
constant[
Updates the single [R,G,B] pixel specified by x and y on the LED matrix
Top left = 0,0 Bottom right = 7,7
e.g. ap.set_pixel(x, y, r, g, b)
or
pixel = (r, g, b)
ap.set_pixel(x, y, pixel)
]
variable[pixel_error] assign[=] constant[Pixel arguments must be given as (r, g, b) or r, g, b]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
variable[pixel] assign[=] call[name[args]][constant[0]]
if compare[call[name[len], parameter[name[pixel]]] not_equal[!=] constant[3]] begin[:]
<ast.Raise object at 0x7da1b08a1570>
if <ast.BoolOp object at 0x7da1b08a2380> begin[:]
<ast.Raise object at 0x7da1b08a36a0>
if <ast.BoolOp object at 0x7da1b08a0fa0> begin[:]
<ast.Raise object at 0x7da1b08a3670>
for taget[name[element]] in starred[name[pixel]] begin[:]
if <ast.BoolOp object at 0x7da204960940> begin[:]
<ast.Raise object at 0x7da204961360>
with call[name[open], parameter[name[self]._fb_device, constant[wb]]] begin[:]
variable[map] assign[=] call[name[self]._pix_map][name[self]._rotation]
call[name[f].seek, parameter[binary_operation[call[call[name[map]][name[y]]][name[x]] * constant[2]]]]
call[name[f].write, parameter[call[name[self]._pack_bin, parameter[name[pixel]]]]] | keyword[def] identifier[set_pixel] ( identifier[self] , identifier[x] , identifier[y] ,* identifier[args] ):
literal[string]
identifier[pixel_error] = literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[pixel] = identifier[args] [ literal[int] ]
keyword[if] identifier[len] ( identifier[pixel] )!= literal[int] :
keyword[raise] identifier[ValueError] ( identifier[pixel_error] )
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[pixel] = identifier[args]
keyword[else] :
keyword[raise] identifier[ValueError] ( identifier[pixel_error] )
keyword[if] identifier[x] > literal[int] keyword[or] identifier[x] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[y] > literal[int] keyword[or] identifier[y] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[element] keyword[in] identifier[pixel] :
keyword[if] identifier[element] > literal[int] keyword[or] identifier[element] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[with] identifier[open] ( identifier[self] . identifier[_fb_device] , literal[string] ) keyword[as] identifier[f] :
identifier[map] = identifier[self] . identifier[_pix_map] [ identifier[self] . identifier[_rotation] ]
identifier[f] . identifier[seek] ( identifier[map] [ identifier[y] ][ identifier[x] ]* literal[int] )
identifier[f] . identifier[write] ( identifier[self] . identifier[_pack_bin] ( identifier[pixel] )) | def set_pixel(self, x, y, *args):
"""
Updates the single [R,G,B] pixel specified by x and y on the LED matrix
Top left = 0,0 Bottom right = 7,7
e.g. ap.set_pixel(x, y, r, g, b)
or
pixel = (r, g, b)
ap.set_pixel(x, y, pixel)
"""
pixel_error = 'Pixel arguments must be given as (r, g, b) or r, g, b'
if len(args) == 1:
pixel = args[0]
if len(pixel) != 3:
raise ValueError(pixel_error) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif len(args) == 3:
pixel = args # depends on [control=['if'], data=[]]
else:
raise ValueError(pixel_error)
if x > 7 or x < 0:
raise ValueError('X position must be between 0 and 7') # depends on [control=['if'], data=[]]
if y > 7 or y < 0:
raise ValueError('Y position must be between 0 and 7') # depends on [control=['if'], data=[]]
for element in pixel:
if element > 255 or element < 0:
raise ValueError('Pixel elements must be between 0 and 255') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
with open(self._fb_device, 'wb') as f:
map = self._pix_map[self._rotation]
# Two bytes per pixel in fb memory, 16 bit RGB565
f.seek(map[y][x] * 2) # row, column
f.write(self._pack_bin(pixel)) # depends on [control=['with'], data=['f']] |
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information | def function[_build_encryption_key_information, parameter[self, value]]:
constant[
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
]
if compare[name[value] is constant[None]] begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da18fe91300> begin[:]
<ast.Raise object at 0x7da18fe93a60>
variable[cryptographic_parameters] assign[=] call[name[value].get, parameter[constant[cryptographic_parameters]]]
if name[cryptographic_parameters] begin[:]
variable[cryptographic_parameters] assign[=] call[name[self]._build_cryptographic_parameters, parameter[name[cryptographic_parameters]]]
variable[encryption_key_information] assign[=] call[name[cobjects].EncryptionKeyInformation, parameter[]]
return[name[encryption_key_information]] | keyword[def] identifier[_build_encryption_key_information] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[cryptographic_parameters] = identifier[value] . identifier[get] ( literal[string] )
keyword[if] identifier[cryptographic_parameters] :
identifier[cryptographic_parameters] = identifier[self] . identifier[_build_cryptographic_parameters] (
identifier[cryptographic_parameters]
)
identifier[encryption_key_information] = identifier[cobjects] . identifier[EncryptionKeyInformation] (
identifier[unique_identifier] = identifier[value] . identifier[get] ( literal[string] ),
identifier[cryptographic_parameters] = identifier[cryptographic_parameters]
)
keyword[return] identifier[encryption_key_information] | def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None # depends on [control=['if'], data=[]]
if not isinstance(value, dict):
raise TypeError('Encryption key information must be a dictionary.') # depends on [control=['if'], data=[]]
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(cryptographic_parameters) # depends on [control=['if'], data=[]]
encryption_key_information = cobjects.EncryptionKeyInformation(unique_identifier=value.get('unique_identifier'), cryptographic_parameters=cryptographic_parameters)
return encryption_key_information |
def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False):
"""
Convenience function for returning back fixed_inputs where the other inputs
are fixed using fix_routine
:param model: model
:type model: Model
:param non_fixed_inputs: dimensions of non fixed inputs
:type non_fixed_inputs: list
:param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
:type fix_routine: string
:param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix
:type as_list: boolean
"""
from ...inference.latent_function_inference.posterior import VariationalPosterior
f_inputs = []
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values.copy()
elif isinstance(model.X, VariationalPosterior):
X = model.X.values.copy()
else:
if X_all:
X = model.X_all.copy()
else:
X = model.X.copy()
for i in range(X.shape[1]):
if i not in non_fixed_inputs:
if fix_routine == 'mean':
f_inputs.append( (i, np.mean(X[:,i])) )
if fix_routine == 'median':
f_inputs.append( (i, np.median(X[:,i])) )
else: # set to zero zero
f_inputs.append( (i, 0) )
if not as_list:
X[:,i] = f_inputs[-1][1]
if as_list:
return f_inputs
else:
return X | def function[fixed_inputs, parameter[model, non_fixed_inputs, fix_routine, as_list, X_all]]:
constant[
Convenience function for returning back fixed_inputs where the other inputs
are fixed using fix_routine
:param model: model
:type model: Model
:param non_fixed_inputs: dimensions of non fixed inputs
:type non_fixed_inputs: list
:param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
:type fix_routine: string
:param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix
:type as_list: boolean
]
from relative_module[inference.latent_function_inference.posterior] import module[VariationalPosterior]
variable[f_inputs] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b1b2b220> begin[:]
variable[X] assign[=] call[name[model].X.mean.values.copy, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[X].shape][constant[1]]]]] begin[:]
if compare[name[i] <ast.NotIn object at 0x7da2590d7190> name[non_fixed_inputs]] begin[:]
if compare[name[fix_routine] equal[==] constant[mean]] begin[:]
call[name[f_inputs].append, parameter[tuple[[<ast.Name object at 0x7da1b1b2b880>, <ast.Call object at 0x7da1b1b2bb20>]]]]
if compare[name[fix_routine] equal[==] constant[median]] begin[:]
call[name[f_inputs].append, parameter[tuple[[<ast.Name object at 0x7da1b1b28460>, <ast.Call object at 0x7da1b1b28280>]]]]
if <ast.UnaryOp object at 0x7da1b1b28670> begin[:]
call[name[X]][tuple[[<ast.Slice object at 0x7da1b1b299c0>, <ast.Name object at 0x7da1b1b29ab0>]]] assign[=] call[call[name[f_inputs]][<ast.UnaryOp object at 0x7da1b1b291b0>]][constant[1]]
if name[as_list] begin[:]
return[name[f_inputs]] | keyword[def] identifier[fixed_inputs] ( identifier[model] , identifier[non_fixed_inputs] , identifier[fix_routine] = literal[string] , identifier[as_list] = keyword[True] , identifier[X_all] = keyword[False] ):
literal[string]
keyword[from] ... identifier[inference] . identifier[latent_function_inference] . identifier[posterior] keyword[import] identifier[VariationalPosterior]
identifier[f_inputs] =[]
keyword[if] identifier[hasattr] ( identifier[model] , literal[string] ) keyword[and] identifier[model] . identifier[has_uncertain_inputs] ():
identifier[X] = identifier[model] . identifier[X] . identifier[mean] . identifier[values] . identifier[copy] ()
keyword[elif] identifier[isinstance] ( identifier[model] . identifier[X] , identifier[VariationalPosterior] ):
identifier[X] = identifier[model] . identifier[X] . identifier[values] . identifier[copy] ()
keyword[else] :
keyword[if] identifier[X_all] :
identifier[X] = identifier[model] . identifier[X_all] . identifier[copy] ()
keyword[else] :
identifier[X] = identifier[model] . identifier[X] . identifier[copy] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[X] . identifier[shape] [ literal[int] ]):
keyword[if] identifier[i] keyword[not] keyword[in] identifier[non_fixed_inputs] :
keyword[if] identifier[fix_routine] == literal[string] :
identifier[f_inputs] . identifier[append] (( identifier[i] , identifier[np] . identifier[mean] ( identifier[X] [:, identifier[i] ])))
keyword[if] identifier[fix_routine] == literal[string] :
identifier[f_inputs] . identifier[append] (( identifier[i] , identifier[np] . identifier[median] ( identifier[X] [:, identifier[i] ])))
keyword[else] :
identifier[f_inputs] . identifier[append] (( identifier[i] , literal[int] ))
keyword[if] keyword[not] identifier[as_list] :
identifier[X] [:, identifier[i] ]= identifier[f_inputs] [- literal[int] ][ literal[int] ]
keyword[if] identifier[as_list] :
keyword[return] identifier[f_inputs]
keyword[else] :
keyword[return] identifier[X] | def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False):
"""
Convenience function for returning back fixed_inputs where the other inputs
are fixed using fix_routine
:param model: model
:type model: Model
:param non_fixed_inputs: dimensions of non fixed inputs
:type non_fixed_inputs: list
:param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
:type fix_routine: string
:param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix
:type as_list: boolean
"""
from ...inference.latent_function_inference.posterior import VariationalPosterior
f_inputs = []
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values.copy() # depends on [control=['if'], data=[]]
elif isinstance(model.X, VariationalPosterior):
X = model.X.values.copy() # depends on [control=['if'], data=[]]
elif X_all:
X = model.X_all.copy() # depends on [control=['if'], data=[]]
else:
X = model.X.copy()
for i in range(X.shape[1]):
if i not in non_fixed_inputs:
if fix_routine == 'mean':
f_inputs.append((i, np.mean(X[:, i]))) # depends on [control=['if'], data=[]]
if fix_routine == 'median':
f_inputs.append((i, np.median(X[:, i]))) # depends on [control=['if'], data=[]]
else: # set to zero zero
f_inputs.append((i, 0))
if not as_list:
X[:, i] = f_inputs[-1][1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=['i']]
if as_list:
return f_inputs # depends on [control=['if'], data=[]]
else:
return X |
def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine())
if native_system == self.system and native_machine == self.machine:
return ''
platform = {
'linux': 'linux32',
'android-api-16': 'android-arm',
'android-aarch64': 'android-arm64',
}.get(self.gecko_platform, self.gecko_platform)
return platform + '-' | def function[auto_name_prefix, parameter[self]]:
constant[
Generate platform prefix for cross-platform downloads.
]
variable[native_system] assign[=] call[name[std_platform].system, parameter[]]
variable[native_machine] assign[=] call[name[self].CPU_ALIASES.get, parameter[call[name[std_platform].machine, parameter[]], call[name[std_platform].machine, parameter[]]]]
if <ast.BoolOp object at 0x7da18dc9a620> begin[:]
return[constant[]]
variable[platform] assign[=] call[dictionary[[<ast.Constant object at 0x7da204346a70>, <ast.Constant object at 0x7da2043449d0>, <ast.Constant object at 0x7da2043479d0>], [<ast.Constant object at 0x7da204344130>, <ast.Constant object at 0x7da204346b60>, <ast.Constant object at 0x7da204345cc0>]].get, parameter[name[self].gecko_platform, name[self].gecko_platform]]
return[binary_operation[name[platform] + constant[-]]] | keyword[def] identifier[auto_name_prefix] ( identifier[self] ):
literal[string]
identifier[native_system] = identifier[std_platform] . identifier[system] ()
identifier[native_machine] = identifier[self] . identifier[CPU_ALIASES] . identifier[get] ( identifier[std_platform] . identifier[machine] (), identifier[std_platform] . identifier[machine] ())
keyword[if] identifier[native_system] == identifier[self] . identifier[system] keyword[and] identifier[native_machine] == identifier[self] . identifier[machine] :
keyword[return] literal[string]
identifier[platform] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}. identifier[get] ( identifier[self] . identifier[gecko_platform] , identifier[self] . identifier[gecko_platform] )
keyword[return] identifier[platform] + literal[string] | def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine())
if native_system == self.system and native_machine == self.machine:
return '' # depends on [control=['if'], data=[]]
platform = {'linux': 'linux32', 'android-api-16': 'android-arm', 'android-aarch64': 'android-arm64'}.get(self.gecko_platform, self.gecko_platform)
return platform + '-' |
def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) | def function[update, parameter[self, uid]]:
constant[
Update the wiki.
]
variable[postinfo] assign[=] call[name[MWiki].get_by_uid, parameter[name[uid]]]
if <ast.BoolOp object at 0x7da1b04f97b0> begin[:]
pass
variable[post_data] assign[=] call[name[self].get_post_data, parameter[]]
call[name[post_data]][constant[user_name]] assign[=] name[self].userinfo.user_name
variable[cnt_old] assign[=] call[call[name[tornado].escape.xhtml_unescape, parameter[name[postinfo].cnt_md]].strip, parameter[]]
variable[cnt_new] assign[=] call[call[name[post_data]][constant[cnt_md]].strip, parameter[]]
if compare[name[cnt_old] equal[==] name[cnt_new]] begin[:]
pass
call[name[MWiki].update, parameter[name[uid], name[post_data]]]
call[call[name[tornado].ioloop.IOLoop.instance, parameter[]].add_callback, parameter[name[self].cele_gen_whoosh]]
call[name[self].redirect, parameter[call[constant[/wiki/{0}].format, parameter[call[name[tornado].escape.url_escape, parameter[call[name[post_data]][constant[title]]]]]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[uid] ):
literal[string]
identifier[postinfo] = identifier[MWiki] . identifier[get_by_uid] ( identifier[uid] )
keyword[if] identifier[self] . identifier[check_post_role] ()[ literal[string] ] keyword[or] identifier[postinfo] . identifier[user_name] == identifier[self] . identifier[get_current_user] ():
keyword[pass]
keyword[else] :
keyword[return] keyword[False]
identifier[post_data] = identifier[self] . identifier[get_post_data] ()
identifier[post_data] [ literal[string] ]= identifier[self] . identifier[userinfo] . identifier[user_name]
identifier[cnt_old] = identifier[tornado] . identifier[escape] . identifier[xhtml_unescape] ( identifier[postinfo] . identifier[cnt_md] ). identifier[strip] ()
identifier[cnt_new] = identifier[post_data] [ literal[string] ]. identifier[strip] ()
keyword[if] identifier[cnt_old] == identifier[cnt_new] :
keyword[pass]
keyword[else] :
identifier[MWikiHist] . identifier[create_wiki_history] ( identifier[postinfo] )
identifier[MWiki] . identifier[update] ( identifier[uid] , identifier[post_data] )
identifier[tornado] . identifier[ioloop] . identifier[IOLoop] . identifier[instance] (). identifier[add_callback] ( identifier[self] . identifier[cele_gen_whoosh] )
identifier[self] . identifier[redirect] ( literal[string] . identifier[format] ( identifier[tornado] . identifier[escape] . identifier[url_escape] ( identifier[post_data] [ literal[string] ]))) | def update(self, uid):
"""
Update the wiki.
"""
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass # depends on [control=['if'], data=[]]
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass # depends on [control=['if'], data=[]]
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) |
def launch(self):
""" launch a file - used for starting html pages """
#os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x
import subprocess
try:
retcode = subprocess.call(self.fullname, shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
return False
else:
print("Child returned", retcode, file=sys.stderr)
return True
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
return False | def function[launch, parameter[self]]:
constant[ launch a file - used for starting html pages ]
import module[subprocess]
<ast.Try object at 0x7da18f58e5c0> | keyword[def] identifier[launch] ( identifier[self] ):
literal[string]
keyword[import] identifier[subprocess]
keyword[try] :
identifier[retcode] = identifier[subprocess] . identifier[call] ( identifier[self] . identifier[fullname] , identifier[shell] = keyword[True] )
keyword[if] identifier[retcode] < literal[int] :
identifier[print] ( literal[string] ,- identifier[retcode] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return] keyword[False]
keyword[else] :
identifier[print] ( literal[string] , identifier[retcode] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return] keyword[True]
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[print] ( literal[string] , identifier[e] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return] keyword[False] | def launch(self):
""" launch a file - used for starting html pages """
#os.system(self.fullname) # gives permission denied seeing it needs to be chmod +x
import subprocess
try:
retcode = subprocess.call(self.fullname, shell=True)
if retcode < 0:
print('Child was terminated by signal', -retcode, file=sys.stderr)
return False # depends on [control=['if'], data=['retcode']]
else:
print('Child returned', retcode, file=sys.stderr)
return True # depends on [control=['try'], data=[]]
except OSError as e:
print('Execution failed:', e, file=sys.stderr)
return False # depends on [control=['except'], data=['e']] |
def _compute_bounding_box(points):
"""Given the list of coordinates (x,y), this procedure computes
the smallest rectangle that covers all the points."""
(xmin, ymin, xmax, ymax) = (999999, 999999, -999999, -999999)
for p in points:
xmin = min(xmin, p[0])
xmax = max(xmax, p[0])
ymin = min(ymin, p[1])
ymax = max(ymax, p[1])
return (xmin, ymin, xmax, ymax) | def function[_compute_bounding_box, parameter[points]]:
constant[Given the list of coordinates (x,y), this procedure computes
the smallest rectangle that covers all the points.]
<ast.Tuple object at 0x7da18f7208e0> assign[=] tuple[[<ast.Constant object at 0x7da18f723a90>, <ast.Constant object at 0x7da18f722bc0>, <ast.UnaryOp object at 0x7da18f7225f0>, <ast.UnaryOp object at 0x7da18f7233a0>]]
for taget[name[p]] in starred[name[points]] begin[:]
variable[xmin] assign[=] call[name[min], parameter[name[xmin], call[name[p]][constant[0]]]]
variable[xmax] assign[=] call[name[max], parameter[name[xmax], call[name[p]][constant[0]]]]
variable[ymin] assign[=] call[name[min], parameter[name[ymin], call[name[p]][constant[1]]]]
variable[ymax] assign[=] call[name[max], parameter[name[ymax], call[name[p]][constant[1]]]]
return[tuple[[<ast.Name object at 0x7da18f722aa0>, <ast.Name object at 0x7da18f722da0>, <ast.Name object at 0x7da18f721360>, <ast.Name object at 0x7da18f721270>]]] | keyword[def] identifier[_compute_bounding_box] ( identifier[points] ):
literal[string]
( identifier[xmin] , identifier[ymin] , identifier[xmax] , identifier[ymax] )=( literal[int] , literal[int] ,- literal[int] ,- literal[int] )
keyword[for] identifier[p] keyword[in] identifier[points] :
identifier[xmin] = identifier[min] ( identifier[xmin] , identifier[p] [ literal[int] ])
identifier[xmax] = identifier[max] ( identifier[xmax] , identifier[p] [ literal[int] ])
identifier[ymin] = identifier[min] ( identifier[ymin] , identifier[p] [ literal[int] ])
identifier[ymax] = identifier[max] ( identifier[ymax] , identifier[p] [ literal[int] ])
keyword[return] ( identifier[xmin] , identifier[ymin] , identifier[xmax] , identifier[ymax] ) | def _compute_bounding_box(points):
"""Given the list of coordinates (x,y), this procedure computes
the smallest rectangle that covers all the points."""
(xmin, ymin, xmax, ymax) = (999999, 999999, -999999, -999999)
for p in points:
xmin = min(xmin, p[0])
xmax = max(xmax, p[0])
ymin = min(ymin, p[1])
ymax = max(ymax, p[1]) # depends on [control=['for'], data=['p']]
return (xmin, ymin, xmax, ymax) |
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name)) | def function[pull_dependencies, parameter[self, nodes]]:
constant[Pull all the dependencies.]
variable[visitor] assign[=] call[name[DependencyFinderVisitor], parameter[]]
for taget[name[node]] in starred[name[nodes]] begin[:]
call[name[visitor].visit, parameter[name[node]]]
for taget[name[dependency]] in starred[tuple[[<ast.Constant object at 0x7da1b2088460>, <ast.Constant object at 0x7da1b20885b0>]]] begin[:]
variable[mapping] assign[=] call[name[getattr], parameter[name[self], name[dependency]]]
for taget[name[name]] in starred[call[name[getattr], parameter[name[visitor], name[dependency]]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[mapping]] begin[:]
call[name[mapping]][name[name]] assign[=] call[name[self].temporary_identifier, parameter[]]
call[name[self].writeline, parameter[binary_operation[constant[%s = environment.%s[%r]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b2041750>, <ast.Name object at 0x7da1b2043a90>, <ast.Name object at 0x7da1b2040b80>]]]]] | keyword[def] identifier[pull_dependencies] ( identifier[self] , identifier[nodes] ):
literal[string]
identifier[visitor] = identifier[DependencyFinderVisitor] ()
keyword[for] identifier[node] keyword[in] identifier[nodes] :
identifier[visitor] . identifier[visit] ( identifier[node] )
keyword[for] identifier[dependency] keyword[in] literal[string] , literal[string] :
identifier[mapping] = identifier[getattr] ( identifier[self] , identifier[dependency] )
keyword[for] identifier[name] keyword[in] identifier[getattr] ( identifier[visitor] , identifier[dependency] ):
keyword[if] identifier[name] keyword[not] keyword[in] identifier[mapping] :
identifier[mapping] [ identifier[name] ]= identifier[self] . identifier[temporary_identifier] ()
identifier[self] . identifier[writeline] ( literal[string] %
( identifier[mapping] [ identifier[name] ], identifier[dependency] , identifier[name] )) | def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node) # depends on [control=['for'], data=['node']]
for dependency in ('filters', 'tests'):
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier() # depends on [control=['if'], data=['name', 'mapping']]
self.writeline('%s = environment.%s[%r]' % (mapping[name], dependency, name)) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['dependency']] |
def _remove_keys_from_dict_with_nonunique_values(self, d, log_fh=None, log_outprefix=None):
'''Returns a new dictionary, with keys from input dict removed if their value was not unique'''
value_counts = collections.Counter(d.values())
new_d = {}
writing_log_file = None not in [log_fh, log_outprefix]
for key in d:
if value_counts[d[key]] == 1:
new_d[key] = d[key]
elif writing_log_file:
print(log_outprefix, 'Reject because non-unique:', d[key], sep='\t', file=log_fh)
return new_d | def function[_remove_keys_from_dict_with_nonunique_values, parameter[self, d, log_fh, log_outprefix]]:
constant[Returns a new dictionary, with keys from input dict removed if their value was not unique]
variable[value_counts] assign[=] call[name[collections].Counter, parameter[call[name[d].values, parameter[]]]]
variable[new_d] assign[=] dictionary[[], []]
variable[writing_log_file] assign[=] compare[constant[None] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Name object at 0x7da1b06895a0>, <ast.Name object at 0x7da1b0688220>]]]
for taget[name[key]] in starred[name[d]] begin[:]
if compare[call[name[value_counts]][call[name[d]][name[key]]] equal[==] constant[1]] begin[:]
call[name[new_d]][name[key]] assign[=] call[name[d]][name[key]]
return[name[new_d]] | keyword[def] identifier[_remove_keys_from_dict_with_nonunique_values] ( identifier[self] , identifier[d] , identifier[log_fh] = keyword[None] , identifier[log_outprefix] = keyword[None] ):
literal[string]
identifier[value_counts] = identifier[collections] . identifier[Counter] ( identifier[d] . identifier[values] ())
identifier[new_d] ={}
identifier[writing_log_file] = keyword[None] keyword[not] keyword[in] [ identifier[log_fh] , identifier[log_outprefix] ]
keyword[for] identifier[key] keyword[in] identifier[d] :
keyword[if] identifier[value_counts] [ identifier[d] [ identifier[key] ]]== literal[int] :
identifier[new_d] [ identifier[key] ]= identifier[d] [ identifier[key] ]
keyword[elif] identifier[writing_log_file] :
identifier[print] ( identifier[log_outprefix] , literal[string] , identifier[d] [ identifier[key] ], identifier[sep] = literal[string] , identifier[file] = identifier[log_fh] )
keyword[return] identifier[new_d] | def _remove_keys_from_dict_with_nonunique_values(self, d, log_fh=None, log_outprefix=None):
"""Returns a new dictionary, with keys from input dict removed if their value was not unique"""
value_counts = collections.Counter(d.values())
new_d = {}
writing_log_file = None not in [log_fh, log_outprefix]
for key in d:
if value_counts[d[key]] == 1:
new_d[key] = d[key] # depends on [control=['if'], data=[]]
elif writing_log_file:
print(log_outprefix, 'Reject because non-unique:', d[key], sep='\t', file=log_fh) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
return new_d |
def copy_attrs(obj1, obj2, attrs):
"""
Allows copy a list of attributes from object2 to object1.
Useful for copy ccs attributes to fragment
"""
for attr in attrs:
value = getattr(obj2, attr) if hasattr(obj2, attr) else None
if value is None and isinstance(obj2, dict) and attr in obj2:
value = obj2[attr]
setattr(obj1, attr, value) | def function[copy_attrs, parameter[obj1, obj2, attrs]]:
constant[
Allows copy a list of attributes from object2 to object1.
Useful for copy ccs attributes to fragment
]
for taget[name[attr]] in starred[name[attrs]] begin[:]
variable[value] assign[=] <ast.IfExp object at 0x7da1b1387b20>
if <ast.BoolOp object at 0x7da1b1386fb0> begin[:]
variable[value] assign[=] call[name[obj2]][name[attr]]
call[name[setattr], parameter[name[obj1], name[attr], name[value]]] | keyword[def] identifier[copy_attrs] ( identifier[obj1] , identifier[obj2] , identifier[attrs] ):
literal[string]
keyword[for] identifier[attr] keyword[in] identifier[attrs] :
identifier[value] = identifier[getattr] ( identifier[obj2] , identifier[attr] ) keyword[if] identifier[hasattr] ( identifier[obj2] , identifier[attr] ) keyword[else] keyword[None]
keyword[if] identifier[value] keyword[is] keyword[None] keyword[and] identifier[isinstance] ( identifier[obj2] , identifier[dict] ) keyword[and] identifier[attr] keyword[in] identifier[obj2] :
identifier[value] = identifier[obj2] [ identifier[attr] ]
identifier[setattr] ( identifier[obj1] , identifier[attr] , identifier[value] ) | def copy_attrs(obj1, obj2, attrs):
"""
Allows copy a list of attributes from object2 to object1.
Useful for copy ccs attributes to fragment
"""
for attr in attrs:
value = getattr(obj2, attr) if hasattr(obj2, attr) else None
if value is None and isinstance(obj2, dict) and (attr in obj2):
value = obj2[attr] # depends on [control=['if'], data=[]]
setattr(obj1, attr, value) # depends on [control=['for'], data=['attr']] |
def hold(self):
"""真实持仓
"""
return pd.concat(
[self.init_hold,
self.hold_available]
).groupby('code').sum().replace(0,
np.nan).dropna().sort_index() | def function[hold, parameter[self]]:
constant[真实持仓
]
return[call[call[call[call[call[call[name[pd].concat, parameter[list[[<ast.Attribute object at 0x7da1b2047250>, <ast.Attribute object at 0x7da1b20449d0>]]]].groupby, parameter[constant[code]]].sum, parameter[]].replace, parameter[constant[0], name[np].nan]].dropna, parameter[]].sort_index, parameter[]]] | keyword[def] identifier[hold] ( identifier[self] ):
literal[string]
keyword[return] identifier[pd] . identifier[concat] (
[ identifier[self] . identifier[init_hold] ,
identifier[self] . identifier[hold_available] ]
). identifier[groupby] ( literal[string] ). identifier[sum] (). identifier[replace] ( literal[int] ,
identifier[np] . identifier[nan] ). identifier[dropna] (). identifier[sort_index] () | def hold(self):
"""真实持仓
"""
return pd.concat([self.init_hold, self.hold_available]).groupby('code').sum().replace(0, np.nan).dropna().sort_index() |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkersCumulativeStatisticsContext for this WorkersCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
"""
if self._context is None:
self._context = WorkersCumulativeStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
return self._context | def function[_proxy, parameter[self]]:
constant[
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkersCumulativeStatisticsContext for this WorkersCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[WorkersCumulativeStatisticsContext], parameter[name[self]._version]]
return[name[self]._context] | keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[WorkersCumulativeStatisticsContext] (
identifier[self] . identifier[_version] ,
identifier[workspace_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
)
keyword[return] identifier[self] . identifier[_context] | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkersCumulativeStatisticsContext for this WorkersCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
"""
if self._context is None:
self._context = WorkersCumulativeStatisticsContext(self._version, workspace_sid=self._solution['workspace_sid']) # depends on [control=['if'], data=[]]
return self._context |
def sort(args):
"""
%prog sort <blastfile|coordsfile>
Sort lines so that same query grouped together with scores descending. The
sort is 'in-place'.
"""
p = OptionParser(sort.__doc__)
p.add_option("--query", default=False, action="store_true",
help="Sort by query position [default: %default]")
p.add_option("--ref", default=False, action="store_true",
help="Sort by reference position [default: %default]")
p.add_option("--refscore", default=False, action="store_true",
help="Sort by reference name, then score descending [default: %default]")
p.add_option("--coords", default=False, action="store_true",
help="File is .coords generated by NUCMER [default: %default]")
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
if opts.coords:
if opts.query:
key = "-k13,13 -k3,3n"
elif opts.ref:
key = "-k12,12 -k1,1n"
else:
if opts.query:
key = "-k1,1 -k7,7n"
elif opts.ref:
key = "-k2,2 -k9,9n"
elif opts.refscore:
key = "-k2,2 -k12,12gr"
else:
key = "-k1,1 -k12,12gr"
cmd = "sort"
if opts.tmpdir:
cmd += " -T {0}".format(opts.tmpdir)
cmd += " {0} {1} -o {1}".format(key, blastfile)
sh(cmd) | def function[sort, parameter[args]]:
constant[
%prog sort <blastfile|coordsfile>
Sort lines so that same query grouped together with scores descending. The
sort is 'in-place'.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[sort].__doc__]]
call[name[p].add_option, parameter[constant[--query]]]
call[name[p].add_option, parameter[constant[--ref]]]
call[name[p].add_option, parameter[constant[--refscore]]]
call[name[p].add_option, parameter[constant[--coords]]]
call[name[p].set_tmpdir, parameter[]]
<ast.Tuple object at 0x7da1b08a0f70> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08a3340>]]
<ast.Tuple object at 0x7da2047e93f0> assign[=] name[args]
if name[opts].coords begin[:]
if name[opts].query begin[:]
variable[key] assign[=] constant[-k13,13 -k3,3n]
variable[cmd] assign[=] constant[sort]
if name[opts].tmpdir begin[:]
<ast.AugAssign object at 0x7da1b08fffa0>
<ast.AugAssign object at 0x7da1b08fc2e0>
call[name[sh], parameter[name[cmd]]] | keyword[def] identifier[sort] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[sort] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_tmpdir] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[blastfile] ,= identifier[args]
keyword[if] identifier[opts] . identifier[coords] :
keyword[if] identifier[opts] . identifier[query] :
identifier[key] = literal[string]
keyword[elif] identifier[opts] . identifier[ref] :
identifier[key] = literal[string]
keyword[else] :
keyword[if] identifier[opts] . identifier[query] :
identifier[key] = literal[string]
keyword[elif] identifier[opts] . identifier[ref] :
identifier[key] = literal[string]
keyword[elif] identifier[opts] . identifier[refscore] :
identifier[key] = literal[string]
keyword[else] :
identifier[key] = literal[string]
identifier[cmd] = literal[string]
keyword[if] identifier[opts] . identifier[tmpdir] :
identifier[cmd] += literal[string] . identifier[format] ( identifier[opts] . identifier[tmpdir] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[key] , identifier[blastfile] )
identifier[sh] ( identifier[cmd] ) | def sort(args):
"""
%prog sort <blastfile|coordsfile>
Sort lines so that same query grouped together with scores descending. The
sort is 'in-place'.
"""
p = OptionParser(sort.__doc__)
p.add_option('--query', default=False, action='store_true', help='Sort by query position [default: %default]')
p.add_option('--ref', default=False, action='store_true', help='Sort by reference position [default: %default]')
p.add_option('--refscore', default=False, action='store_true', help='Sort by reference name, then score descending [default: %default]')
p.add_option('--coords', default=False, action='store_true', help='File is .coords generated by NUCMER [default: %default]')
p.set_tmpdir()
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(blastfile,) = args
if opts.coords:
if opts.query:
key = '-k13,13 -k3,3n' # depends on [control=['if'], data=[]]
elif opts.ref:
key = '-k12,12 -k1,1n' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif opts.query:
key = '-k1,1 -k7,7n' # depends on [control=['if'], data=[]]
elif opts.ref:
key = '-k2,2 -k9,9n' # depends on [control=['if'], data=[]]
elif opts.refscore:
key = '-k2,2 -k12,12gr' # depends on [control=['if'], data=[]]
else:
key = '-k1,1 -k12,12gr'
cmd = 'sort'
if opts.tmpdir:
cmd += ' -T {0}'.format(opts.tmpdir) # depends on [control=['if'], data=[]]
cmd += ' {0} {1} -o {1}'.format(key, blastfile)
sh(cmd) |
def listen(self):
    """Start the client listener and wait for server responses.

    Delegates to ``self.listener.listen()``; logs the listening port first.

    Args:
        None

    Returns:
        None
    """
    # Lazy %-style arguments are the logging-module convention: the message
    # is only formatted if the INFO level is actually emitted.
    logger.info("Listening on port %s", self.listener.listen_port)
    self.listener.listen()
constant[Starts the client listener to listen for server responses.
Args:
None
Returns:
None
]
call[name[logger].info, parameter[binary_operation[constant[Listening on port ] + call[name[str], parameter[name[self].listener.listen_port]]]]]
call[name[self].listener.listen, parameter[]] | keyword[def] identifier[listen] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] + identifier[str] ( identifier[self] . identifier[listener] . identifier[listen_port] ))
identifier[self] . identifier[listener] . identifier[listen] () | def listen(self):
"""Starts the client listener to listen for server responses.
Args:
None
Returns:
None
"""
logger.info('Listening on port ' + str(self.listener.listen_port))
self.listener.listen() |
def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None, LifetimeInSeconds=None, AssignmentDurationInSeconds=None, Reward=None, Title=None, Keywords=None, Description=None, Question=None, RequesterAnnotation=None, QualificationRequirements=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):
    """Create a new Human Intelligence Task (HIT) on Amazon Mechanical Turk.

    The new HIT is made available for Workers to find and accept on the
    Mechanical Turk website.  A new HITTypeId is generated from the common
    properties (title, description, reward, ...); for large batches, prefer
    generating a HITTypeId once via CreateHITType and then calling
    CreateHITWithHITType.  Question data is supplied either through
    ``Question`` or through ``HITLayoutId`` plus ``HITLayoutParameters``
    (exactly one of the two mechanisms must be used).

    See the AWS API documentation for the full structure of each argument.

    :type MaxAssignments: integer
    :param MaxAssignments: How many times the HIT can be accepted/completed.
    :type AutoApprovalDelayInSeconds: integer
    :param AutoApprovalDelayInSeconds: Seconds after submission before an
        assignment is auto-approved unless explicitly rejected.
    :type LifetimeInSeconds: integer
    :param LifetimeInSeconds: [REQUIRED] Seconds until the HIT stops being
        available for Workers to accept.
    :type AssignmentDurationInSeconds: integer
    :param AssignmentDurationInSeconds: [REQUIRED] Seconds a Worker has to
        complete the HIT after accepting it.
    :type Reward: string
    :param Reward: [REQUIRED] Payment for successfully completing the HIT.
    :type Title: string
    :param Title: [REQUIRED] Short, descriptive HIT title.
    :type Keywords: string
    :param Keywords: Comma-separated search keywords.
    :type Description: string
    :param Description: [REQUIRED] Detailed description shown to Workers.
    :type Question: string
    :param Question: QuestionForm / ExternalQuestion / HTMLQuestion XML,
        at most 64 KB including whitespace.
    :type RequesterAnnotation: string
    :param RequesterAnnotation: Requester-only tracking data attached to
        the HIT; never shown to Workers.
    :type QualificationRequirements: list
    :param QualificationRequirements: Qualification conditions a Worker must
        meet to accept (and optionally preview) the HIT.
    :type UniqueRequestToken: string
    :param UniqueRequestToken: Idempotency token for safe retries; expires
        after 24 hours.
    :type AssignmentReviewPolicy: dict
    :param AssignmentReviewPolicy: Assignment-level review policy.
    :type HITReviewPolicy: dict
    :param HITReviewPolicy: HIT-level review policy.
    :type HITLayoutId: string
    :param HITLayoutId: Pre-existing HIT layout to instantiate.
    :type HITLayoutParameters: list
    :param HITLayoutParameters: Placeholder values for the HITLayout.
    :rtype: dict
    :return: {'HIT': {...}} describing the created HIT (HITId, HITTypeId,
        status, timing, qualification requirements, assignment counts, ...).
    """
    # Auto-generated client stub: the real implementation is provided by the
    # botocore service model at runtime.
    pass
constant[
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
]
pass | keyword[def] identifier[create_hit] ( identifier[MaxAssignments] = keyword[None] , identifier[AutoApprovalDelayInSeconds] = keyword[None] , identifier[LifetimeInSeconds] = keyword[None] , identifier[AssignmentDurationInSeconds] = keyword[None] , identifier[Reward] = keyword[None] , identifier[Title] = keyword[None] , identifier[Keywords] = keyword[None] , identifier[Description] = keyword[None] , identifier[Question] = keyword[None] , identifier[RequesterAnnotation] = keyword[None] , identifier[QualificationRequirements] = keyword[None] , identifier[UniqueRequestToken] = keyword[None] , identifier[AssignmentReviewPolicy] = keyword[None] , identifier[HITReviewPolicy] = keyword[None] , identifier[HITLayoutId] = keyword[None] , identifier[HITLayoutParameters] = keyword[None] ):
literal[string]
keyword[pass] | def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None, LifetimeInSeconds=None, AssignmentDurationInSeconds=None, Reward=None, Title=None, Keywords=None, Description=None, Question=None, RequesterAnnotation=None, QualificationRequirements=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):
"""
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
"""
pass |
def forget(empowered, powerupClass, interface):
    """
    Forgets powerups previously stored with ``remember``.

    :param empowered: The Empowered (Store or Item) to be powered down.
    :type empowered: ``axiom.item.Empowered``
    :param powerupClass: The class for which powerups will be forgotten.
    :type powerupClass: class
    :param interface: The interface the powerups were installed for.
    :type interface: ``zope.interface.Interface``
    :returns: ``None``
    :raises ValueError: Class wasn't previously remembered.
    """
    qualifiedName = fullyQualifiedName(powerupClass)
    matches = empowered.store.query(
        _StoredByName, _StoredByName.className == qualifiedName)
    # An empty result means the class was never remembered (or was
    # already forgotten): report that instead of silently doing nothing.
    if not matches.count():
        raise ValueError(
            "No named powerups for {} (interface: {})".format(
                powerupClass, interface))
    for powerup in matches:
        empowered.powerDown(powerup, interface)
        powerup.deleteFromStore()
constant[
Forgets powerups previously stored with ``remember``.
:param empowered: The Empowered (Store or Item) to be powered down.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class for which powerups will be forgotten.
:type powerupClass: class
:param interface: The interface the powerups were installed for.
:type interface: ``zope.interface.Interface``
:returns: ``None``
:raises ValueError: Class wasn't previously remembered.
]
variable[className] assign[=] call[name[fullyQualifiedName], parameter[name[powerupClass]]]
variable[withThisName] assign[=] compare[name[_StoredByName].className equal[==] name[className]]
variable[items] assign[=] call[name[empowered].store.query, parameter[name[_StoredByName], name[withThisName]]]
if compare[call[name[items].count, parameter[]] equal[==] constant[0]] begin[:]
variable[template] assign[=] constant[No named powerups for {} (interface: {})].format
<ast.Raise object at 0x7da1b14a9840>
for taget[name[stored]] in starred[name[items]] begin[:]
call[name[empowered].powerDown, parameter[name[stored], name[interface]]]
call[name[stored].deleteFromStore, parameter[]] | keyword[def] identifier[forget] ( identifier[empowered] , identifier[powerupClass] , identifier[interface] ):
literal[string]
identifier[className] = identifier[fullyQualifiedName] ( identifier[powerupClass] )
identifier[withThisName] = identifier[_StoredByName] . identifier[className] == identifier[className]
identifier[items] = identifier[empowered] . identifier[store] . identifier[query] ( identifier[_StoredByName] , identifier[withThisName] )
keyword[if] identifier[items] . identifier[count] ()== literal[int] :
identifier[template] = literal[string] . identifier[format]
keyword[raise] identifier[ValueError] ( identifier[template] ( identifier[powerupClass] , identifier[interface] ))
keyword[for] identifier[stored] keyword[in] identifier[items] :
identifier[empowered] . identifier[powerDown] ( identifier[stored] , identifier[interface] )
identifier[stored] . identifier[deleteFromStore] () | def forget(empowered, powerupClass, interface):
"""
Forgets powerups previously stored with ``remember``.
:param empowered: The Empowered (Store or Item) to be powered down.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class for which powerups will be forgotten.
:type powerupClass: class
:param interface: The interface the powerups were installed for.
:type interface: ``zope.interface.Interface``
:returns: ``None``
:raises ValueError: Class wasn't previously remembered.
"""
className = fullyQualifiedName(powerupClass)
withThisName = _StoredByName.className == className
items = empowered.store.query(_StoredByName, withThisName)
if items.count() == 0:
template = 'No named powerups for {} (interface: {})'.format
raise ValueError(template(powerupClass, interface)) # depends on [control=['if'], data=[]]
for stored in items:
empowered.powerDown(stored, interface)
stored.deleteFromStore() # depends on [control=['for'], data=['stored']] |
def function(script, red=255, green=255, blue=255, alpha=255, color=None):
    """Color function using muparser lib to generate new RGBA color for every
    vertex

    Red, Green, Blue and Alpha channels may be defined by specifying a function
    for each.

    See help(mlx.muparser_ref) for muparser reference documentation.

    It's possible to use the following per-vertex variables in the expression:

    Variables (per vertex):
        x, y, z (coordinates)
        nx, ny, nz (normal)
        r, g, b, a (color)
        q (quality)
        rad (radius)
        vi (vertex index)
        vtu, vtv (texture coordinates)
        ti (texture index)
        vsel (is the vertex selected? 1 yes, 0 no)
        and all custom vertex attributes already defined by user.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        red (str [0, 255]): function to generate red component
        green (str [0, 255]): function to generate green component
        blue (str [0, 255]): function to generate blue component
        alpha (str [0, 255]): function to generate alpha component
        color (str): name of one of the 140 HTML Color Names defined
            in CSS & SVG.
            Ref: https://en.wikipedia.org/wiki/Web_colors#X11_color_names
            If not None this will override the per component variables.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # TODO: add options for HSV
    # https://www.cs.rit.edu/~ncs/color/t_convert.html
    if color is not None:
        red, green, blue, _ = color_name[color.lower()]

    def _xml_escape(value):
        # Escape XML special characters in the muparser expression.
        # '&' must be replaced first so that the '&' introduced by
        # '&lt;' is not escaped a second time.
        return str(value).replace('&', '&amp;').replace('<', '&lt;')

    def _param(attribute, value, channel):
        # Build one <Param> element for a single color channel.
        return ''.join([
            '    <Param name="{}" '.format(attribute),
            'value="{}" '.format(_xml_escape(value)),
            'description="func {} = " '.format(channel),
            'type="RichString" ',
            '/>\n'])

    filter_xml = ''.join([
        '  <filter name="Per Vertex Color Function">\n',
        _param('x', red, 'r'),
        _param('y', green, 'g'),
        _param('z', blue, 'b'),
        _param('a', alpha, 'alpha'),
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
constant[Color function using muparser lib to generate new RGBA color for every
vertex
Red, Green, Blue and Alpha channels may be defined by specifying a function
for each.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
script: the FilterScript object or script filename to write
the filter to.
red (str [0, 255]): function to generate red component
green (str [0, 255]): function to generate green component
blue (str [0, 255]): function to generate blue component
alpha (str [0, 255]): function to generate alpha component
color (str): name of one of the 140 HTML Color Names defined
in CSS & SVG.
Ref: https://en.wikipedia.org/wiki/Web_colors#X11_color_names
If not None this will override the per component variables.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
]
if compare[name[color] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da18bc703d0> assign[=] call[name[color_name]][call[name[color].lower, parameter[]]]
variable[filter_xml] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da18bc700d0>, <ast.Constant object at 0x7da18bc71ab0>, <ast.Call object at 0x7da18bc72380>, <ast.Constant object at 0x7da18bc727a0>, <ast.Constant object at 0x7da18bc71a20>, <ast.Constant object at 0x7da18bc73c10>, <ast.Constant object at 0x7da18bc72470>, <ast.Call object at 0x7da18bc717e0>, <ast.Constant object at 0x7da18bc733a0>, <ast.Constant object at 0x7da18bc72e60>, <ast.Constant object at 0x7da18bc72a70>, <ast.Constant object at 0x7da18bc70ca0>, <ast.Call object at 0x7da18bc73af0>, <ast.Constant object at 0x7da20c6c7b20>, <ast.Constant object at 0x7da20c6c7730>, <ast.Constant object at 0x7da20c6c6200>, <ast.Constant object at 0x7da20c6c6350>, <ast.Call object at 0x7da20c6c4d90>, <ast.Constant object at 0x7da20c6c56f0>, <ast.Constant object at 0x7da20c6c4dc0>, <ast.Constant object at 0x7da20c6c7070>, <ast.Constant object at 0x7da20c6c4a60>]]]]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
return[constant[None]] | keyword[def] identifier[function] ( identifier[script] , identifier[red] = literal[int] , identifier[green] = literal[int] , identifier[blue] = literal[int] , identifier[alpha] = literal[int] , identifier[color] = keyword[None] ):
literal[string]
keyword[if] identifier[color] keyword[is] keyword[not] keyword[None] :
identifier[red] , identifier[green] , identifier[blue] , identifier[_] = identifier[color_name] [ identifier[color] . identifier[lower] ()]
identifier[filter_xml] = literal[string] . identifier[join] ([
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[red] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[green] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[blue] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[alpha] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[return] keyword[None] | def function(script, red=255, green=255, blue=255, alpha=255, color=None):
"""Color function using muparser lib to generate new RGBA color for every
vertex
Red, Green, Blue and Alpha channels may be defined by specifying a function
for each.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
script: the FilterScript object or script filename to write
the filter to.
red (str [0, 255]): function to generate red component
green (str [0, 255]): function to generate green component
blue (str [0, 255]): function to generate blue component
alpha (str [0, 255]): function to generate alpha component
color (str): name of one of the 140 HTML Color Names defined
in CSS & SVG.
Ref: https://en.wikipedia.org/wiki/Web_colors#X11_color_names
If not None this will override the per component variables.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
# TODO: add options for HSV
# https://www.cs.rit.edu/~ncs/color/t_convert.html
if color is not None:
(red, green, blue, _) = color_name[color.lower()] # depends on [control=['if'], data=['color']]
filter_xml = ''.join([' <filter name="Per Vertex Color Function">\n', ' <Param name="x" ', 'value="{}" '.format(str(red).replace('&', '&').replace('<', '<')), 'description="func r = " ', 'type="RichString" ', '/>\n', ' <Param name="y" ', 'value="{}" '.format(str(green).replace('&', '&').replace('<', '<')), 'description="func g = " ', 'type="RichString" ', '/>\n', ' <Param name="z" ', 'value="{}" '.format(str(blue).replace('&', '&').replace('<', '<')), 'description="func b = " ', 'type="RichString" ', '/>\n', ' <Param name="a" ', 'value="{}" '.format(str(alpha).replace('&', '&').replace('<', '<')), 'description="func alpha = " ', 'type="RichString" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def interp_qa_v1(self):
    """Calculate the lake outflow based on linear interpolation.

    Required control parameters:
      |N|
      |llake_control.Q|

    Required derived parameters:
      |llake_derived.TOY|
      |llake_derived.VQ|

    Required aide sequence:
      |llake_aides.VQ|

    Calculated aide sequence:
      |llake_aides.QA|

    Examples:

        In preparation for the following examples, define a short simulation
        time period with a simulation step size of 12 hours and initialize
        the required model object:

        >>> from hydpy import pub
        >>> pub.timegrids = '2000.01.01','2000.01.04', '12h'
        >>> from hydpy.models.llake import *
        >>> parameterstep()

        Next, for the sake of brevity, define a test function:

        >>> def test(*vqs):
        ...     for vq in vqs:
        ...         aides.vq(vq)
        ...         model.interp_qa_v1()
        ...         print(repr(aides.vq), repr(aides.qa))

        The following three relationships between the auxiliary term `vq` and
        the tabulated discharge `q` are taken as examples.  Each one is valid
        for one of the first three days in January and is defined via five
        nodes:

        >>> n(5)
        >>> derived.toy.update()
        >>> derived.vq(_1_1_6=[0., 1., 2., 2., 3.],
        ...            _1_2_6=[0., 1., 2., 2., 3.],
        ...            _1_3_6=[0., 1., 2., 3., 4.])
        >>> q(_1_1_6=[0., 0., 0., 0., 0.],
        ...   _1_2_6=[0., 2., 5., 6., 9.],
        ...   _1_3_6=[0., 2., 1., 3., 2.])

        In the first example, discharge does not depend on the actual value
        of the auxiliary term and is always zero:

        >>> model.idx_sim = pub.timegrids.init['2000.01.01']
        >>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
        vq(0.0) qa(0.0)
        vq(0.75) qa(0.0)
        vq(1.0) qa(0.0)
        vq(1.333333) qa(0.0)
        vq(2.0) qa(0.0)
        vq(2.333333) qa(0.0)
        vq(3.0) qa(0.0)
        vq(3.333333) qa(0.0)

        The second example demonstrates that relationships are allowed to
        contain jumps, which is the case for the (`vq`,`q`) pairs (2,5) and
        (2,6).  Also it demonstrates that when the highest `vq` value is
        exceeded linear extrapolation based on the two highest (`vq`,`q`)
        pairs is performed:

        >>> model.idx_sim = pub.timegrids.init['2000.01.02']
        >>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
        vq(0.0) qa(0.0)
        vq(0.75) qa(1.5)
        vq(1.0) qa(2.0)
        vq(1.333333) qa(3.0)
        vq(2.0) qa(5.0)
        vq(2.333333) qa(7.0)
        vq(3.0) qa(9.0)
        vq(3.333333) qa(10.0)

        The third example shows that the relationships do not need to be
        arranged monotonously increasing.  Particularly for the extrapolation
        range, this could result in negative values of `qa`, which is avoided
        by setting it to zero in such cases:

        >>> model.idx_sim = pub.timegrids.init['2000.01.03']
        >>> test(.5, 1.5, 2.5, 3.5, 4.5, 10.)
        vq(0.5) qa(1.0)
        vq(1.5) qa(1.5)
        vq(2.5) qa(2.0)
        vq(3.5) qa(2.5)
        vq(4.5) qa(1.5)
        vq(10.0) qa(0.0)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    aid = self.sequences.aides.fastaccess
    # Relationship (table row) valid for the current time of year.
    idx = der.toy[self.idx_sim]
    # Find the first node whose vq value is not smaller than the actual
    # vq value.  If every node is exceeded, `jdx` keeps pointing at the
    # last node, so the statement below extrapolates linearly beyond it.
    for jdx in range(1, con.n):
        if der.vq[idx, jdx] >= aid.vq:
            break
    # Linear interpolation (or extrapolation) between the nodes `jdx-1`
    # and `jdx` of the actual vq-q relationship.
    aid.qa = ((aid.vq-der.vq[idx, jdx-1]) *
              (con.q[idx, jdx]-con.q[idx, jdx-1]) /
              (der.vq[idx, jdx]-der.vq[idx, jdx-1]) +
              con.q[idx, jdx-1])
    # Negative extrapolation results are no plausible outflow values.
    aid.qa = max(aid.qa, 0.)
constant[Calculate the lake outflow based on linear interpolation.
Required control parameters:
|N|
|llake_control.Q|
Required derived parameters:
|llake_derived.TOY|
|llake_derived.VQ|
Required aide sequence:
|llake_aides.VQ|
Calculated aide sequence:
|llake_aides.QA|
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01','2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep()
Next, for the sake of brevity, define a test function:
>>> def test(*vqs):
... for vq in vqs:
... aides.vq(vq)
... model.interp_qa_v1()
... print(repr(aides.vq), repr(aides.qa))
The following three relationships between the auxiliary term `vq` and
the tabulated discharge `q` are taken as examples. Each one is valid
for one of the first three days in January and is defined via five
nodes:
>>> n(5)
>>> derived.toy.update()
>>> derived.vq(_1_1_6=[0., 1., 2., 2., 3.],
... _1_2_6=[0., 1., 2., 2., 3.],
... _1_3_6=[0., 1., 2., 3., 4.])
>>> q(_1_1_6=[0., 0., 0., 0., 0.],
... _1_2_6=[0., 2., 5., 6., 9.],
... _1_3_6=[0., 2., 1., 3., 2.])
In the first example, discharge does not depend on the actual value
of the auxiliary term and is always zero:
>>> model.idx_sim = pub.timegrids.init['2000.01.01']
>>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
vq(0.0) qa(0.0)
vq(0.75) qa(0.0)
vq(1.0) qa(0.0)
vq(1.333333) qa(0.0)
vq(2.0) qa(0.0)
vq(2.333333) qa(0.0)
vq(3.0) qa(0.0)
vq(3.333333) qa(0.0)
The seconds example demonstrates that relationships are allowed to
contain jumps, which is the case for the (`vq`,`q`) pairs (2,6) and
(2,7). Also it demonstrates that when the highest `vq` value is
exceeded linear extrapolation based on the two highest (`vq`,`q`)
pairs is performed:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
>>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
vq(0.0) qa(0.0)
vq(0.75) qa(1.5)
vq(1.0) qa(2.0)
vq(1.333333) qa(3.0)
vq(2.0) qa(5.0)
vq(2.333333) qa(7.0)
vq(3.0) qa(9.0)
vq(3.333333) qa(10.0)
The third example shows that the relationships do not need to be
arranged monotonously increasing. Particualarly for the extrapolation
range, this could result in negative values of `qa`, which is avoided
by setting it to zero in such cases:
>>> model.idx_sim = pub.timegrids.init['2000.01.03']
>>> test(.5, 1.5, 2.5, 3.5, 4.5, 10.)
vq(0.5) qa(1.0)
vq(1.5) qa(1.5)
vq(2.5) qa(2.0)
vq(3.5) qa(2.5)
vq(4.5) qa(1.5)
vq(10.0) qa(0.0)
]
variable[con] assign[=] name[self].parameters.control.fastaccess
variable[der] assign[=] name[self].parameters.derived.fastaccess
variable[aid] assign[=] name[self].sequences.aides.fastaccess
variable[idx] assign[=] call[name[der].toy][name[self].idx_sim]
for taget[name[jdx]] in starred[call[name[range], parameter[constant[1], name[con].n]]] begin[:]
if compare[call[name[der].vq][tuple[[<ast.Name object at 0x7da2044c3af0>, <ast.Name object at 0x7da2044c3970>]]] greater_or_equal[>=] name[aid].vq] begin[:]
break
name[aid].qa assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[aid].vq - call[name[der].vq][tuple[[<ast.Name object at 0x7da2044c1db0>, <ast.BinOp object at 0x7da2044c37c0>]]]] * binary_operation[call[name[con].q][tuple[[<ast.Name object at 0x7da2044c04f0>, <ast.Name object at 0x7da2044c16c0>]]] - call[name[con].q][tuple[[<ast.Name object at 0x7da2044c1f90>, <ast.BinOp object at 0x7da2041db670>]]]]] / binary_operation[call[name[der].vq][tuple[[<ast.Name object at 0x7da2041d9b40>, <ast.Name object at 0x7da2041d98d0>]]] - call[name[der].vq][tuple[[<ast.Name object at 0x7da2041d9090>, <ast.BinOp object at 0x7da2041d9840>]]]]] + call[name[con].q][tuple[[<ast.Name object at 0x7da2041d8eb0>, <ast.BinOp object at 0x7da2041da350>]]]]
name[aid].qa assign[=] call[name[max], parameter[name[aid].qa, constant[0.0]]] | keyword[def] identifier[interp_qa_v1] ( identifier[self] ):
literal[string]
identifier[con] = identifier[self] . identifier[parameters] . identifier[control] . identifier[fastaccess]
identifier[der] = identifier[self] . identifier[parameters] . identifier[derived] . identifier[fastaccess]
identifier[aid] = identifier[self] . identifier[sequences] . identifier[aides] . identifier[fastaccess]
identifier[idx] = identifier[der] . identifier[toy] [ identifier[self] . identifier[idx_sim] ]
keyword[for] identifier[jdx] keyword[in] identifier[range] ( literal[int] , identifier[con] . identifier[n] ):
keyword[if] identifier[der] . identifier[vq] [ identifier[idx] , identifier[jdx] ]>= identifier[aid] . identifier[vq] :
keyword[break]
identifier[aid] . identifier[qa] =(( identifier[aid] . identifier[vq] - identifier[der] . identifier[vq] [ identifier[idx] , identifier[jdx] - literal[int] ])*
( identifier[con] . identifier[q] [ identifier[idx] , identifier[jdx] ]- identifier[con] . identifier[q] [ identifier[idx] , identifier[jdx] - literal[int] ])/
( identifier[der] . identifier[vq] [ identifier[idx] , identifier[jdx] ]- identifier[der] . identifier[vq] [ identifier[idx] , identifier[jdx] - literal[int] ])+
identifier[con] . identifier[q] [ identifier[idx] , identifier[jdx] - literal[int] ])
identifier[aid] . identifier[qa] = identifier[max] ( identifier[aid] . identifier[qa] , literal[int] ) | def interp_qa_v1(self):
"""Calculate the lake outflow based on linear interpolation.
Required control parameters:
|N|
|llake_control.Q|
Required derived parameters:
|llake_derived.TOY|
|llake_derived.VQ|
Required aide sequence:
|llake_aides.VQ|
Calculated aide sequence:
|llake_aides.QA|
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01','2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep()
Next, for the sake of brevity, define a test function:
>>> def test(*vqs):
... for vq in vqs:
... aides.vq(vq)
... model.interp_qa_v1()
... print(repr(aides.vq), repr(aides.qa))
The following three relationships between the auxiliary term `vq` and
the tabulated discharge `q` are taken as examples. Each one is valid
for one of the first three days in January and is defined via five
nodes:
>>> n(5)
>>> derived.toy.update()
>>> derived.vq(_1_1_6=[0., 1., 2., 2., 3.],
... _1_2_6=[0., 1., 2., 2., 3.],
... _1_3_6=[0., 1., 2., 3., 4.])
>>> q(_1_1_6=[0., 0., 0., 0., 0.],
... _1_2_6=[0., 2., 5., 6., 9.],
... _1_3_6=[0., 2., 1., 3., 2.])
In the first example, discharge does not depend on the actual value
of the auxiliary term and is always zero:
>>> model.idx_sim = pub.timegrids.init['2000.01.01']
>>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
vq(0.0) qa(0.0)
vq(0.75) qa(0.0)
vq(1.0) qa(0.0)
vq(1.333333) qa(0.0)
vq(2.0) qa(0.0)
vq(2.333333) qa(0.0)
vq(3.0) qa(0.0)
vq(3.333333) qa(0.0)
The seconds example demonstrates that relationships are allowed to
contain jumps, which is the case for the (`vq`,`q`) pairs (2,6) and
(2,7). Also it demonstrates that when the highest `vq` value is
exceeded linear extrapolation based on the two highest (`vq`,`q`)
pairs is performed:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
>>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
vq(0.0) qa(0.0)
vq(0.75) qa(1.5)
vq(1.0) qa(2.0)
vq(1.333333) qa(3.0)
vq(2.0) qa(5.0)
vq(2.333333) qa(7.0)
vq(3.0) qa(9.0)
vq(3.333333) qa(10.0)
The third example shows that the relationships do not need to be
arranged monotonously increasing. Particualarly for the extrapolation
range, this could result in negative values of `qa`, which is avoided
by setting it to zero in such cases:
>>> model.idx_sim = pub.timegrids.init['2000.01.03']
>>> test(.5, 1.5, 2.5, 3.5, 4.5, 10.)
vq(0.5) qa(1.0)
vq(1.5) qa(1.5)
vq(2.5) qa(2.0)
vq(3.5) qa(2.5)
vq(4.5) qa(1.5)
vq(10.0) qa(0.0)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
aid = self.sequences.aides.fastaccess
idx = der.toy[self.idx_sim]
for jdx in range(1, con.n):
if der.vq[idx, jdx] >= aid.vq:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['jdx']]
aid.qa = (aid.vq - der.vq[idx, jdx - 1]) * (con.q[idx, jdx] - con.q[idx, jdx - 1]) / (der.vq[idx, jdx] - der.vq[idx, jdx - 1]) + con.q[idx, jdx - 1]
aid.qa = max(aid.qa, 0.0) |
def get_url(self, action, obj=None, domain=True):
    """
    Return an RFC3987 IRI for a HTML representation of the given object or,
    when no object is supplied, of the action's detail view.

    If ``domain`` is true, the current site's domain is prepended to the
    resolved path via ``add_domain``.
    """
    if obj and hasattr(obj, 'get_absolute_url'):
        # The object knows its own canonical URL.
        url = obj.get_absolute_url()
    elif obj:
        # Fall back to the generic actor view keyed by content type and pk.
        content_type = ContentType.objects.get_for_model(obj)
        url = reverse('actstream_actor', None, (content_type.pk, obj.pk))
    else:
        url = reverse('actstream_detail', None, (action.pk,))
    if not domain:
        return url
    return add_domain(Site.objects.get_current().domain, url)
constant[
Returns an RFC3987 IRI for a HTML representation of the given object, action.
If domain is true, the current site's domain will be added.
]
if <ast.UnaryOp object at 0x7da1b2235060> begin[:]
variable[url] assign[=] call[name[reverse], parameter[constant[actstream_detail], constant[None], tuple[[<ast.Attribute object at 0x7da1b2235120>]]]]
if name[domain] begin[:]
return[call[name[add_domain], parameter[call[name[Site].objects.get_current, parameter[]].domain, name[url]]]]
return[name[url]] | keyword[def] identifier[get_url] ( identifier[self] , identifier[action] , identifier[obj] = keyword[None] , identifier[domain] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[obj] :
identifier[url] = identifier[reverse] ( literal[string] , keyword[None] ,( identifier[action] . identifier[pk] ,))
keyword[elif] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[url] = identifier[obj] . identifier[get_absolute_url] ()
keyword[else] :
identifier[ctype] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[obj] )
identifier[url] = identifier[reverse] ( literal[string] , keyword[None] ,( identifier[ctype] . identifier[pk] , identifier[obj] . identifier[pk] ))
keyword[if] identifier[domain] :
keyword[return] identifier[add_domain] ( identifier[Site] . identifier[objects] . identifier[get_current] (). identifier[domain] , identifier[url] )
keyword[return] identifier[url] | def get_url(self, action, obj=None, domain=True):
"""
Returns an RFC3987 IRI for a HTML representation of the given object, action.
If domain is true, the current site's domain will be added.
"""
if not obj:
url = reverse('actstream_detail', None, (action.pk,)) # depends on [control=['if'], data=[]]
elif hasattr(obj, 'get_absolute_url'):
url = obj.get_absolute_url() # depends on [control=['if'], data=[]]
else:
ctype = ContentType.objects.get_for_model(obj)
url = reverse('actstream_actor', None, (ctype.pk, obj.pk))
if domain:
return add_domain(Site.objects.get_current().domain, url) # depends on [control=['if'], data=[]]
return url |
def get_engine(self, app=None, bind=None):
    """Return the engine for ``bind``, creating and caching it on first use."""
    app = self.get_app(app)
    state = get_state(app)
    with self._engine_lock:
        try:
            connector = state.connectors[bind]
        except KeyError:
            # First request for this bind: build a connector and memoize it.
            connector = self.make_connector(app, bind)
            state.connectors[bind] = connector
        return connector.get_engine()
constant[Returns a specific engine.]
variable[app] assign[=] call[name[self].get_app, parameter[name[app]]]
variable[state] assign[=] call[name[get_state], parameter[name[app]]]
with name[self]._engine_lock begin[:]
variable[connector] assign[=] call[name[state].connectors.get, parameter[name[bind]]]
if compare[name[connector] is constant[None]] begin[:]
variable[connector] assign[=] call[name[self].make_connector, parameter[name[app], name[bind]]]
call[name[state].connectors][name[bind]] assign[=] name[connector]
return[call[name[connector].get_engine, parameter[]]] | keyword[def] identifier[get_engine] ( identifier[self] , identifier[app] = keyword[None] , identifier[bind] = keyword[None] ):
literal[string]
identifier[app] = identifier[self] . identifier[get_app] ( identifier[app] )
identifier[state] = identifier[get_state] ( identifier[app] )
keyword[with] identifier[self] . identifier[_engine_lock] :
identifier[connector] = identifier[state] . identifier[connectors] . identifier[get] ( identifier[bind] )
keyword[if] identifier[connector] keyword[is] keyword[None] :
identifier[connector] = identifier[self] . identifier[make_connector] ( identifier[app] , identifier[bind] )
identifier[state] . identifier[connectors] [ identifier[bind] ]= identifier[connector]
keyword[return] identifier[connector] . identifier[get_engine] () | def get_engine(self, app=None, bind=None):
"""Returns a specific engine."""
app = self.get_app(app)
state = get_state(app)
with self._engine_lock:
connector = state.connectors.get(bind)
if connector is None:
connector = self.make_connector(app, bind)
state.connectors[bind] = connector # depends on [control=['if'], data=['connector']]
return connector.get_engine() # depends on [control=['with'], data=[]] |
def unpack(rv):
    """Normalize the response returned by a view.

    :param rv: the view response
    :type rv: either a :class:`werkzeug.wrappers.Response` or a
              tuple of (data, status_code, headers)
    """
    if isinstance(rv, ResponseBase):
        # Already a full response object; hand it back untouched.
        return rv
    status = None
    headers = None
    if isinstance(rv, tuple):
        # Pad short tuples so (data,), (data, status) and
        # (data, status, headers) all unpack uniformly into three slots.
        rv, status, headers = rv + (None,) * (3 - len(rv))
    if rv is None:
        raise ValueError('View function did not return a response')
    return rv, 200 if status is None else status, headers or {}
constant[Unpack the response from a view.
:param rv: the view response
:type rv: either a :class:`werkzeug.wrappers.Response` or a
tuple of (data, status_code, headers)
]
if call[name[isinstance], parameter[name[rv], name[ResponseBase]]] begin[:]
return[name[rv]]
variable[status] assign[=] constant[None]
if call[name[isinstance], parameter[name[rv], name[tuple]]] begin[:]
<ast.Tuple object at 0x7da1b0a068f0> assign[=] binary_operation[name[rv] + binary_operation[tuple[[<ast.Constant object at 0x7da1b0a064d0>]] * binary_operation[constant[3] - call[name[len], parameter[name[rv]]]]]]
if compare[name[rv] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0a053f0>
if compare[name[status] is constant[None]] begin[:]
variable[status] assign[=] constant[200]
return[tuple[[<ast.Name object at 0x7da1b0a06830>, <ast.Name object at 0x7da1b0a06710>, <ast.BoolOp object at 0x7da1b0a04040>]]] | keyword[def] identifier[unpack] ( identifier[rv] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[rv] , identifier[ResponseBase] ):
keyword[return] identifier[rv]
identifier[status] = identifier[headers] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[rv] , identifier[tuple] ):
identifier[rv] , identifier[status] , identifier[headers] = identifier[rv] +( keyword[None] ,)*( literal[int] - identifier[len] ( identifier[rv] ))
keyword[if] identifier[rv] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[status] keyword[is] keyword[None] :
identifier[status] = literal[int]
keyword[return] identifier[rv] , identifier[status] , identifier[headers] keyword[or] {} | def unpack(rv):
"""Unpack the response from a view.
:param rv: the view response
:type rv: either a :class:`werkzeug.wrappers.Response` or a
tuple of (data, status_code, headers)
"""
if isinstance(rv, ResponseBase):
return rv # depends on [control=['if'], data=[]]
status = headers = None
if isinstance(rv, tuple):
(rv, status, headers) = rv + (None,) * (3 - len(rv)) # depends on [control=['if'], data=[]]
if rv is None:
raise ValueError('View function did not return a response') # depends on [control=['if'], data=[]]
if status is None:
status = 200 # depends on [control=['if'], data=['status']]
return (rv, status, headers or {}) |
def _validate(self, inst: "InstanceNode", scope: ValidationScope,
              ctype: ContentType) -> None:
    """Extend the superclass method."""
    if scope.value & ValidationScope.syntax.value:
        # Syntax scope: check the instance against the schema pattern.
        self._check_schema_pattern(inst, ctype)
    # Recursively validate every member of this object instance.
    for member_name in inst:
        inst._member(member_name).validate(scope, ctype)
    super()._validate(inst, scope, ctype)
constant[Extend the superclass method.]
if binary_operation[name[scope].value <ast.BitAnd object at 0x7da2590d6b60> name[ValidationScope].syntax.value] begin[:]
call[name[self]._check_schema_pattern, parameter[name[inst], name[ctype]]]
for taget[name[m]] in starred[name[inst]] begin[:]
call[call[name[inst]._member, parameter[name[m]]].validate, parameter[name[scope], name[ctype]]]
call[call[name[super], parameter[]]._validate, parameter[name[inst], name[scope], name[ctype]]] | keyword[def] identifier[_validate] ( identifier[self] , identifier[inst] : literal[string] , identifier[scope] : identifier[ValidationScope] ,
identifier[ctype] : identifier[ContentType] )-> keyword[None] :
literal[string]
keyword[if] identifier[scope] . identifier[value] & identifier[ValidationScope] . identifier[syntax] . identifier[value] :
identifier[self] . identifier[_check_schema_pattern] ( identifier[inst] , identifier[ctype] )
keyword[for] identifier[m] keyword[in] identifier[inst] :
identifier[inst] . identifier[_member] ( identifier[m] ). identifier[validate] ( identifier[scope] , identifier[ctype] )
identifier[super] (). identifier[_validate] ( identifier[inst] , identifier[scope] , identifier[ctype] ) | def _validate(self, inst: 'InstanceNode', scope: ValidationScope, ctype: ContentType) -> None:
"""Extend the superclass method."""
if scope.value & ValidationScope.syntax.value: # schema
self._check_schema_pattern(inst, ctype) # depends on [control=['if'], data=[]]
for m in inst:
inst._member(m).validate(scope, ctype) # depends on [control=['for'], data=['m']]
super()._validate(inst, scope, ctype) |
def get_day_of_week_description(self):
    """Generates a description for only the DAYOFWEEK portion of the expression
    Returns:
        The DAYOFWEEK description
    """
    if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
        # DOM is specified and DOW is * so to prevent contradiction like
        # "on day 1 of the month, every day" we will not specify a DOW
        # description.
        return ""

    def get_day_name(s):
        # Map a DOW token (optionally carrying '#n' or a trailing 'L')
        # to its day name.
        exp = s
        if "#" in s:
            # Keep only the day number in front of '#'; maxsplit=1 so a
            # malformed token with two '#'s cannot break the 2-tuple unpack.
            exp, _ignored = s.split("#", 1)
        elif "L" in s:
            exp = exp.replace("L", '')
        return self.number_to_day(int(exp))

    def get_format(s):
        # Pick the sentence template matching the DOW token's form.
        if "#" in s:
            day_of_week_of_month = s[s.find("#") + 1:]
            try:
                day_of_week_of_month_number = int(day_of_week_of_month)
                choices = {
                    1: _("first"),
                    2: _("second"),
                    3: _("third"),
                    4: _("fourth"),
                    5: _("fifth"),
                }
                day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
            except ValueError:
                # Non-numeric ordinal: fall back to an empty ordinal word.
                day_of_week_of_month_description = ''
            formated = "{}{}{}".format(_(", on the "),
                                       day_of_week_of_month_description, _(" {0} of the month"))
        elif "L" in s:
            formated = _(", on the last {0} of the month")
        else:
            formated = _(", only on {0}")
        return formated

    return self.get_segment_description(
        self._expression_parts[5],
        _(", every day"),
        get_day_name,
        lambda s: _(", every {0} days of the week").format(s),
        lambda s: _(", {0} through {1}"),
        get_format
    )
) | def function[get_day_of_week_description, parameter[self]]:
constant[Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
]
if <ast.BoolOp object at 0x7da1b04d9a80> begin[:]
return[constant[]]
def function[get_day_name, parameter[s]]:
variable[exp] assign[=] name[s]
if compare[constant[#] in name[s]] begin[:]
<ast.Tuple object at 0x7da1b04d9ba0> assign[=] call[name[s].split, parameter[constant[#], constant[2]]]
return[call[name[self].number_to_day, parameter[call[name[int], parameter[name[exp]]]]]]
def function[get_format, parameter[s]]:
if compare[constant[#] in name[s]] begin[:]
variable[day_of_week_of_month] assign[=] call[name[s]][<ast.Slice object at 0x7da1b04f5d80>]
<ast.Try object at 0x7da1b04f4bb0>
variable[formated] assign[=] call[constant[{}{}{}].format, parameter[call[name[_], parameter[constant[, on the ]]], name[day_of_week_of_month_description], call[name[_], parameter[constant[ {0} of the month]]]]]
return[name[formated]]
return[call[name[self].get_segment_description, parameter[call[name[self]._expression_parts][constant[5]], call[name[_], parameter[constant[, every day]]], <ast.Lambda object at 0x7da1b04663b0>, <ast.Lambda object at 0x7da1b0467040>, <ast.Lambda object at 0x7da1b04d9bd0>, <ast.Lambda object at 0x7da1b04db520>]]] | keyword[def] identifier[get_day_of_week_description] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_expression_parts] [ literal[int] ]== literal[string] keyword[and] identifier[self] . identifier[_expression_parts] [ literal[int] ]!= literal[string] :
keyword[return] literal[string]
keyword[def] identifier[get_day_name] ( identifier[s] ):
identifier[exp] = identifier[s]
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[exp] , identifier[useless] = identifier[s] . identifier[split] ( literal[string] , literal[int] )
keyword[elif] literal[string] keyword[in] identifier[s] :
identifier[exp] = identifier[exp] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[self] . identifier[number_to_day] ( identifier[int] ( identifier[exp] ))
keyword[def] identifier[get_format] ( identifier[s] ):
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[day_of_week_of_month] = identifier[s] [ identifier[s] . identifier[find] ( literal[string] )+ literal[int] :]
keyword[try] :
identifier[day_of_week_of_month_number] = identifier[int] ( identifier[day_of_week_of_month] )
identifier[choices] ={
literal[int] : identifier[_] ( literal[string] ),
literal[int] : identifier[_] ( literal[string] ),
literal[int] : identifier[_] ( literal[string] ),
literal[int] : identifier[_] ( literal[string] ),
literal[int] : identifier[_] ( literal[string] ),
}
identifier[day_of_week_of_month_description] = identifier[choices] . identifier[get] ( identifier[day_of_week_of_month_number] , literal[string] )
keyword[except] identifier[ValueError] :
identifier[day_of_week_of_month_description] = literal[string]
identifier[formated] = literal[string] . identifier[format] ( identifier[_] ( literal[string] ),
identifier[day_of_week_of_month_description] , identifier[_] ( literal[string] ))
keyword[elif] literal[string] keyword[in] identifier[s] :
identifier[formated] = identifier[_] ( literal[string] )
keyword[else] :
identifier[formated] = identifier[_] ( literal[string] )
keyword[return] identifier[formated]
keyword[return] identifier[self] . identifier[get_segment_description] (
identifier[self] . identifier[_expression_parts] [ literal[int] ],
identifier[_] ( literal[string] ),
keyword[lambda] identifier[s] : identifier[get_day_name] ( identifier[s] ),
keyword[lambda] identifier[s] : identifier[_] ( literal[string] ). identifier[format] ( identifier[s] ),
keyword[lambda] identifier[s] : identifier[_] ( literal[string] ),
keyword[lambda] identifier[s] : identifier[get_format] ( identifier[s] )
) | def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == '*' and self._expression_parts[3] != '*':
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return '' # depends on [control=['if'], data=[]]
def get_day_name(s):
exp = s
if '#' in s:
(exp, useless) = s.split('#', 2) # depends on [control=['if'], data=['s']]
elif 'L' in s:
exp = exp.replace('L', '') # depends on [control=['if'], data=[]]
return self.number_to_day(int(exp))
def get_format(s):
if '#' in s:
day_of_week_of_month = s[s.find('#') + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {1: _('first'), 2: _('second'), 3: _('third'), 4: _('forth'), 5: _('fifth')}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '') # depends on [control=['try'], data=[]]
except ValueError:
day_of_week_of_month_description = '' # depends on [control=['except'], data=[]]
formated = '{}{}{}'.format(_(', on the '), day_of_week_of_month_description, _(' {0} of the month')) # depends on [control=['if'], data=['s']]
elif 'L' in s:
formated = _(', on the last {0} of the month') # depends on [control=['if'], data=[]]
else:
formated = _(', only on {0}')
return formated
return self.get_segment_description(self._expression_parts[5], _(', every day'), lambda s: get_day_name(s), lambda s: _(', every {0} days of the week').format(s), lambda s: _(', {0} through {1}'), lambda s: get_format(s)) |
def get_all_media(exclude=None):
    """
    Get all media from MEDIA_ROOT
    """
    exclude = exclude or []
    media = set()
    media_root = six.text_type(settings.MEDIA_ROOT)
    for root, dirs, files in os.walk(media_root):
        for name in files:
            path = os.path.abspath(os.path.join(root, name))
            relpath = os.path.relpath(path, settings.MEDIA_ROOT)
            # A path is skipped when any exclude pattern (with '*' acting
            # as a glob wildcard) matches its MEDIA_ROOT-relative form.
            excluded = any(
                re.match(r'^%s$' % re.escape(pattern).replace('\\*', '.*'), relpath)
                for pattern in exclude
            )
            if not excluded:
                media.add(path)
    return media
return media | def function[get_all_media, parameter[exclude]]:
constant[
Get all media from MEDIA_ROOT
]
if <ast.UnaryOp object at 0x7da1b06cc760> begin[:]
variable[exclude] assign[=] list[[]]
variable[media] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b06cdf90>, <ast.Name object at 0x7da1b06ce2f0>, <ast.Name object at 0x7da1b06cd4b0>]]] in starred[call[name[os].walk, parameter[call[name[six].text_type, parameter[name[settings].MEDIA_ROOT]]]]] begin[:]
for taget[name[name]] in starred[name[files]] begin[:]
variable[path] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[root], name[name]]]]]
variable[relpath] assign[=] call[name[os].path.relpath, parameter[name[path], name[settings].MEDIA_ROOT]]
for taget[name[e]] in starred[name[exclude]] begin[:]
if call[name[re].match, parameter[binary_operation[constant[^%s$] <ast.Mod object at 0x7da2590d6920> call[call[name[re].escape, parameter[name[e]]].replace, parameter[constant[\*], constant[.*]]]], name[relpath]]] begin[:]
break
return[name[media]] | keyword[def] identifier[get_all_media] ( identifier[exclude] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[exclude] :
identifier[exclude] =[]
identifier[media] = identifier[set] ()
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[six] . identifier[text_type] ( identifier[settings] . identifier[MEDIA_ROOT] )):
keyword[for] identifier[name] keyword[in] identifier[files] :
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[name] ))
identifier[relpath] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] , identifier[settings] . identifier[MEDIA_ROOT] )
keyword[for] identifier[e] keyword[in] identifier[exclude] :
keyword[if] identifier[re] . identifier[match] ( literal[string] % identifier[re] . identifier[escape] ( identifier[e] ). identifier[replace] ( literal[string] , literal[string] ), identifier[relpath] ):
keyword[break]
keyword[else] :
identifier[media] . identifier[add] ( identifier[path] )
keyword[return] identifier[media] | def get_all_media(exclude=None):
"""
Get all media from MEDIA_ROOT
"""
if not exclude:
exclude = [] # depends on [control=['if'], data=[]]
media = set()
for (root, dirs, files) in os.walk(six.text_type(settings.MEDIA_ROOT)):
for name in files:
path = os.path.abspath(os.path.join(root, name))
relpath = os.path.relpath(path, settings.MEDIA_ROOT)
for e in exclude:
if re.match('^%s$' % re.escape(e).replace('\\*', '.*'), relpath):
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
else:
media.add(path) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=[]]
return media |
def vertical_headers(self, value):
        """
        Setter for **self.__vertical_headers** attribute.
        :param value: Attribute value.
        :type value: OrderedDict
        """
        # ``None`` clears the attribute; any other value must be an
        # ``OrderedDict`` (checked with ``assert``, so the guard vanishes
        # under ``python -O``).
        if value is not None:
            assert type(value) is OrderedDict, "'{0}' attribute: '{1}' type is not 'OrderedDict'!".format(
                "vertical_headers", value)
        # Name-mangled to ``_<ClassName>__vertical_headers`` on the instance.
        self.__vertical_headers = value | def function[vertical_headers, parameter[self, value]]:
constant[
Setter for **self.__vertical_headers** attribute.
:param value: Attribute value.
:type value: OrderedDict
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[OrderedDict]]]
name[self].__vertical_headers assign[=] name[value] | keyword[def] identifier[vertical_headers] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[OrderedDict] , literal[string] . identifier[format] (
literal[string] , identifier[value] )
identifier[self] . identifier[__vertical_headers] = identifier[value] | def vertical_headers(self, value):
"""
Setter for **self.__vertical_headers** attribute.
:param value: Attribute value.
:type value: OrderedDict
"""
if value is not None:
assert type(value) is OrderedDict, "'{0}' attribute: '{1}' type is not 'OrderedDict'!".format('vertical_headers', value) # depends on [control=['if'], data=['value']]
self.__vertical_headers = value |
def posterior_predictive_to_xarray(self):
    """Convert posterior_predictive samples to xarray."""
    draws = get_draws_stan3(
        self.posterior,
        model=self.posterior_model,
        variables=self.posterior_predictive,
    )
    return dict_to_dataset(draws, library=self.stan, coords=self.coords, dims=self.dims)
constant[Convert posterior_predictive samples to xarray.]
variable[posterior] assign[=] name[self].posterior
variable[posterior_model] assign[=] name[self].posterior_model
variable[posterior_predictive] assign[=] name[self].posterior_predictive
variable[data] assign[=] call[name[get_draws_stan3], parameter[name[posterior]]]
return[call[name[dict_to_dataset], parameter[name[data]]]] | keyword[def] identifier[posterior_predictive_to_xarray] ( identifier[self] ):
literal[string]
identifier[posterior] = identifier[self] . identifier[posterior]
identifier[posterior_model] = identifier[self] . identifier[posterior_model]
identifier[posterior_predictive] = identifier[self] . identifier[posterior_predictive]
identifier[data] = identifier[get_draws_stan3] ( identifier[posterior] , identifier[model] = identifier[posterior_model] , identifier[variables] = identifier[posterior_predictive] )
keyword[return] identifier[dict_to_dataset] ( identifier[data] , identifier[library] = identifier[self] . identifier[stan] , identifier[coords] = identifier[self] . identifier[coords] , identifier[dims] = identifier[self] . identifier[dims] ) | def posterior_predictive_to_xarray(self):
"""Convert posterior_predictive samples to xarray."""
posterior = self.posterior
posterior_model = self.posterior_model
posterior_predictive = self.posterior_predictive
data = get_draws_stan3(posterior, model=posterior_model, variables=posterior_predictive)
return dict_to_dataset(data, library=self.stan, coords=self.coords, dims=self.dims) |
def clean_kwargs(**kwargs):
    '''
    Return a dict without any of the __pub* keys (or any other keys starting
    with a dunder) from the kwargs dict passed into the execution module
    functions. These keys are useful for tracking what was used to invoke
    the function call, but they may not be desirable to have if passing the
    kwargs forward wholesale.
    Usage example:
    .. code-block:: python
        kwargs = __utils__['args.clean_kwargs'](**kwargs)
    '''
    # ``dict.items()`` iterates identically on Python 2 and 3 here, so the
    # ``six.iteritems`` indirection is unnecessary, and a comprehension
    # replaces the manual accumulator loop.
    return {key: val for key, val in kwargs.items() if not key.startswith('__')}
constant[
Return a dict without any of the __pub* keys (or any other keys starting
with a dunder) from the kwargs dict passed into the execution module
functions. These keys are useful for tracking what was used to invoke
the function call, but they may not be desirable to have if passing the
kwargs forward wholesale.
Usage example:
.. code-block:: python
kwargs = __utils__['args.clean_kwargs'](**kwargs)
]
variable[ret] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6e7460>, <ast.Name object at 0x7da20c6e7df0>]]] in starred[call[name[six].iteritems, parameter[name[kwargs]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e7e20> begin[:]
call[name[ret]][name[key]] assign[=] name[val]
return[name[ret]] | keyword[def] identifier[clean_kwargs] (** identifier[kwargs] ):
literal[string]
identifier[ret] ={}
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[kwargs] ):
keyword[if] keyword[not] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[ret] [ identifier[key] ]= identifier[val]
keyword[return] identifier[ret] | def clean_kwargs(**kwargs):
"""
Return a dict without any of the __pub* keys (or any other keys starting
with a dunder) from the kwargs dict passed into the execution module
functions. These keys are useful for tracking what was used to invoke
the function call, but they may not be desirable to have if passing the
kwargs forward wholesale.
Usage example:
.. code-block:: python
kwargs = __utils__['args.clean_kwargs'](**kwargs)
"""
ret = {}
for (key, val) in six.iteritems(kwargs):
if not key.startswith('__'):
ret[key] = val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return ret |
def conversational_mac(self, **kwargs):
        """Enable conversational mac learning on vdx switches
        Args:
            get (bool): Get config instead of editing config. (True, False)
            delete (bool): True, delete the mac-learning. (True, False)
            callback (function): A function executed upon completion of the
                method. The only parameter passed to `callback` will be the
                ``ElementTree`` `config`.
        Returns:
            Return value of `callback`.
        Raises:
            None
        Examples:
            >>> import pynos.device
            >>> conn = ('10.24.39.211', '22')
            >>> auth = ('admin', 'password')
            >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
            ... output = dev.interface.conversational_mac()
            ... output = dev.interface.conversational_mac(get=True)
            ... output = dev.interface.conversational_mac(delete=True)
        """
        callback = kwargs.pop('callback', self._callback)
        # Build the <learning-mode>conversational</learning-mode> request
        # payload via the generated binding on the MAC address table object.
        mac_learning = getattr(self._mac_address_table,
                               'mac_address_table_learning_mode')
        config = mac_learning(learning_mode='conversational')
        if kwargs.pop('get', False):
            output = callback(config, handler='get_config')
            item = output.data.find('.//{*}learning-mode')
            if item is not None:
                return True
            # NOTE(review): when no <learning-mode> element is found, control
            # falls through to the edit-config call below instead of
            # returning an explicit "not enabled" result -- confirm intended.
        if kwargs.pop('delete', False):
            # Mark the element for removal in the edit-config request.
            config.find('.//*learning-mode').set('operation', 'delete')
        return callback(config) | def function[conversational_mac, parameter[self]]:
constant[Enable conversational mac learning on vdx switches
Args:
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.conversational_mac()
... output = dev.interface.conversational_mac(get=True)
... output = dev.interface.conversational_mac(delete=True)
]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
variable[mac_learning] assign[=] call[name[getattr], parameter[name[self]._mac_address_table, constant[mac_address_table_learning_mode]]]
variable[config] assign[=] call[name[mac_learning], parameter[]]
if call[name[kwargs].pop, parameter[constant[get], constant[False]]] begin[:]
variable[output] assign[=] call[name[callback], parameter[name[config]]]
variable[item] assign[=] call[name[output].data.find, parameter[constant[.//{*}learning-mode]]]
if compare[name[item] is_not constant[None]] begin[:]
return[constant[True]]
if call[name[kwargs].pop, parameter[constant[delete], constant[False]]] begin[:]
call[call[name[config].find, parameter[constant[.//*learning-mode]]].set, parameter[constant[operation], constant[delete]]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[conversational_mac] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
identifier[mac_learning] = identifier[getattr] ( identifier[self] . identifier[_mac_address_table] ,
literal[string] )
identifier[config] = identifier[mac_learning] ( identifier[learning_mode] = literal[string] )
keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ):
identifier[output] = identifier[callback] ( identifier[config] , identifier[handler] = literal[string] )
identifier[item] = identifier[output] . identifier[data] . identifier[find] ( literal[string] )
keyword[if] identifier[item] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ):
identifier[config] . identifier[find] ( literal[string] ). identifier[set] ( literal[string] , literal[string] )
keyword[return] identifier[callback] ( identifier[config] ) | def conversational_mac(self, **kwargs):
"""Enable conversational mac learning on vdx switches
Args:
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.conversational_mac()
... output = dev.interface.conversational_mac(get=True)
... output = dev.interface.conversational_mac(delete=True)
"""
callback = kwargs.pop('callback', self._callback)
mac_learning = getattr(self._mac_address_table, 'mac_address_table_learning_mode')
config = mac_learning(learning_mode='conversational')
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
item = output.data.find('.//{*}learning-mode')
if item is not None:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if kwargs.pop('delete', False):
config.find('.//*learning-mode').set('operation', 'delete') # depends on [control=['if'], data=[]]
return callback(config) |
def _validation_error(prop, prop_type, prop_value, expected):
    """ Default validation for updated properties """
    # Report either the offending value or the offending type, depending on
    # whether a type was supplied.
    if prop_type is None:
        attrib, assigned = 'value', prop_value
    else:
        attrib, assigned = 'type', prop_type
    invalid = {prop: prop_value} if attrib == 'value' else {}
    raise ValidationError(
        'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}',
        attrib=attrib, prop=prop, assigned=assigned, expected=expected,
        invalid=invalid
    )
constant[ Default validation for updated properties ]
if compare[name[prop_type] is constant[None]] begin[:]
variable[attrib] assign[=] constant[value]
variable[assigned] assign[=] name[prop_value]
<ast.Raise object at 0x7da20c6a94e0> | keyword[def] identifier[_validation_error] ( identifier[prop] , identifier[prop_type] , identifier[prop_value] , identifier[expected] ):
literal[string]
keyword[if] identifier[prop_type] keyword[is] keyword[None] :
identifier[attrib] = literal[string]
identifier[assigned] = identifier[prop_value]
keyword[else] :
identifier[attrib] = literal[string]
identifier[assigned] = identifier[prop_type]
keyword[raise] identifier[ValidationError] (
literal[string] ,
identifier[attrib] = identifier[attrib] , identifier[prop] = identifier[prop] , identifier[assigned] = identifier[assigned] , identifier[expected] = identifier[expected] ,
identifier[invalid] ={ identifier[prop] : identifier[prop_value] } keyword[if] identifier[attrib] == literal[string] keyword[else] {}
) | def _validation_error(prop, prop_type, prop_value, expected):
""" Default validation for updated properties """
if prop_type is None:
attrib = 'value'
assigned = prop_value # depends on [control=['if'], data=[]]
else:
attrib = 'type'
assigned = prop_type
raise ValidationError('Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}', attrib=attrib, prop=prop, assigned=assigned, expected=expected, invalid={prop: prop_value} if attrib == 'value' else {}) |
def Collect(
    self, knowledge_base, artifact_definition, searcher):
    """Collects values using a Windows Registry value artifact definition.

    Iterates the artifact definition's Windows Registry key/value sources,
    expands each key path through the searcher and parses every matching
    Registry key.

    Args:
      knowledge_base (KnowledgeBase): to fill with preprocessing information.
      artifact_definition (artifacts.ArtifactDefinition): artifact definition.
      searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to
          preprocess the Windows Registry.

    Raises:
      PreProcessFail: if the Windows Registry key or value cannot be read.
    """
    for source in artifact_definition.sources:
      # Only Windows Registry key and value sources are handled here.
      if source.type_indicator not in (
          artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY,
          artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
        continue
      if source.type_indicator == (
          artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
        # Key sources carry bare key paths; normalize them to the dict shape
        # used by value sources (no 'value' entry means "parse whole key").
        key_value_pairs = [{'key': key} for key in source.keys]
      else:
        key_value_pairs = source.key_value_pairs
      for key_value_pair in key_value_pairs:
        key_path = key_value_pair['key']
        # The artifact definitions currently incorrectly define
        # CurrentControlSet so we correct it here for now.
        # Also see: https://github.com/ForensicArtifacts/artifacts/issues/120
        key_path_upper = key_path.upper()
        if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):
          # key_path[23:] strips the 23-character '%%CURRENT_CONTROL_SET%%'
          # placeholder before grafting the remainder onto the real path.
          key_path = '{0:s}{1:s}'.format(
              'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet', key_path[23:])
        find_spec = registry_searcher.FindSpec(key_path_glob=key_path)
        # NOTE: key_path is deliberately rebound here to each concrete key
        # path matched by the glob expansion.
        for key_path in searcher.Find(find_specs=[find_spec]):
          try:
            registry_key = searcher.GetKeyByPath(key_path)
          except IOError as exception:
            raise errors.PreProcessFail((
                'Unable to retrieve Windows Registry key: {0:s} with error: '
                '{1!s}').format(key_path, exception))
          if registry_key:
            # 'value' is optional; None makes _ParseKey parse the whole key.
            value_name = key_value_pair.get('value', None)
            self._ParseKey(knowledge_base, registry_key, value_name)
constant[Collects values using a Windows Registry value artifact definition.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
artifact_definition (artifacts.ArtifactDefinition): artifact definition.
searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to
preprocess the Windows Registry.
Raises:
PreProcessFail: if the Windows Registry key or value cannot be read.
]
for taget[name[source]] in starred[name[artifact_definition].sources] begin[:]
if compare[name[source].type_indicator <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da20c795f00>, <ast.Attribute object at 0x7da20c7964d0>]]] begin[:]
continue
if compare[name[source].type_indicator equal[==] name[artifact_definitions].TYPE_INDICATOR_WINDOWS_REGISTRY_KEY] begin[:]
variable[key_value_pairs] assign[=] <ast.ListComp object at 0x7da20c794d30>
for taget[name[key_value_pair]] in starred[name[key_value_pairs]] begin[:]
variable[key_path] assign[=] call[name[key_value_pair]][constant[key]]
variable[key_path_upper] assign[=] call[name[key_path].upper, parameter[]]
if call[name[key_path_upper].startswith, parameter[constant[%%CURRENT_CONTROL_SET%%]]] begin[:]
variable[key_path] assign[=] call[constant[{0:s}{1:s}].format, parameter[constant[HKEY_LOCAL_MACHINE\System\CurrentControlSet], call[name[key_path]][<ast.Slice object at 0x7da207f03a30>]]]
variable[find_spec] assign[=] call[name[registry_searcher].FindSpec, parameter[]]
for taget[name[key_path]] in starred[call[name[searcher].Find, parameter[]]] begin[:]
<ast.Try object at 0x7da207f02ec0>
if name[registry_key] begin[:]
variable[value_name] assign[=] call[name[key_value_pair].get, parameter[constant[value], constant[None]]]
call[name[self]._ParseKey, parameter[name[knowledge_base], name[registry_key], name[value_name]]] | keyword[def] identifier[Collect] (
identifier[self] , identifier[knowledge_base] , identifier[artifact_definition] , identifier[searcher] ):
literal[string]
keyword[for] identifier[source] keyword[in] identifier[artifact_definition] . identifier[sources] :
keyword[if] identifier[source] . identifier[type_indicator] keyword[not] keyword[in] (
identifier[artifact_definitions] . identifier[TYPE_INDICATOR_WINDOWS_REGISTRY_KEY] ,
identifier[artifact_definitions] . identifier[TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE] ):
keyword[continue]
keyword[if] identifier[source] . identifier[type_indicator] ==(
identifier[artifact_definitions] . identifier[TYPE_INDICATOR_WINDOWS_REGISTRY_KEY] ):
identifier[key_value_pairs] =[{ literal[string] : identifier[key] } keyword[for] identifier[key] keyword[in] identifier[source] . identifier[keys] ]
keyword[else] :
identifier[key_value_pairs] = identifier[source] . identifier[key_value_pairs]
keyword[for] identifier[key_value_pair] keyword[in] identifier[key_value_pairs] :
identifier[key_path] = identifier[key_value_pair] [ literal[string] ]
identifier[key_path_upper] = identifier[key_path] . identifier[upper] ()
keyword[if] identifier[key_path_upper] . identifier[startswith] ( literal[string] ):
identifier[key_path] = literal[string] . identifier[format] (
literal[string] , identifier[key_path] [ literal[int] :])
identifier[find_spec] = identifier[registry_searcher] . identifier[FindSpec] ( identifier[key_path_glob] = identifier[key_path] )
keyword[for] identifier[key_path] keyword[in] identifier[searcher] . identifier[Find] ( identifier[find_specs] =[ identifier[find_spec] ]):
keyword[try] :
identifier[registry_key] = identifier[searcher] . identifier[GetKeyByPath] ( identifier[key_path] )
keyword[except] identifier[IOError] keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[PreProcessFail] ((
literal[string]
literal[string] ). identifier[format] ( identifier[key_path] , identifier[exception] ))
keyword[if] identifier[registry_key] :
identifier[value_name] = identifier[key_value_pair] . identifier[get] ( literal[string] , keyword[None] )
identifier[self] . identifier[_ParseKey] ( identifier[knowledge_base] , identifier[registry_key] , identifier[value_name] ) | def Collect(self, knowledge_base, artifact_definition, searcher):
"""Collects values using a Windows Registry value artifact definition.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
artifact_definition (artifacts.ArtifactDefinition): artifact definition.
searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to
preprocess the Windows Registry.
Raises:
PreProcessFail: if the Windows Registry key or value cannot be read.
"""
for source in artifact_definition.sources:
if source.type_indicator not in (artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY, artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
continue # depends on [control=['if'], data=[]]
if source.type_indicator == artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
key_value_pairs = [{'key': key} for key in source.keys] # depends on [control=['if'], data=[]]
else:
key_value_pairs = source.key_value_pairs
for key_value_pair in key_value_pairs:
key_path = key_value_pair['key']
# The artifact definitions currently incorrectly define
# CurrentControlSet so we correct it here for now.
# Also see: https://github.com/ForensicArtifacts/artifacts/issues/120
key_path_upper = key_path.upper()
if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):
key_path = '{0:s}{1:s}'.format('HKEY_LOCAL_MACHINE\\System\\CurrentControlSet', key_path[23:]) # depends on [control=['if'], data=[]]
find_spec = registry_searcher.FindSpec(key_path_glob=key_path)
for key_path in searcher.Find(find_specs=[find_spec]):
try:
registry_key = searcher.GetKeyByPath(key_path) # depends on [control=['try'], data=[]]
except IOError as exception:
raise errors.PreProcessFail('Unable to retrieve Windows Registry key: {0:s} with error: {1!s}'.format(key_path, exception)) # depends on [control=['except'], data=['exception']]
if registry_key:
value_name = key_value_pair.get('value', None)
self._ParseKey(knowledge_base, registry_key, value_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key_path']] # depends on [control=['for'], data=['key_value_pair']] # depends on [control=['for'], data=['source']] |
def get_batches(qp_pairs, batch_size, need_sort=True):
    '''
    Split question/passage pairs into shuffled batches.

    When need_sort is True the pairs are first ordered by descending
    passage length (ties broken by id) so each batch holds similarly
    sized passages; the batches themselves are then shuffled.
    '''
    if need_sort:
        qp_pairs = sorted(
            qp_pairs,
            key=lambda pair: (len(pair['passage_tokens']), pair['id']),
            reverse=True)
    batches = []
    for start in range(0, len(qp_pairs), batch_size):
        batches.append({'qp_pairs': qp_pairs[start:start + batch_size]})
    shuffle(batches)
    return batches
constant[
Get batches data and shuffle.
]
if name[need_sort] begin[:]
variable[qp_pairs] assign[=] call[name[sorted], parameter[name[qp_pairs]]]
variable[batches] assign[=] <ast.ListComp object at 0x7da18fe92e00>
call[name[shuffle], parameter[name[batches]]]
return[name[batches]] | keyword[def] identifier[get_batches] ( identifier[qp_pairs] , identifier[batch_size] , identifier[need_sort] = keyword[True] ):
literal[string]
keyword[if] identifier[need_sort] :
identifier[qp_pairs] = identifier[sorted] ( identifier[qp_pairs] , identifier[key] = keyword[lambda] identifier[qp] :(
identifier[len] ( identifier[qp] [ literal[string] ]), identifier[qp] [ literal[string] ]), identifier[reverse] = keyword[True] )
identifier[batches] =[{ literal[string] : identifier[qp_pairs] [ identifier[i] :( identifier[i] + identifier[batch_size] )]}
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[qp_pairs] ), identifier[batch_size] )]
identifier[shuffle] ( identifier[batches] )
keyword[return] identifier[batches] | def get_batches(qp_pairs, batch_size, need_sort=True):
"""
Get batches data and shuffle.
"""
if need_sort:
qp_pairs = sorted(qp_pairs, key=lambda qp: (len(qp['passage_tokens']), qp['id']), reverse=True) # depends on [control=['if'], data=[]]
batches = [{'qp_pairs': qp_pairs[i:i + batch_size]} for i in range(0, len(qp_pairs), batch_size)]
shuffle(batches)
return batches |
def __check_command_completion(self, testsemicolon=True):
        """Check for command(s) completion

        This function should be called each time a new argument is
        seen by the parser in order to check a command is complete. As
        not only one command can be ended when receiving a new
        argument (nested commands case), we apply the same work to
        parent commands.

        :param testsemicolon: if True, indicates that the next
         expected token must be a semicolon (for commands that need one)
        :return: True if command is considered as complete, False otherwise.
        """
        if not self.__curcommand.iscomplete():
            # Current command still wants more arguments: nothing to close.
            return True

        ctype = self.__curcommand.get_type()
        if ctype == "action" or \
                (ctype == "control" and
                 not self.__curcommand.accept_children):
            # Actions and childless control commands end with a semicolon,
            # unless the caller explicitly told us not to expect one.
            if testsemicolon:
                self.__set_expected("semicolon")
            return True

        # Walk up the parent chain: a finished nested command may in turn
        # complete one or more of its enclosing commands.
        while self.__curcommand.parent:
            cmd = self.__curcommand
            self.__curcommand = self.__curcommand.parent
            if self.__curcommand.get_type() in ["control", "test"]:
                if self.__curcommand.iscomplete():
                    if self.__curcommand.get_type() == "control":
                        # A complete control command terminates the walk.
                        break
                    continue
                # Parent is incomplete: offer it the just-finished command as
                # a "test" argument (add=False — presumably a validation-only
                # check without attaching; confirm in check_next_arg).
                if not self.__curcommand.check_next_arg("test", cmd, add=False):
                    return False
                if not self.__curcommand.iscomplete():
                    if self.__curcommand.variable_args_nb:
                        # Inside a test list: the next token is either another
                        # test (after a comma) or the closing parenthesis.
                        self.__set_expected("comma", "right_parenthesis")
                    break
        return True
constant[Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
]
if <ast.UnaryOp object at 0x7da2043456f0> begin[:]
return[constant[True]]
variable[ctype] assign[=] call[name[self].__curcommand.get_type, parameter[]]
if <ast.BoolOp object at 0x7da204346b60> begin[:]
if name[testsemicolon] begin[:]
call[name[self].__set_expected, parameter[constant[semicolon]]]
return[constant[True]]
while name[self].__curcommand.parent begin[:]
variable[cmd] assign[=] name[self].__curcommand
name[self].__curcommand assign[=] name[self].__curcommand.parent
if compare[call[name[self].__curcommand.get_type, parameter[]] in list[[<ast.Constant object at 0x7da18f00f2e0>, <ast.Constant object at 0x7da18f00da20>]]] begin[:]
if call[name[self].__curcommand.iscomplete, parameter[]] begin[:]
if compare[call[name[self].__curcommand.get_type, parameter[]] equal[==] constant[control]] begin[:]
break
continue
if <ast.UnaryOp object at 0x7da18f723160> begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da18f720760> begin[:]
if name[self].__curcommand.variable_args_nb begin[:]
call[name[self].__set_expected, parameter[constant[comma], constant[right_parenthesis]]]
break
return[constant[True]] | keyword[def] identifier[__check_command_completion] ( identifier[self] , identifier[testsemicolon] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__curcommand] . identifier[iscomplete] ():
keyword[return] keyword[True]
identifier[ctype] = identifier[self] . identifier[__curcommand] . identifier[get_type] ()
keyword[if] identifier[ctype] == literal[string] keyword[or] ( identifier[ctype] == literal[string] keyword[and]
keyword[not] identifier[self] . identifier[__curcommand] . identifier[accept_children] ):
keyword[if] identifier[testsemicolon] :
identifier[self] . identifier[__set_expected] ( literal[string] )
keyword[return] keyword[True]
keyword[while] identifier[self] . identifier[__curcommand] . identifier[parent] :
identifier[cmd] = identifier[self] . identifier[__curcommand]
identifier[self] . identifier[__curcommand] = identifier[self] . identifier[__curcommand] . identifier[parent]
keyword[if] identifier[self] . identifier[__curcommand] . identifier[get_type] () keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[self] . identifier[__curcommand] . identifier[iscomplete] ():
keyword[if] identifier[self] . identifier[__curcommand] . identifier[get_type] ()== literal[string] :
keyword[break]
keyword[continue]
keyword[if] keyword[not] identifier[self] . identifier[__curcommand] . identifier[check_next_arg] ( literal[string] , identifier[cmd] , identifier[add] = keyword[False] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[__curcommand] . identifier[iscomplete] ():
keyword[if] identifier[self] . identifier[__curcommand] . identifier[variable_args_nb] :
identifier[self] . identifier[__set_expected] ( literal[string] , literal[string] )
keyword[break]
keyword[return] keyword[True] | def __check_command_completion(self, testsemicolon=True):
"""Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
"""
if not self.__curcommand.iscomplete():
return True # depends on [control=['if'], data=[]]
ctype = self.__curcommand.get_type()
if ctype == 'action' or (ctype == 'control' and (not self.__curcommand.accept_children)):
if testsemicolon:
self.__set_expected('semicolon') # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=[]]
while self.__curcommand.parent:
cmd = self.__curcommand
self.__curcommand = self.__curcommand.parent
if self.__curcommand.get_type() in ['control', 'test']:
if self.__curcommand.iscomplete():
if self.__curcommand.get_type() == 'control':
break # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
if not self.__curcommand.check_next_arg('test', cmd, add=False):
return False # depends on [control=['if'], data=[]]
if not self.__curcommand.iscomplete():
if self.__curcommand.variable_args_nb:
self.__set_expected('comma', 'right_parenthesis') # depends on [control=['if'], data=[]]
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return True |
def assert_equivalent(o1, o2):
    '''Asserts that o1 and o2 are distinct, yet equivalent objects
    '''
    both_are_types = isinstance(o1, type) and isinstance(o2, type)
    if not both_are_types:
        # Classes may legitimately be the same object; everything else
        # must be a distinct instance.
        assert o1 is not o2
    # Equality must hold symmetrically.
    assert o1 == o2
    assert o2 == o1
constant[Asserts that o1 and o2 are distinct, yet equivalent objects
]
if <ast.UnaryOp object at 0x7da1b1302800> begin[:]
assert[compare[name[o1] is_not name[o2]]]
assert[compare[name[o1] equal[==] name[o2]]]
assert[compare[name[o2] equal[==] name[o1]]] | keyword[def] identifier[assert_equivalent] ( identifier[o1] , identifier[o2] ):
literal[string]
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[o1] , identifier[type] ) keyword[and] identifier[isinstance] ( identifier[o2] , identifier[type] )):
keyword[assert] identifier[o1] keyword[is] keyword[not] identifier[o2]
keyword[assert] identifier[o1] == identifier[o2]
keyword[assert] identifier[o2] == identifier[o1] | def assert_equivalent(o1, o2):
"""Asserts that o1 and o2 are distinct, yet equivalent objects
"""
if not (isinstance(o1, type) and isinstance(o2, type)):
assert o1 is not o2 # depends on [control=['if'], data=[]]
assert o1 == o2
assert o2 == o1 |
def choose_connect_args(metadata, config):
    """
    Choose the SSL mode and optional root cert for the connection.

    `metadata` is accepted for interface compatibility; only `config`'s
    require_ssl / verify_ssl / ssl_cert_path settings are consulted.
    """
    if not config.verify_ssl:
        # Without verification, SSL is either mandatory or opportunistic.
        mode = "require" if config.require_ssl else "prefer"
        return dict(sslmode=mode)
    if not config.ssl_cert_path:
        raise Exception("SSL certificate path (`ssl_cert_path`) must be configured for verification")
    return dict(
        sslmode="verify-full",
        sslrootcert=config.ssl_cert_path,
    )
constant[
Choose the SSL mode and optional root cert for the connection.
]
if <ast.BoolOp object at 0x7da1b0c3d600> begin[:]
return[call[name[dict], parameter[]]]
if <ast.BoolOp object at 0x7da1b0c3f970> begin[:]
return[call[name[dict], parameter[]]]
if <ast.UnaryOp object at 0x7da1b0c3e410> begin[:]
<ast.Raise object at 0x7da1b0c3f460>
return[call[name[dict], parameter[]]] | keyword[def] identifier[choose_connect_args] ( identifier[metadata] , identifier[config] ):
literal[string]
keyword[if] keyword[not] identifier[config] . identifier[require_ssl] keyword[and] keyword[not] identifier[config] . identifier[verify_ssl] :
keyword[return] identifier[dict] (
identifier[sslmode] = literal[string] ,
)
keyword[if] identifier[config] . identifier[require_ssl] keyword[and] keyword[not] identifier[config] . identifier[verify_ssl] :
keyword[return] identifier[dict] (
identifier[sslmode] = literal[string] ,
)
keyword[if] keyword[not] identifier[config] . identifier[ssl_cert_path] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[dict] (
identifier[sslmode] = literal[string] ,
identifier[sslrootcert] = identifier[config] . identifier[ssl_cert_path] ,
) | def choose_connect_args(metadata, config):
"""
Choose the SSL mode and optional root cert for the connection.
"""
if not config.require_ssl and (not config.verify_ssl):
return dict(sslmode='prefer') # depends on [control=['if'], data=[]]
if config.require_ssl and (not config.verify_ssl):
return dict(sslmode='require') # depends on [control=['if'], data=[]]
if not config.ssl_cert_path:
raise Exception('SSL certificate path (`ssl_cert_path`) must be configured for verification') # depends on [control=['if'], data=[]]
return dict(sslmode='verify-full', sslrootcert=config.ssl_cert_path) |
def start(self):
        """Open sockets to the server and start threads

        Does nothing if either worker thread is already alive.
        """
        already_running = (self.writeThread.isAlive()
                           or self.readThread.isAlive())
        if already_running:
            return
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect(self.ADDR)
        self.running = True
        self.writeThread.start()
        self.readThread.start()
constant[Open sockets to the server and start threads]
if <ast.BoolOp object at 0x7da18c4cc9d0> begin[:]
name[self].client assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_STREAM]]
call[name[self].client.connect, parameter[name[self].ADDR]]
name[self].running assign[=] constant[True]
call[name[self].writeThread.start, parameter[]]
call[name[self].readThread.start, parameter[]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[writeThread] . identifier[isAlive] () keyword[and] keyword[not] identifier[self] . identifier[readThread] . identifier[isAlive] ():
identifier[self] . identifier[client] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] )
identifier[self] . identifier[client] . identifier[connect] ( identifier[self] . identifier[ADDR] )
identifier[self] . identifier[running] = keyword[True]
identifier[self] . identifier[writeThread] . identifier[start] ()
identifier[self] . identifier[readThread] . identifier[start] () | def start(self):
"""Open sockets to the server and start threads"""
if not self.writeThread.isAlive() and (not self.readThread.isAlive()):
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect(self.ADDR)
self.running = True
self.writeThread.start()
self.readThread.start() # depends on [control=['if'], data=[]] |
def computeEnvelope(self, placeCode):
    """
    Compute an envelope for use in suppressing border cells.

    Positions within `1 - envelopeWidth` of the center (place code 0.5)
    get weight 1; beyond that the weight falls off as a Gaussian scaled
    by `envelopeFactor`.

    :param placeCode: The place code representing the population the envelope
                      will be used for.
    :return: A numpy array that can be elementwise-multiplied with activations
             for the given cell population to apply the envelope.
    """
    distances = np.abs(placeCode - 0.5)
    threshold = 1 - self.envelopeWidth
    values = []
    for d in distances:
        if d < threshold:
            values.append(1)
        else:
            # (d - threshold) / width == (d - 1 + width) / width
            scaled = (d - threshold) / self.envelopeWidth
            values.append(np.exp(-1. * self.envelopeFactor * scaled ** 2))
    return np.asarray(values)
return np.asarray(envelope) | def function[computeEnvelope, parameter[self, placeCode]]:
constant[
Compute an envelope for use in suppressing border cells.
:param placeCode: The place code representing the population the envelope
will be used for.
:return: A numpy array that can be elementwise-multiplied with activations
for the given cell population to apply the envelope.
]
variable[places] assign[=] call[name[np].abs, parameter[binary_operation[name[placeCode] - constant[0.5]]]]
variable[envelope] assign[=] <ast.ListComp object at 0x7da1b0902e00>
return[call[name[np].asarray, parameter[name[envelope]]]] | keyword[def] identifier[computeEnvelope] ( identifier[self] , identifier[placeCode] ):
literal[string]
identifier[places] = identifier[np] . identifier[abs] ( identifier[placeCode] - literal[int] )
identifier[envelope] =[ literal[int] keyword[if] identifier[p] < literal[int] - identifier[self] . identifier[envelopeWidth] keyword[else]
identifier[np] . identifier[exp] (- literal[int] * identifier[self] . identifier[envelopeFactor] *
(( identifier[p] - literal[int] + identifier[self] . identifier[envelopeWidth] )/ identifier[self] . identifier[envelopeWidth] )** literal[int] )
keyword[for] identifier[p] keyword[in] identifier[places] ]
keyword[return] identifier[np] . identifier[asarray] ( identifier[envelope] ) | def computeEnvelope(self, placeCode):
"""
Compute an envelope for use in suppressing border cells.
:param placeCode: The place code representing the population the envelope
will be used for.
:return: A numpy array that can be elementwise-multiplied with activations
for the given cell population to apply the envelope.
"""
places = np.abs(placeCode - 0.5)
envelope = [1 if p < 1 - self.envelopeWidth else np.exp(-1.0 * self.envelopeFactor * ((p - 1 + self.envelopeWidth) / self.envelopeWidth) ** 2) for p in places]
return np.asarray(envelope) |
def phon(self, cls='current', previousdelimiter="", strict=False, correctionhandling=CorrectionHandling.CURRENT):
    """See :meth:`AbstractElement.phon`

    Resolves the phonetic content of a correction: New/Current children are
    consulted for CURRENT/EITHER handling, Original children for
    ORIGINAL/EITHER handling.

    Raises:
        NoSuchPhon: if no matching child provides phonetic content.
    """
    if cls == 'original':
        # Backward compatibility: the old API selected original content via
        # the class name instead of correctionhandling.
        correctionhandling = CorrectionHandling.ORIGINAL
    if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
        for e in self:
            if isinstance(e, New) or isinstance(e, Current):
                return previousdelimiter + e.phon(cls, "", strict, correctionhandling)
    if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
        for e in self:
            if isinstance(e, Original):
                # Bug fix: `strict` was previously omitted here, so
                # `correctionhandling` was passed into the `strict`
                # positional slot; keep the argument order consistent with
                # the New/Current branch above.
                return previousdelimiter + e.phon(cls, "", strict, correctionhandling)
    raise NoSuchPhon
constant[See :meth:`AbstractElement.phon`]
if compare[name[cls] equal[==] constant[original]] begin[:]
variable[correctionhandling] assign[=] name[CorrectionHandling].ORIGINAL
if compare[name[correctionhandling] in tuple[[<ast.Attribute object at 0x7da204346350>, <ast.Attribute object at 0x7da204344940>]]] begin[:]
for taget[name[e]] in starred[name[self]] begin[:]
if <ast.BoolOp object at 0x7da204345ae0> begin[:]
return[binary_operation[name[previousdelimiter] + call[name[e].phon, parameter[name[cls], constant[], name[strict], name[correctionhandling]]]]]
if compare[name[correctionhandling] in tuple[[<ast.Attribute object at 0x7da204346b90>, <ast.Attribute object at 0x7da2043445e0>]]] begin[:]
for taget[name[e]] in starred[name[self]] begin[:]
if call[name[isinstance], parameter[name[e], name[Original]]] begin[:]
return[binary_operation[name[previousdelimiter] + call[name[e].phon, parameter[name[cls], constant[], name[correctionhandling]]]]]
<ast.Raise object at 0x7da204346f80> | keyword[def] identifier[phon] ( identifier[self] , identifier[cls] = literal[string] , identifier[previousdelimiter] = literal[string] , identifier[strict] = keyword[False] , identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[CURRENT] ):
literal[string]
keyword[if] identifier[cls] == literal[string] : identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[ORIGINAL]
keyword[if] identifier[correctionhandling] keyword[in] ( identifier[CorrectionHandling] . identifier[CURRENT] , identifier[CorrectionHandling] . identifier[EITHER] ):
keyword[for] identifier[e] keyword[in] identifier[self] :
keyword[if] identifier[isinstance] ( identifier[e] , identifier[New] ) keyword[or] identifier[isinstance] ( identifier[e] , identifier[Current] ):
keyword[return] identifier[previousdelimiter] + identifier[e] . identifier[phon] ( identifier[cls] , literal[string] , identifier[strict] , identifier[correctionhandling] )
keyword[if] identifier[correctionhandling] keyword[in] ( identifier[CorrectionHandling] . identifier[ORIGINAL] , identifier[CorrectionHandling] . identifier[EITHER] ):
keyword[for] identifier[e] keyword[in] identifier[self] :
keyword[if] identifier[isinstance] ( identifier[e] , identifier[Original] ):
keyword[return] identifier[previousdelimiter] + identifier[e] . identifier[phon] ( identifier[cls] , literal[string] , identifier[correctionhandling] )
keyword[raise] identifier[NoSuchPhon] | def phon(self, cls='current', previousdelimiter='', strict=False, correctionhandling=CorrectionHandling.CURRENT):
"""See :meth:`AbstractElement.phon`"""
if cls == 'original':
correctionhandling = CorrectionHandling.ORIGINAL #backward compatibility # depends on [control=['if'], data=[]]
if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
for e in self:
if isinstance(e, New) or isinstance(e, Current):
return previousdelimiter + e.phon(cls, '', strict, correctionhandling) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=['correctionhandling']]
if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
for e in self:
if isinstance(e, Original):
return previousdelimiter + e.phon(cls, '', correctionhandling) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=['correctionhandling']]
raise NoSuchPhon |
def write_flash(self, addr, page_buffer, target_page, page_count):
        """Initiate flashing of data in the buffer to flash.

        Sends a write-flash request (command 0x18) to the bootloader at CRTP
        address ``addr``, asking it to copy ``page_count`` buffer pages
        starting at ``page_buffer`` into flash starting at ``target_page``.

        On success returns True; on failure the bootloader's error code (or
        -1 on retry exhaustion) is left in ``self.error_code``.
        """
        pk = None
        # Flushing downlink ...
        pk = self.link.receive_packet(0)
        while pk is not None:
            pk = self.link.receive_packet(0)
        retry_counter = 5
        # Keep (re)sending the request until the reply echoes our address and
        # the 0x18 command — i.e. it is the ack for *this* request — or the
        # retry budget runs out.
        while ((not pk or pk.header != 0xFF or
                struct.unpack('<BB', pk.data[0:2]) != (addr, 0x18)) and
               retry_counter >= 0):
            pk = CRTPPacket()
            pk.set_header(0xFF, 0xFF)
            pk.data = struct.pack('<BBHHH', addr, 0x18, page_buffer,
                                  target_page, page_count)
            self.link.send_packet(pk)
            # 1 s receive timeout per attempt.
            pk = self.link.receive_packet(1)
            retry_counter -= 1
        if retry_counter < 0:
            # Never received a matching ack.
            self.error_code = -1
            return False
        # data[2] is the success flag, data[3] the bootloader error code.
        self.error_code = pk.data[3]
        return pk.data[2] == 1
constant[Initiate flashing of data in the buffer to flash.]
variable[pk] assign[=] constant[None]
variable[pk] assign[=] call[name[self].link.receive_packet, parameter[constant[0]]]
while compare[name[pk] is_not constant[None]] begin[:]
variable[pk] assign[=] call[name[self].link.receive_packet, parameter[constant[0]]]
variable[retry_counter] assign[=] constant[5]
while <ast.BoolOp object at 0x7da1b16636a0> begin[:]
variable[pk] assign[=] call[name[CRTPPacket], parameter[]]
call[name[pk].set_header, parameter[constant[255], constant[255]]]
name[pk].data assign[=] call[name[struct].pack, parameter[constant[<BBHHH], name[addr], constant[24], name[page_buffer], name[target_page], name[page_count]]]
call[name[self].link.send_packet, parameter[name[pk]]]
variable[pk] assign[=] call[name[self].link.receive_packet, parameter[constant[1]]]
<ast.AugAssign object at 0x7da1b16633a0>
if compare[name[retry_counter] less[<] constant[0]] begin[:]
name[self].error_code assign[=] <ast.UnaryOp object at 0x7da1b16636d0>
return[constant[False]]
name[self].error_code assign[=] call[name[pk].data][constant[3]]
return[compare[call[name[pk].data][constant[2]] equal[==] constant[1]]] | keyword[def] identifier[write_flash] ( identifier[self] , identifier[addr] , identifier[page_buffer] , identifier[target_page] , identifier[page_count] ):
literal[string]
identifier[pk] = keyword[None]
identifier[pk] = identifier[self] . identifier[link] . identifier[receive_packet] ( literal[int] )
keyword[while] identifier[pk] keyword[is] keyword[not] keyword[None] :
identifier[pk] = identifier[self] . identifier[link] . identifier[receive_packet] ( literal[int] )
identifier[retry_counter] = literal[int]
keyword[while] (( keyword[not] identifier[pk] keyword[or] identifier[pk] . identifier[header] != literal[int] keyword[or]
identifier[struct] . identifier[unpack] ( literal[string] , identifier[pk] . identifier[data] [ literal[int] : literal[int] ])!=( identifier[addr] , literal[int] )) keyword[and]
identifier[retry_counter] >= literal[int] ):
identifier[pk] = identifier[CRTPPacket] ()
identifier[pk] . identifier[set_header] ( literal[int] , literal[int] )
identifier[pk] . identifier[data] = identifier[struct] . identifier[pack] ( literal[string] , identifier[addr] , literal[int] , identifier[page_buffer] ,
identifier[target_page] , identifier[page_count] )
identifier[self] . identifier[link] . identifier[send_packet] ( identifier[pk] )
identifier[pk] = identifier[self] . identifier[link] . identifier[receive_packet] ( literal[int] )
identifier[retry_counter] -= literal[int]
keyword[if] identifier[retry_counter] < literal[int] :
identifier[self] . identifier[error_code] =- literal[int]
keyword[return] keyword[False]
identifier[self] . identifier[error_code] = identifier[pk] . identifier[data] [ literal[int] ]
keyword[return] identifier[pk] . identifier[data] [ literal[int] ]== literal[int] | def write_flash(self, addr, page_buffer, target_page, page_count):
"""Initiate flashing of data in the buffer to flash."""
# print "Write page", flashPage
# print "Writing page [%d] and [%d] forward" % (flashPage, nPage)
pk = None
# Flushing downlink ...
pk = self.link.receive_packet(0)
while pk is not None:
pk = self.link.receive_packet(0) # depends on [control=['while'], data=['pk']]
retry_counter = 5
# print "Flasing to 0x{:X}".format(addr)
while (not pk or pk.header != 255 or struct.unpack('<BB', pk.data[0:2]) != (addr, 24)) and retry_counter >= 0:
pk = CRTPPacket()
pk.set_header(255, 255)
pk.data = struct.pack('<BBHHH', addr, 24, page_buffer, target_page, page_count)
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1 # depends on [control=['while'], data=[]]
if retry_counter < 0:
self.error_code = -1
return False # depends on [control=['if'], data=[]]
self.error_code = pk.data[3]
return pk.data[2] == 1 |
def cleanup(self):
'''
remove sockets on shutdown
'''
log.debug('ConCache cleaning up')
if os.path.exists(self.cache_sock):
os.remove(self.cache_sock)
if os.path.exists(self.update_sock):
os.remove(self.update_sock)
if os.path.exists(self.upd_t_sock):
os.remove(self.upd_t_sock) | def function[cleanup, parameter[self]]:
constant[
remove sockets on shutdown
]
call[name[log].debug, parameter[constant[ConCache cleaning up]]]
if call[name[os].path.exists, parameter[name[self].cache_sock]] begin[:]
call[name[os].remove, parameter[name[self].cache_sock]]
if call[name[os].path.exists, parameter[name[self].update_sock]] begin[:]
call[name[os].remove, parameter[name[self].update_sock]]
if call[name[os].path.exists, parameter[name[self].upd_t_sock]] begin[:]
call[name[os].remove, parameter[name[self].upd_t_sock]] | keyword[def] identifier[cleanup] ( identifier[self] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[cache_sock] ):
identifier[os] . identifier[remove] ( identifier[self] . identifier[cache_sock] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[update_sock] ):
identifier[os] . identifier[remove] ( identifier[self] . identifier[update_sock] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[upd_t_sock] ):
identifier[os] . identifier[remove] ( identifier[self] . identifier[upd_t_sock] ) | def cleanup(self):
"""
remove sockets on shutdown
"""
log.debug('ConCache cleaning up')
if os.path.exists(self.cache_sock):
os.remove(self.cache_sock) # depends on [control=['if'], data=[]]
if os.path.exists(self.update_sock):
os.remove(self.update_sock) # depends on [control=['if'], data=[]]
if os.path.exists(self.upd_t_sock):
os.remove(self.upd_t_sock) # depends on [control=['if'], data=[]] |
def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden lyaer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(dna_kernel_size):
for ykern in range(dna_kernel_size):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
kernel = tf.expand_dims(
kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False) | def function[dna_transformation, parameter[prev_image, dna_input, dna_kernel_size, relu_shift]]:
constant[Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden lyaer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
]
variable[prev_image_pad] assign[=] call[name[tf].pad, parameter[name[prev_image], list[[<ast.List object at 0x7da1b20051e0>, <ast.List object at 0x7da1b2004850>, <ast.List object at 0x7da1b20050f0>, <ast.List object at 0x7da1b20051b0>]]]]
variable[image_height] assign[=] call[name[int], parameter[call[call[name[prev_image].get_shape, parameter[]]][constant[1]]]]
variable[image_width] assign[=] call[name[int], parameter[call[call[name[prev_image].get_shape, parameter[]]][constant[2]]]]
variable[inputs] assign[=] list[[]]
for taget[name[xkern]] in starred[call[name[range], parameter[name[dna_kernel_size]]]] begin[:]
for taget[name[ykern]] in starred[call[name[range], parameter[name[dna_kernel_size]]]] begin[:]
call[name[inputs].append, parameter[call[name[tf].expand_dims, parameter[call[name[tf].slice, parameter[name[prev_image_pad], list[[<ast.Constant object at 0x7da1b1ef9c30>, <ast.Name object at 0x7da1b1ef83a0>, <ast.Name object at 0x7da1b1ef9270>, <ast.Constant object at 0x7da1b1ef8550>]], list[[<ast.UnaryOp object at 0x7da1b1efbf10>, <ast.Name object at 0x7da1b1efba90>, <ast.Name object at 0x7da1b1efbf40>, <ast.UnaryOp object at 0x7da1b1ef89d0>]]]], list[[<ast.Constant object at 0x7da1b1ef94b0>]]]]]]
variable[inputs] assign[=] call[name[tf].concat, parameter[]]
variable[kernel] assign[=] binary_operation[call[name[tf].nn.relu, parameter[binary_operation[name[dna_input] - name[relu_shift]]]] + name[relu_shift]]
variable[kernel] assign[=] call[name[tf].expand_dims, parameter[binary_operation[name[kernel] / call[name[tf].reduce_sum, parameter[name[kernel], list[[<ast.Constant object at 0x7da1b20995d0>]]]]], list[[<ast.Constant object at 0x7da1b2099540>]]]]
return[call[name[tf].reduce_sum, parameter[binary_operation[name[kernel] * name[inputs]], list[[<ast.Constant object at 0x7da1b2099480>]]]]] | keyword[def] identifier[dna_transformation] ( identifier[prev_image] , identifier[dna_input] , identifier[dna_kernel_size] , identifier[relu_shift] ):
literal[string]
identifier[prev_image_pad] = identifier[tf] . identifier[pad] ( identifier[prev_image] ,[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ]])
identifier[image_height] = identifier[int] ( identifier[prev_image] . identifier[get_shape] ()[ literal[int] ])
identifier[image_width] = identifier[int] ( identifier[prev_image] . identifier[get_shape] ()[ literal[int] ])
identifier[inputs] =[]
keyword[for] identifier[xkern] keyword[in] identifier[range] ( identifier[dna_kernel_size] ):
keyword[for] identifier[ykern] keyword[in] identifier[range] ( identifier[dna_kernel_size] ):
identifier[inputs] . identifier[append] (
identifier[tf] . identifier[expand_dims] (
identifier[tf] . identifier[slice] ( identifier[prev_image_pad] ,[ literal[int] , identifier[xkern] , identifier[ykern] , literal[int] ],
[- literal[int] , identifier[image_height] , identifier[image_width] ,- literal[int] ]),[ literal[int] ]))
identifier[inputs] = identifier[tf] . identifier[concat] ( identifier[axis] = literal[int] , identifier[values] = identifier[inputs] )
identifier[kernel] = identifier[tf] . identifier[nn] . identifier[relu] ( identifier[dna_input] - identifier[relu_shift] )+ identifier[relu_shift]
identifier[kernel] = identifier[tf] . identifier[expand_dims] (
identifier[kernel] / identifier[tf] . identifier[reduce_sum] ( identifier[kernel] ,[ literal[int] ], identifier[keep_dims] = keyword[True] ),[ literal[int] ])
keyword[return] identifier[tf] . identifier[reduce_sum] ( identifier[kernel] * identifier[inputs] ,[ literal[int] ], identifier[keep_dims] = keyword[False] ) | def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden lyaer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(dna_kernel_size):
for ykern in range(dna_kernel_size):
inputs.append(tf.expand_dims(tf.slice(prev_image_pad, [0, xkern, ykern, 0], [-1, image_height, image_width, -1]), [3])) # depends on [control=['for'], data=['ykern']] # depends on [control=['for'], data=['xkern']]
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
kernel = tf.expand_dims(kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False) |
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict) | def function[patch_records, parameter[diff, from_records, strict]]:
constant[
Apply the patch to the sequence of records, returning the transformed
records.
]
return[call[name[patch].apply, parameter[name[diff], name[from_records]]]] | keyword[def] identifier[patch_records] ( identifier[diff] , identifier[from_records] , identifier[strict] = keyword[True] ):
literal[string]
keyword[return] identifier[patch] . identifier[apply] ( identifier[diff] , identifier[from_records] , identifier[strict] = identifier[strict] ) | def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict) |
def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None):
"""
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
By defafult, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The "exclude" argument, if not None, must be a pattern or a list
of patterns following the same UNIX shell semantics.
Elements matching a least one pattern of this list will be excluded
from the result.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
"""
dirname, basename = os.path.split(pathname)
if not dirname:
result = self._glob1(basename, ondisk, source, strings)
else:
if has_glob_magic(dirname):
list = self.glob(dirname, ondisk, source, False, exclude)
else:
list = [self.Dir(dirname, create=True)]
result = []
for dir in list:
r = dir._glob1(basename, ondisk, source, strings)
if strings:
r = [os.path.join(str(dir), x) for x in r]
result.extend(r)
if exclude:
excludes = []
excludeList = SCons.Util.flatten(exclude)
for x in excludeList:
r = self.glob(x, ondisk, source, strings)
excludes.extend(r)
result = [x for x in result if not any(fnmatch.fnmatch(str(x), str(e)) for e in SCons.Util.flatten(excludes))]
return sorted(result, key=lambda a: str(a)) | def function[glob, parameter[self, pathname, ondisk, source, strings, exclude]]:
constant[
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
By defafult, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The "exclude" argument, if not None, must be a pattern or a list
of patterns following the same UNIX shell semantics.
Elements matching a least one pattern of this list will be excluded
from the result.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
]
<ast.Tuple object at 0x7da20cabfa30> assign[=] call[name[os].path.split, parameter[name[pathname]]]
if <ast.UnaryOp object at 0x7da1b0c01210> begin[:]
variable[result] assign[=] call[name[self]._glob1, parameter[name[basename], name[ondisk], name[source], name[strings]]]
if name[exclude] begin[:]
variable[excludes] assign[=] list[[]]
variable[excludeList] assign[=] call[name[SCons].Util.flatten, parameter[name[exclude]]]
for taget[name[x]] in starred[name[excludeList]] begin[:]
variable[r] assign[=] call[name[self].glob, parameter[name[x], name[ondisk], name[source], name[strings]]]
call[name[excludes].extend, parameter[name[r]]]
variable[result] assign[=] <ast.ListComp object at 0x7da2047e9d20>
return[call[name[sorted], parameter[name[result]]]] | keyword[def] identifier[glob] ( identifier[self] , identifier[pathname] , identifier[ondisk] = keyword[True] , identifier[source] = keyword[False] , identifier[strings] = keyword[False] , identifier[exclude] = keyword[None] ):
literal[string]
identifier[dirname] , identifier[basename] = identifier[os] . identifier[path] . identifier[split] ( identifier[pathname] )
keyword[if] keyword[not] identifier[dirname] :
identifier[result] = identifier[self] . identifier[_glob1] ( identifier[basename] , identifier[ondisk] , identifier[source] , identifier[strings] )
keyword[else] :
keyword[if] identifier[has_glob_magic] ( identifier[dirname] ):
identifier[list] = identifier[self] . identifier[glob] ( identifier[dirname] , identifier[ondisk] , identifier[source] , keyword[False] , identifier[exclude] )
keyword[else] :
identifier[list] =[ identifier[self] . identifier[Dir] ( identifier[dirname] , identifier[create] = keyword[True] )]
identifier[result] =[]
keyword[for] identifier[dir] keyword[in] identifier[list] :
identifier[r] = identifier[dir] . identifier[_glob1] ( identifier[basename] , identifier[ondisk] , identifier[source] , identifier[strings] )
keyword[if] identifier[strings] :
identifier[r] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[str] ( identifier[dir] ), identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[r] ]
identifier[result] . identifier[extend] ( identifier[r] )
keyword[if] identifier[exclude] :
identifier[excludes] =[]
identifier[excludeList] = identifier[SCons] . identifier[Util] . identifier[flatten] ( identifier[exclude] )
keyword[for] identifier[x] keyword[in] identifier[excludeList] :
identifier[r] = identifier[self] . identifier[glob] ( identifier[x] , identifier[ondisk] , identifier[source] , identifier[strings] )
identifier[excludes] . identifier[extend] ( identifier[r] )
identifier[result] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[result] keyword[if] keyword[not] identifier[any] ( identifier[fnmatch] . identifier[fnmatch] ( identifier[str] ( identifier[x] ), identifier[str] ( identifier[e] )) keyword[for] identifier[e] keyword[in] identifier[SCons] . identifier[Util] . identifier[flatten] ( identifier[excludes] ))]
keyword[return] identifier[sorted] ( identifier[result] , identifier[key] = keyword[lambda] identifier[a] : identifier[str] ( identifier[a] )) | def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None):
"""
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
By defafult, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The "exclude" argument, if not None, must be a pattern or a list
of patterns following the same UNIX shell semantics.
Elements matching a least one pattern of this list will be excluded
from the result.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
"""
(dirname, basename) = os.path.split(pathname)
if not dirname:
result = self._glob1(basename, ondisk, source, strings) # depends on [control=['if'], data=[]]
else:
if has_glob_magic(dirname):
list = self.glob(dirname, ondisk, source, False, exclude) # depends on [control=['if'], data=[]]
else:
list = [self.Dir(dirname, create=True)]
result = []
for dir in list:
r = dir._glob1(basename, ondisk, source, strings)
if strings:
r = [os.path.join(str(dir), x) for x in r] # depends on [control=['if'], data=[]]
result.extend(r) # depends on [control=['for'], data=['dir']]
if exclude:
excludes = []
excludeList = SCons.Util.flatten(exclude)
for x in excludeList:
r = self.glob(x, ondisk, source, strings)
excludes.extend(r) # depends on [control=['for'], data=['x']]
result = [x for x in result if not any((fnmatch.fnmatch(str(x), str(e)) for e in SCons.Util.flatten(excludes)))] # depends on [control=['if'], data=[]]
return sorted(result, key=lambda a: str(a)) |
def best(args):
"""
%prog best blastfile
print the best hit for each query in the blastfile
"""
p = OptionParser(best.__doc__)
p.add_option("-n", default=1, type="int",
help="get best N hits [default: %default]")
p.add_option("--nosort", default=False, action="store_true",
help="assume BLAST is already sorted [default: %default]")
p.add_option("--hsps", default=False, action="store_true",
help="get all HSPs for the best pair [default: %default]")
p.add_option("--subject", default=False, action="store_true",
help="get best hit(s) for subject genome instead [default: %default]")
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
n = opts.n
hsps = opts.hsps
tmpdir = opts.tmpdir
ref = "query" if not opts.subject else "subject"
if not opts.nosort:
sargs = [blastfile]
if tmpdir:
sargs += ["-T {0}".format(tmpdir)]
if ref != "query":
sargs += ["--refscore"]
sort(sargs)
else:
logging.debug("Assuming sorted BLAST")
if not opts.subject:
bestblastfile = blastfile + ".best"
else:
bestblastfile = blastfile + ".subject.best"
fw = open(bestblastfile, "w")
b = Blast(blastfile)
for q, bline in b.iter_best_hit(N=n, hsps=hsps, ref=ref):
print(bline, file=fw)
return bestblastfile | def function[best, parameter[args]]:
constant[
%prog best blastfile
print the best hit for each query in the blastfile
]
variable[p] assign[=] call[name[OptionParser], parameter[name[best].__doc__]]
call[name[p].add_option, parameter[constant[-n]]]
call[name[p].add_option, parameter[constant[--nosort]]]
call[name[p].add_option, parameter[constant[--hsps]]]
call[name[p].add_option, parameter[constant[--subject]]]
call[name[p].set_tmpdir, parameter[]]
<ast.Tuple object at 0x7da1b08e9f00> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08ebdf0>]]
<ast.Tuple object at 0x7da1b08eb2e0> assign[=] name[args]
variable[n] assign[=] name[opts].n
variable[hsps] assign[=] name[opts].hsps
variable[tmpdir] assign[=] name[opts].tmpdir
variable[ref] assign[=] <ast.IfExp object at 0x7da1b08eac80>
if <ast.UnaryOp object at 0x7da1b08eb490> begin[:]
variable[sargs] assign[=] list[[<ast.Name object at 0x7da1b08eb430>]]
if name[tmpdir] begin[:]
<ast.AugAssign object at 0x7da1b08e81c0>
if compare[name[ref] not_equal[!=] constant[query]] begin[:]
<ast.AugAssign object at 0x7da1b08eadd0>
call[name[sort], parameter[name[sargs]]]
if <ast.UnaryOp object at 0x7da1b08eb790> begin[:]
variable[bestblastfile] assign[=] binary_operation[name[blastfile] + constant[.best]]
variable[fw] assign[=] call[name[open], parameter[name[bestblastfile], constant[w]]]
variable[b] assign[=] call[name[Blast], parameter[name[blastfile]]]
for taget[tuple[[<ast.Name object at 0x7da1b08a0ee0>, <ast.Name object at 0x7da1b08a3910>]]] in starred[call[name[b].iter_best_hit, parameter[]]] begin[:]
call[name[print], parameter[name[bline]]]
return[name[bestblastfile]] | keyword[def] identifier[best] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[best] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_tmpdir] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[blastfile] ,= identifier[args]
identifier[n] = identifier[opts] . identifier[n]
identifier[hsps] = identifier[opts] . identifier[hsps]
identifier[tmpdir] = identifier[opts] . identifier[tmpdir]
identifier[ref] = literal[string] keyword[if] keyword[not] identifier[opts] . identifier[subject] keyword[else] literal[string]
keyword[if] keyword[not] identifier[opts] . identifier[nosort] :
identifier[sargs] =[ identifier[blastfile] ]
keyword[if] identifier[tmpdir] :
identifier[sargs] +=[ literal[string] . identifier[format] ( identifier[tmpdir] )]
keyword[if] identifier[ref] != literal[string] :
identifier[sargs] +=[ literal[string] ]
identifier[sort] ( identifier[sargs] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] )
keyword[if] keyword[not] identifier[opts] . identifier[subject] :
identifier[bestblastfile] = identifier[blastfile] + literal[string]
keyword[else] :
identifier[bestblastfile] = identifier[blastfile] + literal[string]
identifier[fw] = identifier[open] ( identifier[bestblastfile] , literal[string] )
identifier[b] = identifier[Blast] ( identifier[blastfile] )
keyword[for] identifier[q] , identifier[bline] keyword[in] identifier[b] . identifier[iter_best_hit] ( identifier[N] = identifier[n] , identifier[hsps] = identifier[hsps] , identifier[ref] = identifier[ref] ):
identifier[print] ( identifier[bline] , identifier[file] = identifier[fw] )
keyword[return] identifier[bestblastfile] | def best(args):
"""
%prog best blastfile
print the best hit for each query in the blastfile
"""
p = OptionParser(best.__doc__)
p.add_option('-n', default=1, type='int', help='get best N hits [default: %default]')
p.add_option('--nosort', default=False, action='store_true', help='assume BLAST is already sorted [default: %default]')
p.add_option('--hsps', default=False, action='store_true', help='get all HSPs for the best pair [default: %default]')
p.add_option('--subject', default=False, action='store_true', help='get best hit(s) for subject genome instead [default: %default]')
p.set_tmpdir()
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(blastfile,) = args
n = opts.n
hsps = opts.hsps
tmpdir = opts.tmpdir
ref = 'query' if not opts.subject else 'subject'
if not opts.nosort:
sargs = [blastfile]
if tmpdir:
sargs += ['-T {0}'.format(tmpdir)] # depends on [control=['if'], data=[]]
if ref != 'query':
sargs += ['--refscore'] # depends on [control=['if'], data=[]]
sort(sargs) # depends on [control=['if'], data=[]]
else:
logging.debug('Assuming sorted BLAST')
if not opts.subject:
bestblastfile = blastfile + '.best' # depends on [control=['if'], data=[]]
else:
bestblastfile = blastfile + '.subject.best'
fw = open(bestblastfile, 'w')
b = Blast(blastfile)
for (q, bline) in b.iter_best_hit(N=n, hsps=hsps, ref=ref):
print(bline, file=fw) # depends on [control=['for'], data=[]]
return bestblastfile |
def channels_create(self, name, **kwargs):
"""Creates a new public channel, optionally including users."""
return self.__call_api_post('channels.create', name=name, kwargs=kwargs) | def function[channels_create, parameter[self, name]]:
constant[Creates a new public channel, optionally including users.]
return[call[name[self].__call_api_post, parameter[constant[channels.create]]]] | keyword[def] identifier[channels_create] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[__call_api_post] ( literal[string] , identifier[name] = identifier[name] , identifier[kwargs] = identifier[kwargs] ) | def channels_create(self, name, **kwargs):
"""Creates a new public channel, optionally including users."""
return self.__call_api_post('channels.create', name=name, kwargs=kwargs) |
def _verify_default(self, spec, path):
"""Verifies that the default specified in the given spec is valid."""
field_type = spec['type']
default = spec['default']
# If it's a function there's nothing we can really do except assume its valid
if callable(default):
return
if isinstance(field_type, Array):
# Verify we'd got a list as our default
if not isinstance(default, list):
raise SchemaFormatException("Default value for Array at {} is not a list of values.", path)
# Ensure the contents are of the correct type
for i, item in enumerate(default):
if isinstance(field_type.contained_type, Schema):
if not self._valid_schema_default(item):
raise SchemaFormatException("Default value for Schema is not valid.", path)
elif not isinstance(item, field_type.contained_type):
raise SchemaFormatException("Not all items in the default list for the Array field at {} are of the correct type.", path)
elif isinstance(field_type, Schema):
if not self._valid_schema_default(default):
raise SchemaFormatException("Default value for Schema is not valid.", path)
else:
if not isinstance(default, field_type):
raise SchemaFormatException("Default value for {} is not of the nominated type.", path) | def function[_verify_default, parameter[self, spec, path]]:
constant[Verifies that the default specified in the given spec is valid.]
variable[field_type] assign[=] call[name[spec]][constant[type]]
variable[default] assign[=] call[name[spec]][constant[default]]
if call[name[callable], parameter[name[default]]] begin[:]
return[None]
if call[name[isinstance], parameter[name[field_type], name[Array]]] begin[:]
if <ast.UnaryOp object at 0x7da1b11744f0> begin[:]
<ast.Raise object at 0x7da1b1174d60>
for taget[tuple[[<ast.Name object at 0x7da1b11752a0>, <ast.Name object at 0x7da1b1174f70>]]] in starred[call[name[enumerate], parameter[name[default]]]] begin[:]
if call[name[isinstance], parameter[name[field_type].contained_type, name[Schema]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1175d50> begin[:]
<ast.Raise object at 0x7da1b1175480> | keyword[def] identifier[_verify_default] ( identifier[self] , identifier[spec] , identifier[path] ):
literal[string]
identifier[field_type] = identifier[spec] [ literal[string] ]
identifier[default] = identifier[spec] [ literal[string] ]
keyword[if] identifier[callable] ( identifier[default] ):
keyword[return]
keyword[if] identifier[isinstance] ( identifier[field_type] , identifier[Array] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[default] , identifier[list] ):
keyword[raise] identifier[SchemaFormatException] ( literal[string] , identifier[path] )
keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[default] ):
keyword[if] identifier[isinstance] ( identifier[field_type] . identifier[contained_type] , identifier[Schema] ):
keyword[if] keyword[not] identifier[self] . identifier[_valid_schema_default] ( identifier[item] ):
keyword[raise] identifier[SchemaFormatException] ( literal[string] , identifier[path] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[item] , identifier[field_type] . identifier[contained_type] ):
keyword[raise] identifier[SchemaFormatException] ( literal[string] , identifier[path] )
keyword[elif] identifier[isinstance] ( identifier[field_type] , identifier[Schema] ):
keyword[if] keyword[not] identifier[self] . identifier[_valid_schema_default] ( identifier[default] ):
keyword[raise] identifier[SchemaFormatException] ( literal[string] , identifier[path] )
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[default] , identifier[field_type] ):
keyword[raise] identifier[SchemaFormatException] ( literal[string] , identifier[path] ) | def _verify_default(self, spec, path):
"""Verifies that the default specified in the given spec is valid."""
field_type = spec['type']
default = spec['default']
# If it's a function there's nothing we can really do except assume its valid
if callable(default):
return # depends on [control=['if'], data=[]]
if isinstance(field_type, Array):
# Verify we'd got a list as our default
if not isinstance(default, list):
raise SchemaFormatException('Default value for Array at {} is not a list of values.', path) # depends on [control=['if'], data=[]]
# Ensure the contents are of the correct type
for (i, item) in enumerate(default):
if isinstance(field_type.contained_type, Schema):
if not self._valid_schema_default(item):
raise SchemaFormatException('Default value for Schema is not valid.', path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not isinstance(item, field_type.contained_type):
raise SchemaFormatException('Not all items in the default list for the Array field at {} are of the correct type.', path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(field_type, Schema):
if not self._valid_schema_default(default):
raise SchemaFormatException('Default value for Schema is not valid.', path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not isinstance(default, field_type):
raise SchemaFormatException('Default value for {} is not of the nominated type.', path) # depends on [control=['if'], data=[]] |
def raise_501(instance):
    """Abort the current request with a 501 (Not Implemented) response code.

    The ``Allow`` response header is populated with the return value of the
    :func:`Resource.get_allowed_methods` function before raising.

    :param instance: Resource instance (used to access the response)
    :type instance: :class:`webob.resource.Resource`
    :raises: :class:`webob.exceptions.ResponseException` of status 501
    """
    response = instance.response
    response.status = 501
    response.headers['Allow'] = instance.get_allowed_methods()
    raise ResponseException(response)
constant[Abort the current request with a 501 (Not Implemented) response code.
Sets the ``Allow`` response header to the return value of the
:func:`Resource.get_allowed_methods` function.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 501
]
name[instance].response.status assign[=] constant[501]
call[name[instance].response.headers][constant[Allow]] assign[=] call[name[instance].get_allowed_methods, parameter[]]
<ast.Raise object at 0x7da1b228e650> | keyword[def] identifier[raise_501] ( identifier[instance] ):
literal[string]
identifier[instance] . identifier[response] . identifier[status] = literal[int]
identifier[instance] . identifier[response] . identifier[headers] [ literal[string] ]= identifier[instance] . identifier[get_allowed_methods] ()
keyword[raise] identifier[ResponseException] ( identifier[instance] . identifier[response] ) | def raise_501(instance):
"""Abort the current request with a 501 (Not Implemented) response code.
Sets the ``Allow`` response header to the return value of the
:func:`Resource.get_allowed_methods` function.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 501
"""
instance.response.status = 501
instance.response.headers['Allow'] = instance.get_allowed_methods()
raise ResponseException(instance.response) |
def run_population(population, evolution, gpus):
    """
    Change save and load paths for obtained population, save config.json with model config,
    run population via current python executor (with which evolve.py already run)
    and on given devices (-1 means CPU, other integers - visible for evolve.py GPUs)

    Args:
        population: list of dictionaries - configs of current population
        evolution: ParamsEvolution
        gpus: list of given devices (list of integers)

    Returns:
        None
    """
    population_size = len(population)
    # Launch candidates in batches of len(gpus): one training subprocess per
    # device per batch, then wait for the whole batch before the next one.
    # NOTE(review): when population_size is an exact multiple of len(gpus) the
    # final loop iteration launches nothing (no i < population_size) -- harmless.
    for k in range(population_size // len(gpus) + 1):
        procs = []
        for j in range(len(gpus)):
            i = k * len(gpus) + j
            if i < population_size:
                # Resolve this candidate's model save directory from its
                # (parsed) config and persist the config there as config.json.
                save_path = expand_path(
                    evolution.get_value_from_config(parse_config(population[i]),
                                                    evolution.path_to_models_save_path))
                save_path.mkdir(parents=True, exist_ok=True)
                f_name = save_path / "config.json"
                save_json(population[i], f_name)
                # Redirect the child's stdout/stderr into per-candidate log files.
                with save_path.joinpath('out.txt').open('w', encoding='utf8') as outlog,\
                        save_path.joinpath('err.txt').open('w', encoding='utf8') as errlog:
                    env = dict(os.environ)
                    # Pin the child to one GPU unless CPU-only mode ([-1]) was requested.
                    if len(gpus) > 1 or gpus[0] != -1:
                        env['CUDA_VISIBLE_DEVICES'] = str(gpus[j])
                    # Train with the same interpreter that is running evolve.py.
                    procs.append(Popen("{} -m deeppavlov train {}".format(sys.executable, str(f_name)),
                                       shell=True, stdout=outlog, stderr=errlog, env=env))
        # Block until every process in this batch finishes.
        for j, proc in enumerate(procs):
            i = k * len(gpus) + j
            log.info(f'Waiting on {i}th proc')
            if proc.wait() != 0:
                # Non-zero exit code: surface the candidate's stderr log as a warning.
                save_path = expand_path(
                    evolution.get_value_from_config(parse_config(population[i]),
                                                    evolution.path_to_models_save_path))
                with save_path.joinpath('err.txt').open(encoding='utf8') as errlog:
                    log.warning(f'Population {i} returned an error code {proc.returncode} and an error log:\n' +
                                errlog.read())
    return None
return None | def function[run_population, parameter[population, evolution, gpus]]:
constant[
Change save and load paths for obtained population, save config.json with model config,
run population via current python executor (with which evolve.py already run)
and on given devices (-1 means CPU, other integeres - visible for evolve.py GPUs)
Args:
population: list of dictionaries - configs of current population
evolution: ParamsEvolution
gpus: list of given devices (list of integers)
Returns:
None
]
variable[population_size] assign[=] call[name[len], parameter[name[population]]]
for taget[name[k]] in starred[call[name[range], parameter[binary_operation[binary_operation[name[population_size] <ast.FloorDiv object at 0x7da2590d6bc0> call[name[len], parameter[name[gpus]]]] + constant[1]]]]] begin[:]
variable[procs] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[gpus]]]]]] begin[:]
variable[i] assign[=] binary_operation[binary_operation[name[k] * call[name[len], parameter[name[gpus]]]] + name[j]]
if compare[name[i] less[<] name[population_size]] begin[:]
variable[save_path] assign[=] call[name[expand_path], parameter[call[name[evolution].get_value_from_config, parameter[call[name[parse_config], parameter[call[name[population]][name[i]]]], name[evolution].path_to_models_save_path]]]]
call[name[save_path].mkdir, parameter[]]
variable[f_name] assign[=] binary_operation[name[save_path] / constant[config.json]]
call[name[save_json], parameter[call[name[population]][name[i]], name[f_name]]]
with call[call[name[save_path].joinpath, parameter[constant[out.txt]]].open, parameter[constant[w]]] begin[:]
variable[env] assign[=] call[name[dict], parameter[name[os].environ]]
if <ast.BoolOp object at 0x7da2054a4a90> begin[:]
call[name[env]][constant[CUDA_VISIBLE_DEVICES]] assign[=] call[name[str], parameter[call[name[gpus]][name[j]]]]
call[name[procs].append, parameter[call[name[Popen], parameter[call[constant[{} -m deeppavlov train {}].format, parameter[name[sys].executable, call[name[str], parameter[name[f_name]]]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da2054a4ca0>, <ast.Name object at 0x7da2054a4bb0>]]] in starred[call[name[enumerate], parameter[name[procs]]]] begin[:]
variable[i] assign[=] binary_operation[binary_operation[name[k] * call[name[len], parameter[name[gpus]]]] + name[j]]
call[name[log].info, parameter[<ast.JoinedStr object at 0x7da2054a6fe0>]]
if compare[call[name[proc].wait, parameter[]] not_equal[!=] constant[0]] begin[:]
variable[save_path] assign[=] call[name[expand_path], parameter[call[name[evolution].get_value_from_config, parameter[call[name[parse_config], parameter[call[name[population]][name[i]]]], name[evolution].path_to_models_save_path]]]]
with call[call[name[save_path].joinpath, parameter[constant[err.txt]]].open, parameter[]] begin[:]
call[name[log].warning, parameter[binary_operation[<ast.JoinedStr object at 0x7da2054a5240> + call[name[errlog].read, parameter[]]]]]
return[constant[None]] | keyword[def] identifier[run_population] ( identifier[population] , identifier[evolution] , identifier[gpus] ):
literal[string]
identifier[population_size] = identifier[len] ( identifier[population] )
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[population_size] // identifier[len] ( identifier[gpus] )+ literal[int] ):
identifier[procs] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[gpus] )):
identifier[i] = identifier[k] * identifier[len] ( identifier[gpus] )+ identifier[j]
keyword[if] identifier[i] < identifier[population_size] :
identifier[save_path] = identifier[expand_path] (
identifier[evolution] . identifier[get_value_from_config] ( identifier[parse_config] ( identifier[population] [ identifier[i] ]),
identifier[evolution] . identifier[path_to_models_save_path] ))
identifier[save_path] . identifier[mkdir] ( identifier[parents] = keyword[True] , identifier[exist_ok] = keyword[True] )
identifier[f_name] = identifier[save_path] / literal[string]
identifier[save_json] ( identifier[population] [ identifier[i] ], identifier[f_name] )
keyword[with] identifier[save_path] . identifier[joinpath] ( literal[string] ). identifier[open] ( literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[outlog] , identifier[save_path] . identifier[joinpath] ( literal[string] ). identifier[open] ( literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[errlog] :
identifier[env] = identifier[dict] ( identifier[os] . identifier[environ] )
keyword[if] identifier[len] ( identifier[gpus] )> literal[int] keyword[or] identifier[gpus] [ literal[int] ]!=- literal[int] :
identifier[env] [ literal[string] ]= identifier[str] ( identifier[gpus] [ identifier[j] ])
identifier[procs] . identifier[append] ( identifier[Popen] ( literal[string] . identifier[format] ( identifier[sys] . identifier[executable] , identifier[str] ( identifier[f_name] )),
identifier[shell] = keyword[True] , identifier[stdout] = identifier[outlog] , identifier[stderr] = identifier[errlog] , identifier[env] = identifier[env] ))
keyword[for] identifier[j] , identifier[proc] keyword[in] identifier[enumerate] ( identifier[procs] ):
identifier[i] = identifier[k] * identifier[len] ( identifier[gpus] )+ identifier[j]
identifier[log] . identifier[info] ( literal[string] )
keyword[if] identifier[proc] . identifier[wait] ()!= literal[int] :
identifier[save_path] = identifier[expand_path] (
identifier[evolution] . identifier[get_value_from_config] ( identifier[parse_config] ( identifier[population] [ identifier[i] ]),
identifier[evolution] . identifier[path_to_models_save_path] ))
keyword[with] identifier[save_path] . identifier[joinpath] ( literal[string] ). identifier[open] ( identifier[encoding] = literal[string] ) keyword[as] identifier[errlog] :
identifier[log] . identifier[warning] ( literal[string] +
identifier[errlog] . identifier[read] ())
keyword[return] keyword[None] | def run_population(population, evolution, gpus):
"""
Change save and load paths for obtained population, save config.json with model config,
run population via current python executor (with which evolve.py already run)
and on given devices (-1 means CPU, other integeres - visible for evolve.py GPUs)
Args:
population: list of dictionaries - configs of current population
evolution: ParamsEvolution
gpus: list of given devices (list of integers)
Returns:
None
"""
population_size = len(population)
for k in range(population_size // len(gpus) + 1):
procs = []
for j in range(len(gpus)):
i = k * len(gpus) + j
if i < population_size:
save_path = expand_path(evolution.get_value_from_config(parse_config(population[i]), evolution.path_to_models_save_path))
save_path.mkdir(parents=True, exist_ok=True)
f_name = save_path / 'config.json'
save_json(population[i], f_name)
with save_path.joinpath('out.txt').open('w', encoding='utf8') as outlog, save_path.joinpath('err.txt').open('w', encoding='utf8') as errlog:
env = dict(os.environ)
if len(gpus) > 1 or gpus[0] != -1:
env['CUDA_VISIBLE_DEVICES'] = str(gpus[j]) # depends on [control=['if'], data=[]]
procs.append(Popen('{} -m deeppavlov train {}'.format(sys.executable, str(f_name)), shell=True, stdout=outlog, stderr=errlog, env=env)) # depends on [control=['with'], data=['outlog']] # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=['j']]
for (j, proc) in enumerate(procs):
i = k * len(gpus) + j
log.info(f'Waiting on {i}th proc')
if proc.wait() != 0:
save_path = expand_path(evolution.get_value_from_config(parse_config(population[i]), evolution.path_to_models_save_path))
with save_path.joinpath('err.txt').open(encoding='utf8') as errlog:
log.warning(f'Population {i} returned an error code {proc.returncode} and an error log:\n' + errlog.read()) # depends on [control=['with'], data=['errlog']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['k']]
return None |
def compile_model(self, target_instance_family, input_shape, output_path, framework=None, framework_version=None,
                  compile_max_run=5 * 60, tags=None, **kwargs):
    """Compile a Neo model using the input model.

    Args:
        target_instance_family (str): Identifies the device that you want to run your model after compilation, for
            example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
            deeplens, rasp3b
        input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
            dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
        output_path (str): Specifies where to store the compiled model
        framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
            'tensorflow', 'pytorch', 'onnx', 'xgboost'
        framework_version (str): The version of the framework
        compile_max_run (int): Timeout in seconds for compilation (default: 5 * 60).
            After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
            current status.
        tags (list[dict]): List of tags for labeling a compilation job. For more, see
            https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
        **kwargs: Passed to invocation of ``create_model()``. Implementations may customize
            ``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
            For more, see the implementation docs.

    Returns:
        sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
    """
    if target_instance_family not in NEO_ALLOWED_TARGET_INSTANCE_FAMILY:
        # BUGFIX: the two string halves previously concatenated without a space
        # ("...target_instance_family,allowed values...").
        raise ValueError("Please use valid target_instance_family, "
                         "allowed values: {}".format(NEO_ALLOWED_TARGET_INSTANCE_FAMILY))
    if framework and framework not in NEO_ALLOWED_FRAMEWORKS:
        raise ValueError("Please use valid framework, allowed values: {}".format(NEO_ALLOWED_FRAMEWORKS))
    # framework and framework_version must be supplied together (or both omitted).
    if (framework is None) != (framework_version is None):
        raise ValueError("You should provide framework and framework_version at the same time.")
    model = self.create_model(**kwargs)
    # Cache the compiled model per target family so repeated lookups reuse it.
    self._compiled_models[target_instance_family] = model.compile(target_instance_family,
                                                                  input_shape,
                                                                  output_path,
                                                                  self.role,
                                                                  tags,
                                                                  self._compilation_job_name(),
                                                                  compile_max_run,
                                                                  framework=framework,
                                                                  framework_version=framework_version)
    return self._compiled_models[target_instance_family]
constant[Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, rasp3b
input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
'tensorflow', 'pytorch', 'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60).
After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
current status.
tags (list[dict]): List of tags for labeling a compilation job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
]
if compare[name[target_instance_family] <ast.NotIn object at 0x7da2590d7190> name[NEO_ALLOWED_TARGET_INSTANCE_FAMILY]] begin[:]
<ast.Raise object at 0x7da1b1c18fa0>
if <ast.BoolOp object at 0x7da1b1c19180> begin[:]
<ast.Raise object at 0x7da1b1c19f00>
if compare[compare[name[framework] is constant[None]] not_equal[!=] compare[name[framework_version] is constant[None]]] begin[:]
<ast.Raise object at 0x7da1b1c18bb0>
variable[model] assign[=] call[name[self].create_model, parameter[]]
call[name[self]._compiled_models][name[target_instance_family]] assign[=] call[name[model].compile, parameter[name[target_instance_family], name[input_shape], name[output_path], name[self].role, name[tags], call[name[self]._compilation_job_name, parameter[]], name[compile_max_run]]]
return[call[name[self]._compiled_models][name[target_instance_family]]] | keyword[def] identifier[compile_model] ( identifier[self] , identifier[target_instance_family] , identifier[input_shape] , identifier[output_path] , identifier[framework] = keyword[None] , identifier[framework_version] = keyword[None] ,
identifier[compile_max_run] = literal[int] * literal[int] , identifier[tags] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[target_instance_family] keyword[not] keyword[in] identifier[NEO_ALLOWED_TARGET_INSTANCE_FAMILY] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[NEO_ALLOWED_TARGET_INSTANCE_FAMILY] ))
keyword[if] identifier[framework] keyword[and] identifier[framework] keyword[not] keyword[in] identifier[NEO_ALLOWED_FRAMEWORKS] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[NEO_ALLOWED_FRAMEWORKS] ))
keyword[if] ( identifier[framework] keyword[is] keyword[None] )!=( identifier[framework_version] keyword[is] keyword[None] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[model] = identifier[self] . identifier[create_model] (** identifier[kwargs] )
identifier[self] . identifier[_compiled_models] [ identifier[target_instance_family] ]= identifier[model] . identifier[compile] ( identifier[target_instance_family] ,
identifier[input_shape] ,
identifier[output_path] ,
identifier[self] . identifier[role] ,
identifier[tags] ,
identifier[self] . identifier[_compilation_job_name] (),
identifier[compile_max_run] ,
identifier[framework] = identifier[framework] ,
identifier[framework_version] = identifier[framework_version] )
keyword[return] identifier[self] . identifier[_compiled_models] [ identifier[target_instance_family] ] | def compile_model(self, target_instance_family, input_shape, output_path, framework=None, framework_version=None, compile_max_run=5 * 60, tags=None, **kwargs):
"""Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, rasp3b
input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
'tensorflow', 'pytorch', 'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60).
After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
current status.
tags (list[dict]): List of tags for labeling a compilation job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
"""
if target_instance_family not in NEO_ALLOWED_TARGET_INSTANCE_FAMILY:
raise ValueError('Please use valid target_instance_family,allowed values: {}'.format(NEO_ALLOWED_TARGET_INSTANCE_FAMILY)) # depends on [control=['if'], data=['NEO_ALLOWED_TARGET_INSTANCE_FAMILY']]
if framework and framework not in NEO_ALLOWED_FRAMEWORKS:
raise ValueError('Please use valid framework, allowed values: {}'.format(NEO_ALLOWED_FRAMEWORKS)) # depends on [control=['if'], data=[]]
if (framework is None) != (framework_version is None):
raise ValueError('You should provide framework and framework_version at the same time.') # depends on [control=['if'], data=[]]
model = self.create_model(**kwargs)
self._compiled_models[target_instance_family] = model.compile(target_instance_family, input_shape, output_path, self.role, tags, self._compilation_job_name(), compile_max_run, framework=framework, framework_version=framework_version)
return self._compiled_models[target_instance_family] |
def put(self, key, value):
    '''Stores the object `value` named by `key`self.

    Writes to both ``cache_datastore`` and ``child_datastore``.
    '''
    # Write through: cache first, then the authoritative child store.
    for datastore in (self.cache_datastore, self.child_datastore):
        datastore.put(key, value)
constant[Stores the object `value` named by `key`self.
Writes to both ``cache_datastore`` and ``child_datastore``.
]
call[name[self].cache_datastore.put, parameter[name[key], name[value]]]
call[name[self].child_datastore.put, parameter[name[key], name[value]]] | keyword[def] identifier[put] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[self] . identifier[cache_datastore] . identifier[put] ( identifier[key] , identifier[value] )
identifier[self] . identifier[child_datastore] . identifier[put] ( identifier[key] , identifier[value] ) | def put(self, key, value):
"""Stores the object `value` named by `key`self.
Writes to both ``cache_datastore`` and ``child_datastore``.
"""
self.cache_datastore.put(key, value)
self.child_datastore.put(key, value) |
def collapse(self, indices, values):
    """Partly collapse the interval product to single values.

    Note that no changes are made in-place.

    Parameters
    ----------
    indices : int or sequence of ints
        The indices of the dimensions along which to collapse.
    values : `array-like` or float
        The values to which to collapse. Must have the same
        length as ``indices``. Values must lie within the interval
        boundaries.

    Returns
    -------
    collapsed : `IntervalProd`
        The collapsed set.

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> rbox.collapse(1, 0)
    IntervalProd([-1.,  0.,  2.], [-0.5,  0. ,  3. ])
    >>> rbox.collapse([1, 2], [0, 2.5])
    IntervalProd([-1. ,  0. ,  2.5], [-0.5,  0. ,  2.5])
    """
    indices = np.atleast_1d(indices).astype('int64', casting='safe')
    values = np.atleast_1d(values)
    if len(indices) != len(values):
        raise ValueError('lengths of indices {} and values {} do not '
                         'match ({} != {})'
                         ''.format(indices, values,
                                   len(indices), len(values)))

    for axis, index in enumerate(indices):
        # BUGFIX: valid axis indices are 0 .. ndim-1. The previous check used
        # ``index <= self.ndim``, which accepted index == ndim, contradicting
        # the error message below ("range 0 --> ndim-1") and letting the
        # out-of-range index fall through to an opaque NumPy IndexError.
        if not 0 <= index < self.ndim:
            raise IndexError('in axis {}: index {} out of range 0 --> {}'
                             ''.format(axis, index, self.ndim - 1))

    # Collapsed values must stay inside the current interval bounds.
    if np.any(values < self.min_pt[indices]):
        raise ValueError('values {} not above the lower interval '
                         'boundaries {}'
                         ''.format(values, self.min_pt[indices]))

    if np.any(values > self.max_pt[indices]):
        raise ValueError('values {} not below the upper interval '
                         'boundaries {}'
                         ''.format(values, self.max_pt[indices]))

    # Build new bounds with min == max == value along the collapsed axes.
    b_new = self.min_pt.copy()
    b_new[indices] = values
    e_new = self.max_pt.copy()
    e_new[indices] = values

    return IntervalProd(b_new, e_new)
constant[Partly collapse the interval product to single values.
Note that no changes are made in-place.
Parameters
----------
indices : int or sequence of ints
The indices of the dimensions along which to collapse.
values : `array-like` or float
The values to which to collapse. Must have the same
length as ``indices``. Values must lie within the interval
boundaries.
Returns
-------
collapsed : `IntervalProd`
The collapsed set.
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.collapse(1, 0)
IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ])
>>> rbox.collapse([1, 2], [0, 2.5])
IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5])
]
variable[indices] assign[=] call[call[name[np].atleast_1d, parameter[name[indices]]].astype, parameter[constant[int64]]]
variable[values] assign[=] call[name[np].atleast_1d, parameter[name[values]]]
if compare[call[name[len], parameter[name[indices]]] not_equal[!=] call[name[len], parameter[name[values]]]] begin[:]
<ast.Raise object at 0x7da1b1e5dae0>
for taget[tuple[[<ast.Name object at 0x7da1b1e5ce80>, <ast.Name object at 0x7da1b1e5c040>]]] in starred[call[name[enumerate], parameter[name[indices]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1e94610> begin[:]
<ast.Raise object at 0x7da1b1e95510>
if call[name[np].any, parameter[compare[name[values] less[<] call[name[self].min_pt][name[indices]]]]] begin[:]
<ast.Raise object at 0x7da1b1e96410>
if call[name[np].any, parameter[compare[name[values] greater[>] call[name[self].max_pt][name[indices]]]]] begin[:]
<ast.Raise object at 0x7da1b1e95150>
variable[b_new] assign[=] call[name[self].min_pt.copy, parameter[]]
call[name[b_new]][name[indices]] assign[=] name[values]
variable[e_new] assign[=] call[name[self].max_pt.copy, parameter[]]
call[name[e_new]][name[indices]] assign[=] name[values]
return[call[name[IntervalProd], parameter[name[b_new], name[e_new]]]] | keyword[def] identifier[collapse] ( identifier[self] , identifier[indices] , identifier[values] ):
literal[string]
identifier[indices] = identifier[np] . identifier[atleast_1d] ( identifier[indices] ). identifier[astype] ( literal[string] , identifier[casting] = literal[string] )
identifier[values] = identifier[np] . identifier[atleast_1d] ( identifier[values] )
keyword[if] identifier[len] ( identifier[indices] )!= identifier[len] ( identifier[values] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[indices] , identifier[values] ,
identifier[len] ( identifier[indices] ), identifier[len] ( identifier[values] )))
keyword[for] identifier[axis] , identifier[index] keyword[in] identifier[enumerate] ( identifier[indices] ):
keyword[if] keyword[not] literal[int] <= identifier[index] <= identifier[self] . identifier[ndim] :
keyword[raise] identifier[IndexError] ( literal[string]
literal[string] . identifier[format] ( identifier[axis] , identifier[index] , identifier[self] . identifier[ndim] - literal[int] ))
keyword[if] identifier[np] . identifier[any] ( identifier[values] < identifier[self] . identifier[min_pt] [ identifier[indices] ]):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[values] , identifier[self] . identifier[min_pt] [ identifier[indices] ]))
keyword[if] identifier[np] . identifier[any] ( identifier[values] > identifier[self] . identifier[max_pt] [ identifier[indices] ]):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[values] , identifier[self] . identifier[max_pt] [ identifier[indices] ]))
identifier[b_new] = identifier[self] . identifier[min_pt] . identifier[copy] ()
identifier[b_new] [ identifier[indices] ]= identifier[values]
identifier[e_new] = identifier[self] . identifier[max_pt] . identifier[copy] ()
identifier[e_new] [ identifier[indices] ]= identifier[values]
keyword[return] identifier[IntervalProd] ( identifier[b_new] , identifier[e_new] ) | def collapse(self, indices, values):
"""Partly collapse the interval product to single values.
Note that no changes are made in-place.
Parameters
----------
indices : int or sequence of ints
The indices of the dimensions along which to collapse.
values : `array-like` or float
The values to which to collapse. Must have the same
length as ``indices``. Values must lie within the interval
boundaries.
Returns
-------
collapsed : `IntervalProd`
The collapsed set.
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.collapse(1, 0)
IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ])
>>> rbox.collapse([1, 2], [0, 2.5])
IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5])
"""
indices = np.atleast_1d(indices).astype('int64', casting='safe')
values = np.atleast_1d(values)
if len(indices) != len(values):
raise ValueError('lengths of indices {} and values {} do not match ({} != {})'.format(indices, values, len(indices), len(values))) # depends on [control=['if'], data=[]]
for (axis, index) in enumerate(indices):
if not 0 <= index <= self.ndim:
raise IndexError('in axis {}: index {} out of range 0 --> {}'.format(axis, index, self.ndim - 1)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if np.any(values < self.min_pt[indices]):
raise ValueError('values {} not above the lower interval boundaries {}'.format(values, self.min_pt[indices])) # depends on [control=['if'], data=[]]
if np.any(values > self.max_pt[indices]):
raise ValueError('values {} not below the upper interval boundaries {}'.format(values, self.max_pt[indices])) # depends on [control=['if'], data=[]]
b_new = self.min_pt.copy()
b_new[indices] = values
e_new = self.max_pt.copy()
e_new[indices] = values
return IntervalProd(b_new, e_new) |
def triggered_token(self) -> 'CancelToken':
    """
    Return the token which was triggered.

    The returned token may be this token or one that it was chained with.
    """
    # This token itself takes precedence over anything in its chain.
    if self._triggered.is_set():
        return self
    # Otherwise return the first triggered chained token, resolved through
    # its own ``triggered_token`` so nested chains yield the token that
    # actually fired; ``None`` when nothing is triggered.
    return next(
        (token.triggered_token for token in self._chain if token.triggered),
        None,
    )
constant[
Return the token which was triggered.
The returned token may be this token or one that it was chained with.
]
if call[name[self]._triggered.is_set, parameter[]] begin[:]
return[name[self]]
for taget[name[token]] in starred[name[self]._chain] begin[:]
if name[token].triggered begin[:]
return[name[token].triggered_token]
return[constant[None]] | keyword[def] identifier[triggered_token] ( identifier[self] )-> literal[string] :
literal[string]
keyword[if] identifier[self] . identifier[_triggered] . identifier[is_set] ():
keyword[return] identifier[self]
keyword[for] identifier[token] keyword[in] identifier[self] . identifier[_chain] :
keyword[if] identifier[token] . identifier[triggered] :
keyword[return] identifier[token] . identifier[triggered_token]
keyword[return] keyword[None] | def triggered_token(self) -> 'CancelToken':
"""
Return the token which was triggered.
The returned token may be this token or one that it was chained with.
"""
if self._triggered.is_set():
return self # depends on [control=['if'], data=[]]
for token in self._chain:
if token.triggered:
# Use token.triggered_token here to make the lookup recursive as self._chain may
# contain other chains.
return token.triggered_token # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['token']]
return None |
def appendOps(self, ops, append_to=None):
    """ Append op(s) to the transaction builder

        :param list ops: One or a list of operations
    """
    if not isinstance(ops, list):
        self.ops.append(ops)
    else:
        self.ops.extend(ops)
    # A chained parent builder must rebuild once its children change.
    parent_builder = self.parent
    if parent_builder:
        parent_builder._set_require_reconstruction()
constant[ Append op(s) to the transaction builder
:param list ops: One or a list of operations
]
if call[name[isinstance], parameter[name[ops], name[list]]] begin[:]
call[name[self].ops.extend, parameter[name[ops]]]
variable[parent] assign[=] name[self].parent
if name[parent] begin[:]
call[name[parent]._set_require_reconstruction, parameter[]] | keyword[def] identifier[appendOps] ( identifier[self] , identifier[ops] , identifier[append_to] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[ops] , identifier[list] ):
identifier[self] . identifier[ops] . identifier[extend] ( identifier[ops] )
keyword[else] :
identifier[self] . identifier[ops] . identifier[append] ( identifier[ops] )
identifier[parent] = identifier[self] . identifier[parent]
keyword[if] identifier[parent] :
identifier[parent] . identifier[_set_require_reconstruction] () | def appendOps(self, ops, append_to=None):
""" Append op(s) to the transaction builder
:param list ops: One or a list of operations
"""
if isinstance(ops, list):
self.ops.extend(ops) # depends on [control=['if'], data=[]]
else:
self.ops.append(ops)
parent = self.parent
if parent:
parent._set_require_reconstruction() # depends on [control=['if'], data=[]] |
def write_data_as_message(self, buffer, data, content_related,
                          *, after_id=None):
    """
    Writes a message containing the given data into buffer.
    Returns the message id.
    """
    msg_id = self._get_new_msg_id()
    seq_no = self._get_seq_no(content_related)
    # When the message must run after another one, wrap the payload in
    # an InvokeAfterMsgRequest before (optionally) gzipping it.
    if after_id is None:
        payload = data
    else:
        payload = bytes(InvokeAfterMsgRequest(after_id, data))
    body = GzipPacked.gzip_if_smaller(content_related, payload)
    # Header layout: int64 msg_id, int32 seq_no, int32 body length
    # (little-endian), immediately followed by the body bytes.
    header = struct.pack('<qii', msg_id, seq_no, len(body))
    buffer.write(header)
    buffer.write(body)
    return msg_id
constant[
Writes a message containing the given data into buffer.
Returns the message id.
]
variable[msg_id] assign[=] call[name[self]._get_new_msg_id, parameter[]]
variable[seq_no] assign[=] call[name[self]._get_seq_no, parameter[name[content_related]]]
if compare[name[after_id] is constant[None]] begin[:]
variable[body] assign[=] call[name[GzipPacked].gzip_if_smaller, parameter[name[content_related], name[data]]]
call[name[buffer].write, parameter[call[name[struct].pack, parameter[constant[<qii], name[msg_id], name[seq_no], call[name[len], parameter[name[body]]]]]]]
call[name[buffer].write, parameter[name[body]]]
return[name[msg_id]] | keyword[def] identifier[write_data_as_message] ( identifier[self] , identifier[buffer] , identifier[data] , identifier[content_related] ,
*, identifier[after_id] = keyword[None] ):
literal[string]
identifier[msg_id] = identifier[self] . identifier[_get_new_msg_id] ()
identifier[seq_no] = identifier[self] . identifier[_get_seq_no] ( identifier[content_related] )
keyword[if] identifier[after_id] keyword[is] keyword[None] :
identifier[body] = identifier[GzipPacked] . identifier[gzip_if_smaller] ( identifier[content_related] , identifier[data] )
keyword[else] :
identifier[body] = identifier[GzipPacked] . identifier[gzip_if_smaller] ( identifier[content_related] ,
identifier[bytes] ( identifier[InvokeAfterMsgRequest] ( identifier[after_id] , identifier[data] )))
identifier[buffer] . identifier[write] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[msg_id] , identifier[seq_no] , identifier[len] ( identifier[body] )))
identifier[buffer] . identifier[write] ( identifier[body] )
keyword[return] identifier[msg_id] | def write_data_as_message(self, buffer, data, content_related, *, after_id=None):
"""
Writes a message containing the given data into buffer.
Returns the message id.
"""
msg_id = self._get_new_msg_id()
seq_no = self._get_seq_no(content_related)
if after_id is None:
body = GzipPacked.gzip_if_smaller(content_related, data) # depends on [control=['if'], data=[]]
else:
body = GzipPacked.gzip_if_smaller(content_related, bytes(InvokeAfterMsgRequest(after_id, data)))
buffer.write(struct.pack('<qii', msg_id, seq_no, len(body)))
buffer.write(body)
return msg_id |
def add_kde_setting(key, value, data):
    """Add a KDE proxy setting value to data dictionary.

    @param key: KDE kioslaverc setting name (e.g. "ProxyType")
    @param value: raw string value read from the KDE configuration
    @param data: dictionary updated in place with normalized entries
    @raises ValueError: if a numeric setting holds a malformed value
    """
    if key == "ProxyType":
        # Map KDE's numeric proxy type onto a symbolic mode; unknown
        # types (e.g. 0 = no proxy) yield None, as before.
        modes = {
            1: "manual",
            2: "pac",       # PAC URL
            3: "wpad",      # WPAD auto-discovery
            4: "indirect",  # manual via environment variables
        }
        data["mode"] = modes.get(int(value))
    elif key == "Proxy Config Script":
        data["autoconfig_url"] = value
    elif key == "httpProxy":
        add_kde_proxy("http_proxy", value, data)
    elif key == "httpsProxy":
        add_kde_proxy("https_proxy", value, data)
    elif key == "ftpProxy":
        add_kde_proxy("ftp_proxy", value, data)
    elif key == "ReversedException":
        # Bug fix: the old expression bool(value == "true" or int(value))
        # raised ValueError for the literal string "false" because the
        # short-circuit only protected "true". Accept boolean words
        # (case-insensitively) and fall back to numeric flags.
        text = value.strip().lower()
        if text in ("true", "false"):
            data["reversed_bypass"] = (text == "true")
        else:
            data["reversed_bypass"] = bool(int(value))
    elif key == "NoProxyFor":
        data["ignore_hosts"] = split_hosts(value)
    elif key == "AuthMode":
        # Parsed for validation only; the authentication mode itself is
        # not stored (int() raises ValueError on malformed values).
        int(value)
constant[Add a KDE proxy setting value to data dictionary.]
if compare[name[key] equal[==] constant[ProxyType]] begin[:]
variable[mode] assign[=] constant[None]
variable[int_value] assign[=] call[name[int], parameter[name[value]]]
if compare[name[int_value] equal[==] constant[1]] begin[:]
variable[mode] assign[=] constant[manual]
call[name[data]][constant[mode]] assign[=] name[mode] | keyword[def] identifier[add_kde_setting] ( identifier[key] , identifier[value] , identifier[data] ):
literal[string]
keyword[if] identifier[key] == literal[string] :
identifier[mode] = keyword[None]
identifier[int_value] = identifier[int] ( identifier[value] )
keyword[if] identifier[int_value] == literal[int] :
identifier[mode] = literal[string]
keyword[elif] identifier[int_value] == literal[int] :
identifier[mode] = literal[string]
keyword[elif] identifier[int_value] == literal[int] :
identifier[mode] = literal[string]
keyword[elif] identifier[int_value] == literal[int] :
identifier[mode] = literal[string]
identifier[data] [ literal[string] ]= identifier[mode]
keyword[elif] identifier[key] == literal[string] :
identifier[data] [ literal[string] ]= identifier[value]
keyword[elif] identifier[key] == literal[string] :
identifier[add_kde_proxy] ( literal[string] , identifier[value] , identifier[data] )
keyword[elif] identifier[key] == literal[string] :
identifier[add_kde_proxy] ( literal[string] , identifier[value] , identifier[data] )
keyword[elif] identifier[key] == literal[string] :
identifier[add_kde_proxy] ( literal[string] , identifier[value] , identifier[data] )
keyword[elif] identifier[key] == literal[string] :
identifier[data] [ literal[string] ]= identifier[bool] ( identifier[value] == literal[string] keyword[or] identifier[int] ( identifier[value] ))
keyword[elif] identifier[key] == literal[string] :
identifier[data] [ literal[string] ]= identifier[split_hosts] ( identifier[value] )
keyword[elif] identifier[key] == literal[string] :
identifier[mode] = identifier[int] ( identifier[value] ) | def add_kde_setting(key, value, data):
"""Add a KDE proxy setting value to data dictionary."""
if key == 'ProxyType':
mode = None
int_value = int(value)
if int_value == 1:
mode = 'manual' # depends on [control=['if'], data=[]]
elif int_value == 2:
# PAC URL
mode = 'pac' # depends on [control=['if'], data=[]]
elif int_value == 3:
# WPAD.
mode = 'wpad' # depends on [control=['if'], data=[]]
elif int_value == 4:
# Indirect manual via environment variables.
mode = 'indirect' # depends on [control=['if'], data=[]]
data['mode'] = mode # depends on [control=['if'], data=[]]
elif key == 'Proxy Config Script':
data['autoconfig_url'] = value # depends on [control=['if'], data=[]]
elif key == 'httpProxy':
add_kde_proxy('http_proxy', value, data) # depends on [control=['if'], data=[]]
elif key == 'httpsProxy':
add_kde_proxy('https_proxy', value, data) # depends on [control=['if'], data=[]]
elif key == 'ftpProxy':
add_kde_proxy('ftp_proxy', value, data) # depends on [control=['if'], data=[]]
elif key == 'ReversedException':
data['reversed_bypass'] = bool(value == 'true' or int(value)) # depends on [control=['if'], data=[]]
elif key == 'NoProxyFor':
data['ignore_hosts'] = split_hosts(value) # depends on [control=['if'], data=[]]
elif key == 'AuthMode':
mode = int(value) # depends on [control=['if'], data=[]] |
def path_distance(points):
    """Compute the path distance from given set of points.

    Sums the euclidean length of each consecutive segment, using only
    the first three columns (x, y, z) of each point row. Returns 0.0
    for a single point (no segments).
    """
    # Segment vectors between consecutive points, xyz columns only.
    vecs = np.diff(points, axis=0)[:, :3]
    # Vectorized per-segment length instead of a Python-level loop of
    # np.dot calls: one norm over axis 1, then a single reduction.
    return np.sum(np.linalg.norm(vecs, axis=1))
constant[
Compute the path distance from given set of points
]
variable[vecs] assign[=] call[call[name[np].diff, parameter[name[points]]]][tuple[[<ast.Slice object at 0x7da20e963e20>, <ast.Slice object at 0x7da20e962710>]]]
variable[d2] assign[=] <ast.ListComp object at 0x7da20e960520>
return[call[name[np].sum, parameter[call[name[np].sqrt, parameter[name[d2]]]]]] | keyword[def] identifier[path_distance] ( identifier[points] ):
literal[string]
identifier[vecs] = identifier[np] . identifier[diff] ( identifier[points] , identifier[axis] = literal[int] )[:,: literal[int] ]
identifier[d2] =[ identifier[np] . identifier[dot] ( identifier[p] , identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[vecs] ]
keyword[return] identifier[np] . identifier[sum] ( identifier[np] . identifier[sqrt] ( identifier[d2] )) | def path_distance(points):
"""
Compute the path distance from given set of points
"""
vecs = np.diff(points, axis=0)[:, :3]
d2 = [np.dot(p, p) for p in vecs]
return np.sum(np.sqrt(d2)) |
def data_struct_array(sample, **vectors):  # data_struct_array(sample, *, energy, **vectors):
    """Combine samples and per-sample data into a numpy structured array.

    Args:
        sample (array_like):
            Samples, in any form that can be converted into a numpy array.

        energy (array_like, required):
            Required keyword argument. Energies, in any form that can be converted into a numpy
            1-dimensional array.

        **kwargs (array_like):
            Other per-sample data, in any form that can be converted into a numpy array.

    Returns:
        :obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]

    """
    # Normalise samples to a 2-d int8 array; an empty input becomes a
    # 0x0 array so that .shape unpacking below still works.
    if len(sample):
        sample = np.asarray(sample, dtype=np.int8)
        if sample.ndim < 2:
            sample = np.expand_dims(sample, 0)
    else:
        sample = np.zeros((0, 0), dtype=np.int8)
    num_samples, num_variables = sample.shape

    # Every sample occurs once unless the caller says otherwise.
    vectors.setdefault('num_occurrences', [1] * num_samples)

    # Build the structured dtype: 'sample' first, then one field per
    # keyword vector in insertion order.
    field_types = [('sample', np.dtype(np.int8), (num_variables,))]
    arrays = {}
    for name, values in vectors.items():
        # Energies are always stored as floats; other fields keep
        # whatever dtype numpy infers.
        values = np.asarray(values, float if name == 'energy' else None)
        arrays[name] = values
        if values.ndim < 1 or values.shape[0] != num_samples:
            raise ValueError(
                ('{} and sample have a mismatched shape {}, {}. They must have the same size '
                 'in the first axis.').format(name, values.shape, sample.shape))
        field_types.append((name, values.dtype, values.shape[1:]))

    if 'energy' not in arrays:
        # consistent error with the one thrown in python3
        raise TypeError('data_struct_array() needs keyword-only argument energy')
    if arrays['energy'].shape != (num_samples,):
        raise ValueError('energy should be a vector of length {}'.format(num_samples))

    record = np.rec.array(np.zeros(num_samples, dtype=field_types))
    record['sample'] = sample
    for name, values in arrays.items():
        record[name] = values
    return record
constant[Combine samples and per-sample data into a numpy structured array.
Args:
sample (array_like):
Samples, in any form that can be converted into a numpy array.
energy (array_like, required):
Required keyword argument. Energies, in any form that can be converted into a numpy
1-dimensional array.
**kwargs (array_like):
Other per-sample data, in any form that can be converted into a numpy array.
Returns:
:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]
]
if <ast.UnaryOp object at 0x7da1b0774d30> begin[:]
variable[sample] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b078fbe0>, <ast.Constant object at 0x7da1b078f310>]]]]
<ast.Tuple object at 0x7da1b078fa30> assign[=] name[sample].shape
if compare[constant[num_occurrences] <ast.NotIn object at 0x7da2590d7190> name[vectors]] begin[:]
call[name[vectors]][constant[num_occurrences]] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b078f580>]] * name[num_samples]]
variable[datavectors] assign[=] dictionary[[], []]
variable[datatypes] assign[=] list[[<ast.Tuple object at 0x7da20c993d60>]]
for taget[tuple[[<ast.Name object at 0x7da1b06fc3d0>, <ast.Name object at 0x7da1b06fe860>]]] in starred[call[name[vectors].items, parameter[]]] begin[:]
variable[dtype] assign[=] <ast.IfExp object at 0x7da1b06fe890>
call[name[datavectors]][name[kwarg]] assign[=] call[name[np].asarray, parameter[name[vector], name[dtype]]]
if <ast.BoolOp object at 0x7da1b06ff040> begin[:]
variable[msg] assign[=] call[constant[{} and sample have a mismatched shape {}, {}. They must have the same size in the first axis.].format, parameter[name[kwarg], name[vector].shape, name[sample].shape]]
<ast.Raise object at 0x7da1b06fd330>
call[name[datatypes].append, parameter[tuple[[<ast.Name object at 0x7da1b06fd720>, <ast.Attribute object at 0x7da1b06fcfd0>, <ast.Subscript object at 0x7da1b06fc970>]]]]
if compare[constant[energy] <ast.NotIn object at 0x7da2590d7190> name[datavectors]] begin[:]
<ast.Raise object at 0x7da1b07f6200>
variable[data] assign[=] call[name[np].rec.array, parameter[call[name[np].zeros, parameter[name[num_samples]]]]]
call[name[data]][constant[sample]] assign[=] name[sample]
for taget[tuple[[<ast.Name object at 0x7da1b07f58a0>, <ast.Name object at 0x7da1b07f5090>]]] in starred[call[name[datavectors].items, parameter[]]] begin[:]
call[name[data]][name[kwarg]] assign[=] name[vector]
return[name[data]] | keyword[def] identifier[data_struct_array] ( identifier[sample] ,** identifier[vectors] ):
literal[string]
keyword[if] keyword[not] identifier[len] ( identifier[sample] ):
identifier[sample] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[dtype] = identifier[np] . identifier[int8] )
keyword[else] :
identifier[sample] = identifier[np] . identifier[asarray] ( identifier[sample] , identifier[dtype] = identifier[np] . identifier[int8] )
keyword[if] identifier[sample] . identifier[ndim] < literal[int] :
identifier[sample] = identifier[np] . identifier[expand_dims] ( identifier[sample] , literal[int] )
identifier[num_samples] , identifier[num_variables] = identifier[sample] . identifier[shape]
keyword[if] literal[string] keyword[not] keyword[in] identifier[vectors] :
identifier[vectors] [ literal[string] ]=[ literal[int] ]* identifier[num_samples]
identifier[datavectors] ={}
identifier[datatypes] =[( literal[string] , identifier[np] . identifier[dtype] ( identifier[np] . identifier[int8] ),( identifier[num_variables] ,))]
keyword[for] identifier[kwarg] , identifier[vector] keyword[in] identifier[vectors] . identifier[items] ():
identifier[dtype] = identifier[float] keyword[if] identifier[kwarg] == literal[string] keyword[else] keyword[None]
identifier[datavectors] [ identifier[kwarg] ]= identifier[vector] = identifier[np] . identifier[asarray] ( identifier[vector] , identifier[dtype] )
keyword[if] identifier[len] ( identifier[vector] . identifier[shape] )< literal[int] keyword[or] identifier[vector] . identifier[shape] [ literal[int] ]!= identifier[num_samples] :
identifier[msg] =( literal[string]
literal[string] ). identifier[format] ( identifier[kwarg] , identifier[vector] . identifier[shape] , identifier[sample] . identifier[shape] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
identifier[datatypes] . identifier[append] (( identifier[kwarg] , identifier[vector] . identifier[dtype] , identifier[vector] . identifier[shape] [ literal[int] :]))
keyword[if] literal[string] keyword[not] keyword[in] identifier[datavectors] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[elif] identifier[datavectors] [ literal[string] ]. identifier[shape] !=( identifier[num_samples] ,):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[num_samples] ))
identifier[data] = identifier[np] . identifier[rec] . identifier[array] ( identifier[np] . identifier[zeros] ( identifier[num_samples] , identifier[dtype] = identifier[datatypes] ))
identifier[data] [ literal[string] ]= identifier[sample]
keyword[for] identifier[kwarg] , identifier[vector] keyword[in] identifier[datavectors] . identifier[items] ():
identifier[data] [ identifier[kwarg] ]= identifier[vector]
keyword[return] identifier[data] | def data_struct_array(sample, **vectors): # data_struct_array(sample, *, energy, **vectors):
"Combine samples and per-sample data into a numpy structured array.\n\n Args:\n sample (array_like):\n Samples, in any form that can be converted into a numpy array.\n\n energy (array_like, required):\n Required keyword argument. Energies, in any form that can be converted into a numpy\n 1-dimensional array.\n\n **kwargs (array_like):\n Other per-sample data, in any form that can be converted into a numpy array.\n\n Returns:\n :obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]\n\n "
if not len(sample):
# if samples are empty
sample = np.zeros((0, 0), dtype=np.int8) # depends on [control=['if'], data=[]]
else:
sample = np.asarray(sample, dtype=np.int8)
if sample.ndim < 2:
sample = np.expand_dims(sample, 0) # depends on [control=['if'], data=[]]
(num_samples, num_variables) = sample.shape
if 'num_occurrences' not in vectors:
vectors['num_occurrences'] = [1] * num_samples # depends on [control=['if'], data=['vectors']]
datavectors = {}
datatypes = [('sample', np.dtype(np.int8), (num_variables,))]
for (kwarg, vector) in vectors.items():
dtype = float if kwarg == 'energy' else None
datavectors[kwarg] = vector = np.asarray(vector, dtype)
if len(vector.shape) < 1 or vector.shape[0] != num_samples:
msg = '{} and sample have a mismatched shape {}, {}. They must have the same size in the first axis.'.format(kwarg, vector.shape, sample.shape)
raise ValueError(msg) # depends on [control=['if'], data=[]]
datatypes.append((kwarg, vector.dtype, vector.shape[1:])) # depends on [control=['for'], data=[]]
if 'energy' not in datavectors:
# consistent error with the one thrown in python3
raise TypeError('data_struct_array() needs keyword-only argument energy') # depends on [control=['if'], data=[]]
elif datavectors['energy'].shape != (num_samples,):
raise ValueError('energy should be a vector of length {}'.format(num_samples)) # depends on [control=['if'], data=[]]
data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
data['sample'] = sample
for (kwarg, vector) in datavectors.items():
data[kwarg] = vector # depends on [control=['for'], data=[]]
return data |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.