code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_object(self, group_id, mask=None):
    """Return a SoftLayer_Virtual_PlacementGroup record.

    https://softlayer.github.io/reference/services/SoftLayer_Virtual_PlacementGroup/getObject

    :param group_id: id of the placement group to fetch.
    :param mask: optional object mask string; when omitted, a default mask
        covering the core fields, backend router identity, and guest
        transaction status is used.
    :returns: the raw API response for the getObject call.
    """
    if mask is None:
        # Default mask: core group fields plus the backend router and any
        # active transaction status on the member guests.
        mask = "mask[id, name, createDate, rule, backendRouter[id, hostname]," \
               "guests[activeTransaction[id,transactionStatus[name,friendlyName]]]]"
    return self.client.call('SoftLayer_Virtual_PlacementGroup', 'getObject', id=group_id, mask=mask) | def function[get_object, parameter[self, group_id, mask]]:
constant[Returns a PlacementGroup Object
https://softlayer.github.io/reference/services/SoftLayer_Virtual_PlacementGroup/getObject
]
if compare[name[mask] is constant[None]] begin[:]
variable[mask] assign[=] constant[mask[id, name, createDate, rule, backendRouter[id, hostname],guests[activeTransaction[id,transactionStatus[name,friendlyName]]]]]
return[call[name[self].client.call, parameter[constant[SoftLayer_Virtual_PlacementGroup], constant[getObject]]]] | keyword[def] identifier[get_object] ( identifier[self] , identifier[group_id] , identifier[mask] = keyword[None] ):
literal[string]
keyword[if] identifier[mask] keyword[is] keyword[None] :
identifier[mask] = literal[string] literal[string]
keyword[return] identifier[self] . identifier[client] . identifier[call] ( literal[string] , literal[string] , identifier[id] = identifier[group_id] , identifier[mask] = identifier[mask] ) | def get_object(self, group_id, mask=None):
"""Returns a PlacementGroup Object
https://softlayer.github.io/reference/services/SoftLayer_Virtual_PlacementGroup/getObject
"""
if mask is None:
mask = 'mask[id, name, createDate, rule, backendRouter[id, hostname],guests[activeTransaction[id,transactionStatus[name,friendlyName]]]]' # depends on [control=['if'], data=['mask']]
return self.client.call('SoftLayer_Virtual_PlacementGroup', 'getObject', id=group_id, mask=mask) |
def load_user_from_request(req):
    """
    Just like the Flask.login load_user_from_request.

    If you need to customize the user loading from your database,
    the FlaskBitjws.get_user_by_key method is the one to modify.

    :param req: The flask request to load a user based on.
    :returns: a FlaskUser on success, or None if the JWS is missing/invalid,
        the nonce is stale, or no user matches the signing key.
    """
    # Presumably verifies the JWS signature and attaches jws_header /
    # jws_payload to req — confirm against load_jws_from_request.
    load_jws_from_request(req)
    if not hasattr(req, 'jws_header') or req.jws_header is None or not \
           'iat' in req.jws_payload:
        current_app.logger.info("invalid jws request.")
        return None
    # Replay protection: the payload 'iat' (seconds) must be newer than the
    # last nonce stored for this key ('kid'); nonces are compared in ms.
    ln = current_app.bitjws.get_last_nonce(current_app,
                                           req.jws_header['kid'],
                                           req.jws_payload['iat'])
    # NOTE(review): 'iat' membership was already required above, so the
    # re-check here is redundant (harmless).
    if (ln is None or 'iat' not in req.jws_payload or
            req.jws_payload['iat'] * 1000 <= ln):
        current_app.logger.info("invalid nonce. lastnonce: %s" % ln)
        return None
    rawu = current_app.bitjws.get_user_by_key(current_app,
                                              req.jws_header['kid'])
    if rawu is None:
        return None
    current_app.logger.info("logging in user: %s" % rawu)
    return FlaskUser(rawu) | def function[load_user_from_request, parameter[req]]:
constant[
Just like the Flask.login load_user_from_request
If you need to customize the user loading from your database,
the FlaskBitjws.get_user_by_key method is the one to modify.
:param req: The flask request to load a user based on.
]
call[name[load_jws_from_request], parameter[name[req]]]
if <ast.BoolOp object at 0x7da1b1471ba0> begin[:]
call[name[current_app].logger.info, parameter[constant[invalid jws request.]]]
return[constant[None]]
variable[ln] assign[=] call[name[current_app].bitjws.get_last_nonce, parameter[name[current_app], call[name[req].jws_header][constant[kid]], call[name[req].jws_payload][constant[iat]]]]
if <ast.BoolOp object at 0x7da20c6aab60> begin[:]
call[name[current_app].logger.info, parameter[binary_operation[constant[invalid nonce. lastnonce: %s] <ast.Mod object at 0x7da2590d6920> name[ln]]]]
return[constant[None]]
variable[rawu] assign[=] call[name[current_app].bitjws.get_user_by_key, parameter[name[current_app], call[name[req].jws_header][constant[kid]]]]
if compare[name[rawu] is constant[None]] begin[:]
return[constant[None]]
call[name[current_app].logger.info, parameter[binary_operation[constant[logging in user: %s] <ast.Mod object at 0x7da2590d6920> name[rawu]]]]
return[call[name[FlaskUser], parameter[name[rawu]]]] | keyword[def] identifier[load_user_from_request] ( identifier[req] ):
literal[string]
identifier[load_jws_from_request] ( identifier[req] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[req] , literal[string] ) keyword[or] identifier[req] . identifier[jws_header] keyword[is] keyword[None] keyword[or] keyword[not] literal[string] keyword[in] identifier[req] . identifier[jws_payload] :
identifier[current_app] . identifier[logger] . identifier[info] ( literal[string] )
keyword[return] keyword[None]
identifier[ln] = identifier[current_app] . identifier[bitjws] . identifier[get_last_nonce] ( identifier[current_app] ,
identifier[req] . identifier[jws_header] [ literal[string] ],
identifier[req] . identifier[jws_payload] [ literal[string] ])
keyword[if] ( identifier[ln] keyword[is] keyword[None] keyword[or] literal[string] keyword[not] keyword[in] identifier[req] . identifier[jws_payload] keyword[or]
identifier[req] . identifier[jws_payload] [ literal[string] ]* literal[int] <= identifier[ln] ):
identifier[current_app] . identifier[logger] . identifier[info] ( literal[string] % identifier[ln] )
keyword[return] keyword[None]
identifier[rawu] = identifier[current_app] . identifier[bitjws] . identifier[get_user_by_key] ( identifier[current_app] ,
identifier[req] . identifier[jws_header] [ literal[string] ])
keyword[if] identifier[rawu] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[current_app] . identifier[logger] . identifier[info] ( literal[string] % identifier[rawu] )
keyword[return] identifier[FlaskUser] ( identifier[rawu] ) | def load_user_from_request(req):
"""
Just like the Flask.login load_user_from_request
If you need to customize the user loading from your database,
the FlaskBitjws.get_user_by_key method is the one to modify.
:param req: The flask request to load a user based on.
"""
load_jws_from_request(req)
if not hasattr(req, 'jws_header') or req.jws_header is None or (not 'iat' in req.jws_payload):
current_app.logger.info('invalid jws request.')
return None # depends on [control=['if'], data=[]]
ln = current_app.bitjws.get_last_nonce(current_app, req.jws_header['kid'], req.jws_payload['iat'])
if ln is None or 'iat' not in req.jws_payload or req.jws_payload['iat'] * 1000 <= ln:
current_app.logger.info('invalid nonce. lastnonce: %s' % ln)
return None # depends on [control=['if'], data=[]]
rawu = current_app.bitjws.get_user_by_key(current_app, req.jws_header['kid'])
if rawu is None:
return None # depends on [control=['if'], data=[]]
current_app.logger.info('logging in user: %s' % rawu)
return FlaskUser(rawu) |
def _scan(self, inputs, outputs, name='scan', step=None, constants=None):
    '''Helper method for defining a basic loop in theano.
    Parameters
    ----------
    inputs : sequence of theano expressions
        Inputs to the scan operation.
    outputs : sequence of output specifiers
        Specifiers for the outputs of the scan operation. This should be a
        sequence containing:
        - None for values that are output by the scan but not tapped as
          inputs,
        - an integer or theano scalar (``ndim == 0``) indicating the batch
          size for initial zero state,
        - a theano tensor variable (``ndim > 0``) containing initial state
          data, or
        - a dictionary containing a full output specifier. See
          ``outputs_info`` in the Theano documentation for ``scan``.
    name : str, optional
        Name of the scan variable to create. Defaults to ``'scan'``.
    step : callable, optional
        The callable to apply in the loop. Defaults to :func:`self._step`.
    constants : sequence of tensor, optional
        A sequence of parameters, if any, needed by the step function.
    Returns
    -------
    output(s) : theano expression(s)
        Theano expression(s) representing output(s) from the scan.
    updates : sequence of update tuples
        A sequence of updates to apply inside a theano function.
    '''
    # Normalize each output specifier into a valid ``outputs_info`` entry.
    init = []
    for i, x in enumerate(outputs):
        # Non-tensors have no ndim; -1 keeps them out of both ndim tests.
        ndim = getattr(x, 'ndim', -1)
        if x is None or isinstance(x, dict) or ndim > 0:
            # Already a valid specifier (no-tap, full dict, or explicit
            # initial-state tensor): pass it through unchanged.
            init.append(x)
            continue
        if isinstance(x, int) or ndim == 0:
            # Batch size given: build a zero initial state of shape
            # (x, self.output_size) by repeating a shared (1, output_size)
            # zero row x times along axis 0.
            init.append(TT.repeat(theano.shared(
                np.zeros((1, self.output_size), util.FLOAT),
                name=self._fmt('init{}'.format(i))), x, axis=0))
            continue
        raise ValueError('cannot handle input {} for scan!'.format(x))
    return theano.scan(
        step or self._step,
        name=self._fmt(name),
        sequences=inputs,
        outputs_info=init,
        non_sequences=constants,
        # Run the loop backwards when the layer direction mentions 'back'.
        go_backwards='back' in self.kwargs.get('direction', '').lower(),
        # -1 disables gradient truncation (full backprop through time).
        truncate_gradient=self.kwargs.get('bptt_limit', -1),
    ) | def function[_scan, parameter[self, inputs, outputs, name, step, constants]]:
constant[Helper method for defining a basic loop in theano.
Parameters
----------
inputs : sequence of theano expressions
Inputs to the scan operation.
outputs : sequence of output specifiers
Specifiers for the outputs of the scan operation. This should be a
sequence containing:
- None for values that are output by the scan but not tapped as
inputs,
- an integer or theano scalar (``ndim == 0``) indicating the batch
size for initial zero state,
- a theano tensor variable (``ndim > 0``) containing initial state
data, or
- a dictionary containing a full output specifier. See
``outputs_info`` in the Theano documentation for ``scan``.
name : str, optional
Name of the scan variable to create. Defaults to ``'scan'``.
step : callable, optional
The callable to apply in the loop. Defaults to :func:`self._step`.
constants : sequence of tensor, optional
A sequence of parameters, if any, needed by the step function.
Returns
-------
output(s) : theano expression(s)
Theano expression(s) representing output(s) from the scan.
updates : sequence of update tuples
A sequence of updates to apply inside a theano function.
]
variable[init] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0250b80>, <ast.Name object at 0x7da1b0250b50>]]] in starred[call[name[enumerate], parameter[name[outputs]]]] begin[:]
variable[ndim] assign[=] call[name[getattr], parameter[name[x], constant[ndim], <ast.UnaryOp object at 0x7da1b0252530>]]
if <ast.BoolOp object at 0x7da1b02525c0> begin[:]
call[name[init].append, parameter[name[x]]]
continue
if <ast.BoolOp object at 0x7da1b0252980> begin[:]
call[name[init].append, parameter[call[name[TT].repeat, parameter[call[name[theano].shared, parameter[call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b02514e0>, <ast.Attribute object at 0x7da1b0251510>]], name[util].FLOAT]]]], name[x]]]]]
continue
<ast.Raise object at 0x7da1b02505b0>
return[call[name[theano].scan, parameter[<ast.BoolOp object at 0x7da1b02507c0>]]] | keyword[def] identifier[_scan] ( identifier[self] , identifier[inputs] , identifier[outputs] , identifier[name] = literal[string] , identifier[step] = keyword[None] , identifier[constants] = keyword[None] ):
literal[string]
identifier[init] =[]
keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[outputs] ):
identifier[ndim] = identifier[getattr] ( identifier[x] , literal[string] ,- literal[int] )
keyword[if] identifier[x] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[x] , identifier[dict] ) keyword[or] identifier[ndim] > literal[int] :
identifier[init] . identifier[append] ( identifier[x] )
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[int] ) keyword[or] identifier[ndim] == literal[int] :
identifier[init] . identifier[append] ( identifier[TT] . identifier[repeat] ( identifier[theano] . identifier[shared] (
identifier[np] . identifier[zeros] (( literal[int] , identifier[self] . identifier[output_size] ), identifier[util] . identifier[FLOAT] ),
identifier[name] = identifier[self] . identifier[_fmt] ( literal[string] . identifier[format] ( identifier[i] ))), identifier[x] , identifier[axis] = literal[int] ))
keyword[continue]
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[x] ))
keyword[return] identifier[theano] . identifier[scan] (
identifier[step] keyword[or] identifier[self] . identifier[_step] ,
identifier[name] = identifier[self] . identifier[_fmt] ( identifier[name] ),
identifier[sequences] = identifier[inputs] ,
identifier[outputs_info] = identifier[init] ,
identifier[non_sequences] = identifier[constants] ,
identifier[go_backwards] = literal[string] keyword[in] identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] (),
identifier[truncate_gradient] = identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] ,- literal[int] ),
) | def _scan(self, inputs, outputs, name='scan', step=None, constants=None):
"""Helper method for defining a basic loop in theano.
Parameters
----------
inputs : sequence of theano expressions
Inputs to the scan operation.
outputs : sequence of output specifiers
Specifiers for the outputs of the scan operation. This should be a
sequence containing:
- None for values that are output by the scan but not tapped as
inputs,
- an integer or theano scalar (``ndim == 0``) indicating the batch
size for initial zero state,
- a theano tensor variable (``ndim > 0``) containing initial state
data, or
- a dictionary containing a full output specifier. See
``outputs_info`` in the Theano documentation for ``scan``.
name : str, optional
Name of the scan variable to create. Defaults to ``'scan'``.
step : callable, optional
The callable to apply in the loop. Defaults to :func:`self._step`.
constants : sequence of tensor, optional
A sequence of parameters, if any, needed by the step function.
Returns
-------
output(s) : theano expression(s)
Theano expression(s) representing output(s) from the scan.
updates : sequence of update tuples
A sequence of updates to apply inside a theano function.
"""
init = []
for (i, x) in enumerate(outputs):
ndim = getattr(x, 'ndim', -1)
if x is None or isinstance(x, dict) or ndim > 0:
init.append(x)
continue # depends on [control=['if'], data=[]]
if isinstance(x, int) or ndim == 0:
init.append(TT.repeat(theano.shared(np.zeros((1, self.output_size), util.FLOAT), name=self._fmt('init{}'.format(i))), x, axis=0))
continue # depends on [control=['if'], data=[]]
raise ValueError('cannot handle input {} for scan!'.format(x)) # depends on [control=['for'], data=[]]
return theano.scan(step or self._step, name=self._fmt(name), sequences=inputs, outputs_info=init, non_sequences=constants, go_backwards='back' in self.kwargs.get('direction', '').lower(), truncate_gradient=self.kwargs.get('bptt_limit', -1)) |
def is_valid(self):
    ''' Validate form.

    Return True if Django validates the form, the username obeys the
    parameters, and passwords match.
    Return False otherwise.
    '''
    # Standard Django form validation must pass first.
    if not super(DeleteUserForm, self).is_valid():
        return False
    # Users may not delete their own account; report a form-wide error.
    if self.user == self.request.user:
        self._errors["__all__"] = self.error_class([MESSAGES['SELF_DELETE']])
        return False
    return True | def function[is_valid, parameter[self]]:
constant[ Validate form.
Return True if Django validates the form, the username obeys the parameters, and passwords match.
Return False otherwise.
]
if <ast.UnaryOp object at 0x7da204345480> begin[:]
return[constant[False]]
if compare[name[self].user equal[==] name[self].request.user] begin[:]
call[name[self]._errors][constant[__all__]] assign[=] call[name[self].error_class, parameter[list[[<ast.Subscript object at 0x7da204566590>]]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_valid] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[super] ( identifier[DeleteUserForm] , identifier[self] ). identifier[is_valid] ():
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[user] == identifier[self] . identifier[request] . identifier[user] :
identifier[self] . identifier[_errors] [ literal[string] ]= identifier[self] . identifier[error_class] ([ identifier[MESSAGES] [ literal[string] ]])
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_valid(self):
""" Validate form.
Return True if Django validates the form, the username obeys the parameters, and passwords match.
Return False otherwise.
"""
if not super(DeleteUserForm, self).is_valid():
return False # depends on [control=['if'], data=[]]
if self.user == self.request.user:
self._errors['__all__'] = self.error_class([MESSAGES['SELF_DELETE']])
return False # depends on [control=['if'], data=[]]
return True |
def smooth(y, radius, mode='two_sided', valid_only=False):
    '''
    Smooth signal y, where radius determines the size of the window.
    mode='two_sided':
        average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
    mode='causal':
        average over the window [max(index - radius, 0), index]
    valid_only: put nan in entries where the full-sized window is not available
    '''
    assert mode in ('two_sided', 'causal')
    if len(y) < 2*radius+1:
        # Signal shorter than one full window: fall back to a constant mean.
        return np.ones_like(y) * y.mean()
    elif mode == 'two_sided':
        convkernel = np.ones(2 * radius+1)
        # Dividing by a convolution of ones normalizes edge entries by the
        # actual (truncated) window size rather than the full kernel length.
        out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
        if valid_only:
            out[:radius] = out[-radius:] = np.nan
    elif mode == 'causal':
        convkernel = np.ones(radius)
        out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
        # 'full' convolution is longer than y; trim the trailing tail.
        out = out[:-radius+1]
        if valid_only:
            out[:radius] = np.nan
    return out | def function[smooth, parameter[y, radius, mode, valid_only]]:
constant[
Smooth signal y, where radius is determines the size of the window
mode='twosided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
]
assert[compare[name[mode] in tuple[[<ast.Constant object at 0x7da1b0210f70>, <ast.Constant object at 0x7da1b0210550>]]]]
if compare[call[name[len], parameter[name[y]]] less[<] binary_operation[binary_operation[constant[2] * name[radius]] + constant[1]]] begin[:]
return[binary_operation[call[name[np].ones_like, parameter[name[y]]] * call[name[y].mean, parameter[]]]]
return[name[out]] | keyword[def] identifier[smooth] ( identifier[y] , identifier[radius] , identifier[mode] = literal[string] , identifier[valid_only] = keyword[False] ):
literal[string]
keyword[assert] identifier[mode] keyword[in] ( literal[string] , literal[string] )
keyword[if] identifier[len] ( identifier[y] )< literal[int] * identifier[radius] + literal[int] :
keyword[return] identifier[np] . identifier[ones_like] ( identifier[y] )* identifier[y] . identifier[mean] ()
keyword[elif] identifier[mode] == literal[string] :
identifier[convkernel] = identifier[np] . identifier[ones] ( literal[int] * identifier[radius] + literal[int] )
identifier[out] = identifier[np] . identifier[convolve] ( identifier[y] , identifier[convkernel] , identifier[mode] = literal[string] )/ identifier[np] . identifier[convolve] ( identifier[np] . identifier[ones_like] ( identifier[y] ), identifier[convkernel] , identifier[mode] = literal[string] )
keyword[if] identifier[valid_only] :
identifier[out] [: identifier[radius] ]= identifier[out] [- identifier[radius] :]= identifier[np] . identifier[nan]
keyword[elif] identifier[mode] == literal[string] :
identifier[convkernel] = identifier[np] . identifier[ones] ( identifier[radius] )
identifier[out] = identifier[np] . identifier[convolve] ( identifier[y] , identifier[convkernel] , identifier[mode] = literal[string] )/ identifier[np] . identifier[convolve] ( identifier[np] . identifier[ones_like] ( identifier[y] ), identifier[convkernel] , identifier[mode] = literal[string] )
identifier[out] = identifier[out] [:- identifier[radius] + literal[int] ]
keyword[if] identifier[valid_only] :
identifier[out] [: identifier[radius] ]= identifier[np] . identifier[nan]
keyword[return] identifier[out] | def smooth(y, radius, mode='two_sided', valid_only=False):
"""
Smooth signal y, where radius is determines the size of the window
mode='twosided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
"""
assert mode in ('two_sided', 'causal')
if len(y) < 2 * radius + 1:
return np.ones_like(y) * y.mean() # depends on [control=['if'], data=[]]
elif mode == 'two_sided':
convkernel = np.ones(2 * radius + 1)
out = np.convolve(y, convkernel, mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
if valid_only:
out[:radius] = out[-radius:] = np.nan # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel, mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
out = out[:-radius + 1]
if valid_only:
out[:radius] = np.nan # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return out |
def get_recurring_bill_by_client(self, *, customer_id, date_begin=None, date_final=None):
    """
    Query invoices that are paid or pending payment. They can be looked up
    by customer, by subscription, or by date range.
    Args:
        customer_id: identifier of the customer whose bills are requested.
        date_begin: optional start date of the range filter (must support
            strftime).
        date_final: optional end date of the range filter (must support
            strftime).
    Returns:
        The API response from the 'recurringBill' endpoint.
    """
    params = {
        "customerId": customer_id,
    }
    # The date-range filter is applied only when both endpoints are given.
    if date_begin and date_final:
        params['dateBegin'] = date_begin.strftime('%Y-%m-%d')
        params['dateFinal'] = date_final.strftime('%Y-%m-%d')
    return self.client._get(self.url + 'recurringBill', params=params, headers=self.get_headers()) | def function[get_recurring_bill_by_client, parameter[self]]:
constant[
Consulta de las facturas que están pagadas o pendientes por pagar. Se puede consultar por cliente,
por suscripción o por rango de fechas.
Args:
customer_id:
date_begin:
date_final:
Returns:
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9d80>], [<ast.Name object at 0x7da18bccb9d0>]]
if <ast.BoolOp object at 0x7da18bcc91b0> begin[:]
call[name[params]][constant[dateBegin]] assign[=] call[name[date_begin].strftime, parameter[constant[%Y-%m-%d]]]
call[name[params]][constant[dateFinal]] assign[=] call[name[date_final].strftime, parameter[constant[%Y-%m-%d]]]
return[call[name[self].client._get, parameter[binary_operation[name[self].url + constant[recurringBill]]]]] | keyword[def] identifier[get_recurring_bill_by_client] ( identifier[self] ,*, identifier[customer_id] , identifier[date_begin] = keyword[None] , identifier[date_final] = keyword[None] ):
literal[string]
identifier[params] ={
literal[string] : identifier[customer_id] ,
}
keyword[if] identifier[date_begin] keyword[and] identifier[date_final] :
identifier[params] [ literal[string] ]= identifier[date_begin] . identifier[strftime] ( literal[string] )
identifier[params] [ literal[string] ]= identifier[date_final] . identifier[strftime] ( literal[string] )
keyword[return] identifier[self] . identifier[client] . identifier[_get] ( identifier[self] . identifier[url] + literal[string] , identifier[params] = identifier[params] , identifier[headers] = identifier[self] . identifier[get_headers] ()) | def get_recurring_bill_by_client(self, *, customer_id, date_begin=None, date_final=None):
"""
Consulta de las facturas que están pagadas o pendientes por pagar. Se puede consultar por cliente,
por suscripción o por rango de fechas.
Args:
customer_id:
date_begin:
date_final:
Returns:
"""
params = {'customerId': customer_id}
if date_begin and date_final:
params['dateBegin'] = date_begin.strftime('%Y-%m-%d')
params['dateFinal'] = date_final.strftime('%Y-%m-%d') # depends on [control=['if'], data=[]]
return self.client._get(self.url + 'recurringBill', params=params, headers=self.get_headers()) |
def pac_context_for_url(url, proxy_auth=None):
    """
    This context manager provides a simple way to add rudimentary PAC functionality
    to code that cannot be modified to use :class:`PACSession`,
    but obeys the ``HTTP_PROXY`` and ``HTTPS_PROXY`` environment variables.
    Upon entering this context, PAC discovery occurs with default parameters.
    If a PAC is found, then it's asked for the proxy to use for the given URL.
    The proxy environment variables are then set accordingly.
    Note that this provides a very simplified PAC experience that's insufficient for some scenarios.
    :param url: Consult the PAC for the proxy to use for this URL.
    :param requests.auth.HTTPProxyAuth proxy_auth: Username and password proxy authentication.
    """
    # NOTE(review): this generator yields once, so it is presumably wrapped
    # with contextlib.contextmanager at the decoration site — confirm.
    # Remember the previous values so they can be restored on exit.
    prev_http_proxy, prev_https_proxy = os.environ.get('HTTP_PROXY'), os.environ.get('HTTPS_PROXY')
    pac = get_pac()
    if pac:
        resolver = ProxyResolver(pac, proxy_auth=proxy_auth)
        proxies = resolver.get_proxy_for_requests(url)
        # Cannot set None for environ. (#27)
        os.environ['HTTP_PROXY'] = proxies.get('http') or ''
        os.environ['HTTPS_PROXY'] = proxies.get('https') or ''
    yield
    # Restore the prior environment; remove keys that did not exist before.
    if prev_http_proxy:
        os.environ['HTTP_PROXY'] = prev_http_proxy
    elif 'HTTP_PROXY' in os.environ:
        del os.environ['HTTP_PROXY']
    if prev_https_proxy:
        os.environ['HTTPS_PROXY'] = prev_https_proxy
    elif 'HTTPS_PROXY' in os.environ:
        del os.environ['HTTPS_PROXY'] | def function[pac_context_for_url, parameter[url, proxy_auth]]:
constant[
This context manager provides a simple way to add rudimentary PAC functionality
to code that cannot be modified to use :class:`PACSession`,
but obeys the ``HTTP_PROXY`` and ``HTTPS_PROXY`` environment variables.
Upon entering this context, PAC discovery occurs with default parameters.
If a PAC is found, then it's asked for the proxy to use for the given URL.
The proxy environment variables are then set accordingly.
Note that this provides a very simplified PAC experience that's insufficient for some scenarios.
:param url: Consult the PAC for the proxy to use for this URL.
:param requests.auth.HTTPProxyAuth proxy_auth: Username and password proxy authentication.
]
<ast.Tuple object at 0x7da20c795390> assign[=] tuple[[<ast.Call object at 0x7da20c794310>, <ast.Call object at 0x7da204564130>]]
variable[pac] assign[=] call[name[get_pac], parameter[]]
if name[pac] begin[:]
variable[resolver] assign[=] call[name[ProxyResolver], parameter[name[pac]]]
variable[proxies] assign[=] call[name[resolver].get_proxy_for_requests, parameter[name[url]]]
call[name[os].environ][constant[HTTP_PROXY]] assign[=] <ast.BoolOp object at 0x7da204565540>
call[name[os].environ][constant[HTTPS_PROXY]] assign[=] <ast.BoolOp object at 0x7da204567d30>
<ast.Yield object at 0x7da204565330>
if name[prev_http_proxy] begin[:]
call[name[os].environ][constant[HTTP_PROXY]] assign[=] name[prev_http_proxy]
if name[prev_https_proxy] begin[:]
call[name[os].environ][constant[HTTPS_PROXY]] assign[=] name[prev_https_proxy] | keyword[def] identifier[pac_context_for_url] ( identifier[url] , identifier[proxy_auth] = keyword[None] ):
literal[string]
identifier[prev_http_proxy] , identifier[prev_https_proxy] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] ), identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
identifier[pac] = identifier[get_pac] ()
keyword[if] identifier[pac] :
identifier[resolver] = identifier[ProxyResolver] ( identifier[pac] , identifier[proxy_auth] = identifier[proxy_auth] )
identifier[proxies] = identifier[resolver] . identifier[get_proxy_for_requests] ( identifier[url] )
identifier[os] . identifier[environ] [ literal[string] ]= identifier[proxies] . identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[os] . identifier[environ] [ literal[string] ]= identifier[proxies] . identifier[get] ( literal[string] ) keyword[or] literal[string]
keyword[yield]
keyword[if] identifier[prev_http_proxy] :
identifier[os] . identifier[environ] [ literal[string] ]= identifier[prev_http_proxy]
keyword[elif] literal[string] keyword[in] identifier[os] . identifier[environ] :
keyword[del] identifier[os] . identifier[environ] [ literal[string] ]
keyword[if] identifier[prev_https_proxy] :
identifier[os] . identifier[environ] [ literal[string] ]= identifier[prev_https_proxy]
keyword[elif] literal[string] keyword[in] identifier[os] . identifier[environ] :
keyword[del] identifier[os] . identifier[environ] [ literal[string] ] | def pac_context_for_url(url, proxy_auth=None):
"""
This context manager provides a simple way to add rudimentary PAC functionality
to code that cannot be modified to use :class:`PACSession`,
but obeys the ``HTTP_PROXY`` and ``HTTPS_PROXY`` environment variables.
Upon entering this context, PAC discovery occurs with default parameters.
If a PAC is found, then it's asked for the proxy to use for the given URL.
The proxy environment variables are then set accordingly.
Note that this provides a very simplified PAC experience that's insufficient for some scenarios.
:param url: Consult the PAC for the proxy to use for this URL.
:param requests.auth.HTTPProxyAuth proxy_auth: Username and password proxy authentication.
"""
(prev_http_proxy, prev_https_proxy) = (os.environ.get('HTTP_PROXY'), os.environ.get('HTTPS_PROXY'))
pac = get_pac()
if pac:
resolver = ProxyResolver(pac, proxy_auth=proxy_auth)
proxies = resolver.get_proxy_for_requests(url) # Cannot set None for environ. (#27)
os.environ['HTTP_PROXY'] = proxies.get('http') or ''
os.environ['HTTPS_PROXY'] = proxies.get('https') or '' # depends on [control=['if'], data=[]]
yield
if prev_http_proxy:
os.environ['HTTP_PROXY'] = prev_http_proxy # depends on [control=['if'], data=[]]
elif 'HTTP_PROXY' in os.environ:
del os.environ['HTTP_PROXY'] # depends on [control=['if'], data=[]]
if prev_https_proxy:
os.environ['HTTPS_PROXY'] = prev_https_proxy # depends on [control=['if'], data=[]]
elif 'HTTPS_PROXY' in os.environ:
del os.environ['HTTPS_PROXY'] # depends on [control=['if'], data=[]] |
def set_limit_overrides(self, override_dict, override_ta=True):
    """
    Set manual overrides on AWS service limits, i.e. if you
    had limits increased by AWS support. This takes a dict in
    the same form as that returned by :py:meth:`~.get_limits`,
    i.e. service_name (str) keys to nested dict of limit_name
    (str) to limit value (int) like:
    ::
        {
            'EC2': {
              'Running On-Demand t2.micro Instances': 1000,
              'Running On-Demand r3.4xlarge Instances': 1000,
            }
        }
    Internally, for each limit override for each service in
    ``override_dict``, this method calls
    :py:meth:`._AwsService.set_limit_override` on the corresponding
    _AwsService instance.
    Explicitly set limit overrides using this method will take
    precedence over default limits. They will also take precedence over
    limit information obtained via Trusted Advisor, unless ``override_ta``
    is set to ``False``.
    :param override_dict: dict of overrides to default limits
    :type override_dict: dict
    :param override_ta: whether or not to use this value even if Trusted
      Advisor supplies limit information
    :type override_ta: bool
    :raises: :py:exc:`ValueError` if limit_name is not known to the
      service instance
    """
    # Delegate each (service, limit) pair to its _AwsService instance;
    # an unknown service name raises KeyError from self.services.
    for svc_name in override_dict:
        for lim_name in override_dict[svc_name]:
            self.services[svc_name].set_limit_override(
                lim_name,
                override_dict[svc_name][lim_name],
                override_ta=override_ta
            ) | def function[set_limit_overrides, parameter[self, override_dict, override_ta]]:
constant[
Set manual overrides on AWS service limits, i.e. if you
had limits increased by AWS support. This takes a dict in
the same form as that returned by :py:meth:`~.get_limits`,
i.e. service_name (str) keys to nested dict of limit_name
(str) to limit value (int) like:
::
{
'EC2': {
'Running On-Demand t2.micro Instances': 1000,
'Running On-Demand r3.4xlarge Instances': 1000,
}
}
Internally, for each limit override for each service in
``override_dict``, this method calls
:py:meth:`._AwsService.set_limit_override` on the corresponding
_AwsService instance.
Explicitly set limit overrides using this method will take
precedence over default limits. They will also take precedence over
limit information obtained via Trusted Advisor, unless ``override_ta``
is set to ``False``.
:param override_dict: dict of overrides to default limits
:type override_dict: dict
:param override_ta: whether or not to use this value even if Trusted
Advisor supplies limit information
:type override_ta: bool
:raises: :py:exc:`ValueError` if limit_name is not known to the
service instance
]
for taget[name[svc_name]] in starred[name[override_dict]] begin[:]
for taget[name[lim_name]] in starred[call[name[override_dict]][name[svc_name]]] begin[:]
call[call[name[self].services][name[svc_name]].set_limit_override, parameter[name[lim_name], call[call[name[override_dict]][name[svc_name]]][name[lim_name]]]] | keyword[def] identifier[set_limit_overrides] ( identifier[self] , identifier[override_dict] , identifier[override_ta] = keyword[True] ):
literal[string]
keyword[for] identifier[svc_name] keyword[in] identifier[override_dict] :
keyword[for] identifier[lim_name] keyword[in] identifier[override_dict] [ identifier[svc_name] ]:
identifier[self] . identifier[services] [ identifier[svc_name] ]. identifier[set_limit_override] (
identifier[lim_name] ,
identifier[override_dict] [ identifier[svc_name] ][ identifier[lim_name] ],
identifier[override_ta] = identifier[override_ta]
) | def set_limit_overrides(self, override_dict, override_ta=True):
"""
Set manual overrides on AWS service limits, i.e. if you
had limits increased by AWS support. This takes a dict in
the same form as that returned by :py:meth:`~.get_limits`,
i.e. service_name (str) keys to nested dict of limit_name
(str) to limit value (int) like:
::
{
'EC2': {
'Running On-Demand t2.micro Instances': 1000,
'Running On-Demand r3.4xlarge Instances': 1000,
}
}
Internally, for each limit override for each service in
``override_dict``, this method calls
:py:meth:`._AwsService.set_limit_override` on the corresponding
_AwsService instance.
Explicitly set limit overrides using this method will take
precedence over default limits. They will also take precedence over
limit information obtained via Trusted Advisor, unless ``override_ta``
is set to ``False``.
:param override_dict: dict of overrides to default limits
:type override_dict: dict
:param override_ta: whether or not to use this value even if Trusted
Advisor supplies limit information
:type override_ta: bool
:raises: :py:exc:`ValueError` if limit_name is not known to the
service instance
"""
for svc_name in override_dict:
for lim_name in override_dict[svc_name]:
self.services[svc_name].set_limit_override(lim_name, override_dict[svc_name][lim_name], override_ta=override_ta) # depends on [control=['for'], data=['lim_name']] # depends on [control=['for'], data=['svc_name']] |
def get_all_teams(self, mine=None, top=None, skip=None):
    """GetAllTeams.
    [Preview API] Get a list of all teams.

    :param bool mine: If true, return only teams the requesting user is a
        member of; otherwise return all teams the user has read access to.
    :param int top: Maximum number of teams to return.
    :param int skip: Number of teams to skip (for paging).
    :rtype: [WebApiTeam]
    """
    # Serialize only the parameters that were actually supplied.
    query_parameters = {}
    optional_params = (
        ('$mine', 'mine', mine, 'bool'),
        ('$top', 'top', top, 'int'),
        ('$skip', 'skip', skip, 'int'),
    )
    for query_key, param_name, value, type_name in optional_params:
        if value is not None:
            query_parameters[query_key] = self._serialize.query(param_name, value, type_name)
    response = self._send(http_method='GET',
                          location_id='7a4d9ee9-3433-4347-b47a-7a80f1cf307e',
                          version='5.0-preview.2',
                          query_parameters=query_parameters)
    return self._deserialize('[WebApiTeam]', self._unwrap_collection(response))
constant[GetAllTeams.
[Preview API] Get a list of all teams.
:param bool mine: If true return all the teams requesting user is member, otherwise return all the teams user has read access
:param int top: Maximum number of teams to return.
:param int skip: Number of teams to skip.
:rtype: [WebApiTeam]
]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[mine] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[$mine]] assign[=] call[name[self]._serialize.query, parameter[constant[mine], name[mine], constant[bool]]]
if compare[name[top] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[$top]] assign[=] call[name[self]._serialize.query, parameter[constant[top], name[top], constant[int]]]
if compare[name[skip] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[$skip]] assign[=] call[name[self]._serialize.query, parameter[constant[skip], name[skip], constant[int]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[[WebApiTeam]], call[name[self]._unwrap_collection, parameter[name[response]]]]]] | keyword[def] identifier[get_all_teams] ( identifier[self] , identifier[mine] = keyword[None] , identifier[top] = keyword[None] , identifier[skip] = keyword[None] ):
literal[string]
identifier[query_parameters] ={}
keyword[if] identifier[mine] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[mine] , literal[string] )
keyword[if] identifier[top] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[top] , literal[string] )
keyword[if] identifier[skip] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[skip] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] )) | def get_all_teams(self, mine=None, top=None, skip=None):
"""GetAllTeams.
[Preview API] Get a list of all teams.
:param bool mine: If true return all the teams requesting user is member, otherwise return all the teams user has read access
:param int top: Maximum number of teams to return.
:param int skip: Number of teams to skip.
:rtype: [WebApiTeam]
"""
query_parameters = {}
if mine is not None:
query_parameters['$mine'] = self._serialize.query('mine', mine, 'bool') # depends on [control=['if'], data=['mine']]
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int') # depends on [control=['if'], data=['top']]
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') # depends on [control=['if'], data=['skip']]
response = self._send(http_method='GET', location_id='7a4d9ee9-3433-4347-b47a-7a80f1cf307e', version='5.0-preview.2', query_parameters=query_parameters)
return self._deserialize('[WebApiTeam]', self._unwrap_collection(response)) |
def vote_count(self):
    """
    Return the total number of votes cast across all of this poll's
    options, or 0 when no votes exist.
    """
    option_ids = [option.id for option in self.polloption_set.all()]
    option_type = ContentType.objects.get(app_label='poll', model='polloption')
    total = Vote.objects.filter(
        content_type=option_type,
        object_id__in=option_ids,
    ).aggregate(Sum('vote'))['vote__sum']
    # aggregate() yields None when the queryset is empty; normalise to 0.
    return total or 0
constant[
Returns the total number of votes cast across all the
poll's options.
]
return[<ast.BoolOp object at 0x7da1b14d2e60>] | keyword[def] identifier[vote_count] ( identifier[self] ):
literal[string]
keyword[return] identifier[Vote] . identifier[objects] . identifier[filter] (
identifier[content_type] = identifier[ContentType] . identifier[objects] . identifier[get] ( identifier[app_label] = literal[string] , identifier[model] = literal[string] ),
identifier[object_id__in] =[ identifier[o] . identifier[id] keyword[for] identifier[o] keyword[in] identifier[self] . identifier[polloption_set] . identifier[all] ()]
). identifier[aggregate] ( identifier[Sum] ( literal[string] ))[ literal[string] ] keyword[or] literal[int] | def vote_count(self):
"""
Returns the total number of votes cast across all the
poll's options.
"""
return Vote.objects.filter(content_type=ContentType.objects.get(app_label='poll', model='polloption'), object_id__in=[o.id for o in self.polloption_set.all()]).aggregate(Sum('vote'))['vote__sum'] or 0 |
def send(_dict, addr):
    """
    Sends key/value pairs via UDP.

    :param _dict: mapping of metric name to statsd payload string,
        e.g. ``{"example.send": "11|c"}``; each pair is sent as
        ``name:payload`` encoded as UTF-8.
    :param addr: ``(host, port)`` tuple to send the datagrams to.

    >>> StatsdClient.send({"example.send":"11|c"}, ("127.0.0.1", 8125))
    """
    # TODO(rbtz@): IPv6 support
    udp_sock = socket(AF_INET, SOCK_DGRAM)
    try:
        # TODO(rbtz@): Add batch support
        for item in _dict.items():
            udp_sock.sendto(":".join(item).encode('utf-8'), addr)
    finally:
        # Close the socket so repeated calls do not leak file descriptors
        # (the original left it open until garbage collection).
        udp_sock.close()
constant[
Sends key/value pairs via UDP.
>>> StatsdClient.send({"example.send":"11|c"}, ("127.0.0.1", 8125))
]
variable[udp_sock] assign[=] call[name[socket], parameter[name[AF_INET], name[SOCK_DGRAM]]]
for taget[name[item]] in starred[call[name[_dict].items, parameter[]]] begin[:]
call[name[udp_sock].sendto, parameter[call[call[constant[:].join, parameter[name[item]]].encode, parameter[constant[utf-8]]], name[addr]]] | keyword[def] identifier[send] ( identifier[_dict] , identifier[addr] ):
literal[string]
identifier[udp_sock] = identifier[socket] ( identifier[AF_INET] , identifier[SOCK_DGRAM] )
keyword[for] identifier[item] keyword[in] identifier[_dict] . identifier[items] ():
identifier[udp_sock] . identifier[sendto] ( literal[string] . identifier[join] ( identifier[item] ). identifier[encode] ( literal[string] ), identifier[addr] ) | def send(_dict, addr):
"""
Sends key/value pairs via UDP.
>>> StatsdClient.send({"example.send":"11|c"}, ("127.0.0.1", 8125))
"""
# TODO(rbtz@): IPv6 support
udp_sock = socket(AF_INET, SOCK_DGRAM)
# TODO(rbtz@): Add batch support
for item in _dict.items():
udp_sock.sendto(':'.join(item).encode('utf-8'), addr) # depends on [control=['for'], data=['item']] |
def list_namespaced_pod(self, namespace, **kwargs):
    """
    list or watch objects of kind Pod.

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the request thread is returned and the result
    is obtained via ``thread.get()``.

    >>> thread = api.list_namespaced_pod(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continuation token from a previous chunked list call.
    :param str field_selector: Restrict results by object fields. Defaults to everything.
    :param str label_selector: Restrict results by object labels. Defaults to everything.
    :param int limit: Maximum number of responses for a single list call
        (server sets ``continue`` metadata when more items exist).
    :param str resource_version: Show changes after this resource version
        (watch) or control list freshness.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of
        returning a one-shot list. Specify resourceVersion.
    :return: V1PodList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper always get the payload only,
    # never the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_pod_with_http_info(namespace, **kwargs)
    return self.list_namespaced_pod_with_http_info(namespace, **kwargs)
constant[
list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].list_namespaced_pod_with_http_info, parameter[name[namespace]]]] | keyword[def] identifier[list_namespaced_pod] ( identifier[self] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[list_namespaced_pod_with_http_info] ( identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[list_namespaced_pod_with_http_info] ( identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def list_namespaced_pod(self, namespace, **kwargs):
"""
list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_pod_with_http_info(namespace, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.list_namespaced_pod_with_http_info(namespace, **kwargs)
return data |
def add_task(self, fn, inputs=None, outputs=None):
    """
    Append a new Task wrapping ``fn`` (with optional inputs/outputs)
    to this workflow.  Returns ``self`` so calls can be chained.
    """
    new_task = Task(fn, inputs, outputs)
    self.tasks.append(new_task)
    return self
constant[
Adds a task to the workflow.
Returns self to facilitate chaining method calls
]
call[name[self].tasks.append, parameter[call[name[Task], parameter[name[fn], name[inputs], name[outputs]]]]]
return[name[self]] | keyword[def] identifier[add_task] ( identifier[self] , identifier[fn] , identifier[inputs] = keyword[None] , identifier[outputs] = keyword[None] ):
literal[string]
identifier[self] . identifier[tasks] . identifier[append] ( identifier[Task] ( identifier[fn] , identifier[inputs] , identifier[outputs] ))
keyword[return] identifier[self] | def add_task(self, fn, inputs=None, outputs=None):
"""
Adds a task to the workflow.
Returns self to facilitate chaining method calls
"""
# self.tasks.append({'task': task, 'inputs': inputs, 'outputs': outputs})
self.tasks.append(Task(fn, inputs, outputs))
return self |
def toxml(self):
    """
    Serialize this object into its LEMS XML string representation.
    """
    # Build the attribute text; each present attribute contributes a
    # trailing space, matching the element formats below.
    attr_text = ''
    for attr_name, attr_value in (('component', self.component),
                                  ('componentType', self.component_type),
                                  ('number', self.number)):
        if attr_value:
            attr_text += '{0}="{1}" '.format(attr_name, attr_value)
    if not self.assignments:
        return '<MultiInstantiate {0}/>'.format(attr_text)
    children = ''.join(assign.toxml() for assign in self.assignments)
    return '<MultiInstantiate {0}>{1}</MultiInstantiate>'.format(attr_text, children)
constant[
Exports this object into a LEMS XML object
]
variable[argstr] assign[=] constant[]
if name[self].component begin[:]
<ast.AugAssign object at 0x7da1b234b910>
if name[self].component_type begin[:]
<ast.AugAssign object at 0x7da1b234be50>
if name[self].number begin[:]
<ast.AugAssign object at 0x7da1b236b220>
if name[self].assignments begin[:]
variable[chxmlstr] assign[=] constant[]
for taget[name[assign]] in starred[name[self].assignments] begin[:]
<ast.AugAssign object at 0x7da1b2368d60>
return[call[constant[<MultiInstantiate {0}>{1}</MultiInstantiate>].format, parameter[name[argstr], name[chxmlstr]]]] | keyword[def] identifier[toxml] ( identifier[self] ):
literal[string]
identifier[argstr] = literal[string]
keyword[if] identifier[self] . identifier[component] :
identifier[argstr] += literal[string] . identifier[format] ( identifier[self] . identifier[component] )
keyword[if] identifier[self] . identifier[component_type] :
identifier[argstr] += literal[string] . identifier[format] ( identifier[self] . identifier[component_type] )
keyword[if] identifier[self] . identifier[number] :
identifier[argstr] += literal[string] . identifier[format] ( identifier[self] . identifier[number] )
keyword[if] identifier[self] . identifier[assignments] :
identifier[chxmlstr] = literal[string]
keyword[for] identifier[assign] keyword[in] identifier[self] . identifier[assignments] :
identifier[chxmlstr] += identifier[assign] . identifier[toxml] ()
keyword[return] literal[string] . identifier[format] ( identifier[argstr] , identifier[chxmlstr] )
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[argstr] ) | def toxml(self):
"""
Exports this object into a LEMS XML object
"""
argstr = ''
if self.component:
argstr += 'component="{0}" '.format(self.component) # depends on [control=['if'], data=[]]
if self.component_type:
argstr += 'componentType="{0}" '.format(self.component_type) # depends on [control=['if'], data=[]]
if self.number:
argstr += 'number="{0}" '.format(self.number) # depends on [control=['if'], data=[]]
if self.assignments:
chxmlstr = ''
for assign in self.assignments:
chxmlstr += assign.toxml() # depends on [control=['for'], data=['assign']]
return '<MultiInstantiate {0}>{1}</MultiInstantiate>'.format(argstr, chxmlstr) # depends on [control=['if'], data=[]]
else:
return '<MultiInstantiate {0}/>'.format(argstr) |
def get_distinct_values_from_cols(self, l_col_list):
    """
    Return the distinct value combinations for the named columns.

    For a single column the sorted distinct values are returned; for two
    or more columns the Cartesian product of each column's distinct
    values is returned as a list of tuples.  Note that this is the MAX
    set of permutations of the combinations, so a returned combination
    is not guaranteed to actually occur in the dataset.
    """
    import itertools

    # One set of distinct values per requested column.
    uniq_vals = [set(self.get_col_data_by_name(col_name)) for col_name in l_col_list]
    if not uniq_vals:
        return []
    if len(uniq_vals) == 1:
        # Fixed: previously returned the one-element list containing a
        # raw set (sorted() over a 1-element list was a no-op); return
        # the sorted distinct values themselves, as documented.
        return sorted(uniq_vals[0])
    # Generalizes the old two-column special case to any number of
    # columns and replaces the former "TODO" sentinel return of -44.
    return list(itertools.product(*uniq_vals))
constant[
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guarenteed to have values in each case.
]
variable[uniq_vals] assign[=] list[[]]
for taget[name[l_col_name]] in starred[name[l_col_list]] begin[:]
call[name[uniq_vals].append, parameter[call[name[set], parameter[call[name[self].get_col_data_by_name, parameter[name[l_col_name]]]]]]]
if compare[call[name[len], parameter[name[l_col_list]]] equal[==] constant[0]] begin[:]
return[list[[]]] | keyword[def] identifier[get_distinct_values_from_cols] ( identifier[self] , identifier[l_col_list] ):
literal[string]
identifier[uniq_vals] =[]
keyword[for] identifier[l_col_name] keyword[in] identifier[l_col_list] :
identifier[uniq_vals] . identifier[append] ( identifier[set] ( identifier[self] . identifier[get_col_data_by_name] ( identifier[l_col_name] )))
keyword[if] identifier[len] ( identifier[l_col_list] )== literal[int] :
keyword[return] []
keyword[elif] identifier[len] ( identifier[l_col_list] )== literal[int] :
keyword[return] identifier[sorted] ([ identifier[v] keyword[for] identifier[v] keyword[in] identifier[uniq_vals] ])
keyword[elif] identifier[len] ( identifier[l_col_list] )== literal[int] :
identifier[res] =[]
identifier[res] =[( identifier[a] , identifier[b] ) keyword[for] identifier[a] keyword[in] identifier[uniq_vals] [ literal[int] ] keyword[for] identifier[b] keyword[in] identifier[uniq_vals] [ literal[int] ]]
keyword[return] identifier[res]
keyword[else] :
identifier[print] ( literal[string] )
keyword[return] - literal[int] | def get_distinct_values_from_cols(self, l_col_list):
"""
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guarenteed to have values in each case.
"""
uniq_vals = []
for l_col_name in l_col_list: #print('col_name: ' + l_col_name)
uniq_vals.append(set(self.get_col_data_by_name(l_col_name))) # depends on [control=['for'], data=['l_col_name']] #print(' unique values = ', uniq_vals)
#print(' unique values[0] = ', uniq_vals[0])
#print(' unique values[1] = ', uniq_vals[1])
if len(l_col_list) == 0:
return [] # depends on [control=['if'], data=[]]
elif len(l_col_list) == 1:
return sorted([v for v in uniq_vals]) # depends on [control=['if'], data=[]]
elif len(l_col_list) == 2:
res = []
res = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
return res # depends on [control=['if'], data=[]]
else:
print('TODO ')
return -44 |
def delete_company(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Deletes specified company.

    Prerequisite: The company has no jobs associated with it.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>>
        >>> client = talent_v4beta1.CompanyServiceClient()
        >>>
        >>> name = client.company_path('[PROJECT]', '[COMPANY]')
        >>>
        >>> client.delete_company(name)

    Args:
        name (str): Required. The resource name of the company to be
            deleted, in the form
            "projects/{project\_id}/companies/{company\_id}", for
            example "projects/api-test-project/companies/foo".
        retry (Optional[google.api_core.retry.Retry]): Retry policy for
            the request; ``None`` disables retries.
        timeout (Optional[float]): Per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional
            metadata supplied with the request.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the raw transport call with retry/timeout handling on
    # first use, then reuse the cached wrapped callable.
    if "delete_company" not in self._inner_api_calls:
        wrapped = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_company,
            default_retry=self._method_configs["DeleteCompany"].retry,
            default_timeout=self._method_configs["DeleteCompany"].timeout,
            client_info=self._client_info,
        )
        self._inner_api_calls["delete_company"] = wrapped

    request = company_service_pb2.DeleteCompanyRequest(name=name)
    self._inner_api_calls["delete_company"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
constant[
Deletes specified company.
Prerequisite: The company has no jobs associated with it.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> name = client.company_path('[PROJECT]', '[COMPANY]')
>>>
>>> client.delete_company(name)
Args:
name (str): Required.
The resource name of the company to be deleted.
The format is "projects/{project\_id}/companies/{company\_id}", for
example, "projects/api-test-project/companies/foo".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[delete_company] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[delete_company]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.delete_company]]
variable[request] assign[=] call[name[company_service_pb2].DeleteCompanyRequest, parameter[]]
call[call[name[self]._inner_api_calls][constant[delete_company]], parameter[name[request]]] | keyword[def] identifier[delete_company] (
identifier[self] ,
identifier[name] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[delete_company] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[company_service_pb2] . identifier[DeleteCompanyRequest] ( identifier[name] = identifier[name] )
identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def delete_company(self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Deletes specified company.
Prerequisite: The company has no jobs associated with it.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> name = client.company_path('[PROJECT]', '[COMPANY]')
>>>
>>> client.delete_company(name)
Args:
name (str): Required.
The resource name of the company to be deleted.
The format is "projects/{project\\_id}/companies/{company\\_id}", for
example, "projects/api-test-project/companies/foo".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_company' not in self._inner_api_calls:
self._inner_api_calls['delete_company'] = google.api_core.gapic_v1.method.wrap_method(self.transport.delete_company, default_retry=self._method_configs['DeleteCompany'].retry, default_timeout=self._method_configs['DeleteCompany'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = company_service_pb2.DeleteCompanyRequest(name=name)
self._inner_api_calls['delete_company'](request, retry=retry, timeout=timeout, metadata=metadata) |
def regions(self):
"""
This method will return all the available regions within the
DigitalOcean cloud.
"""
json = self.request('/regions', method='GET')
status = json.get('status')
if status == 'OK':
regions_json = json.get('regions', [])
regions = [Region.from_json(region) for region in regions_json]
return regions
else:
message = json.get('message')
raise DOPException('[%s]: %s' % (status, message)) | def function[regions, parameter[self]]:
constant[
This method will return all the available regions within the
DigitalOcean cloud.
]
variable[json] assign[=] call[name[self].request, parameter[constant[/regions]]]
variable[status] assign[=] call[name[json].get, parameter[constant[status]]]
if compare[name[status] equal[==] constant[OK]] begin[:]
variable[regions_json] assign[=] call[name[json].get, parameter[constant[regions], list[[]]]]
variable[regions] assign[=] <ast.ListComp object at 0x7da18f00de10>
return[name[regions]] | keyword[def] identifier[regions] ( identifier[self] ):
literal[string]
identifier[json] = identifier[self] . identifier[request] ( literal[string] , identifier[method] = literal[string] )
identifier[status] = identifier[json] . identifier[get] ( literal[string] )
keyword[if] identifier[status] == literal[string] :
identifier[regions_json] = identifier[json] . identifier[get] ( literal[string] ,[])
identifier[regions] =[ identifier[Region] . identifier[from_json] ( identifier[region] ) keyword[for] identifier[region] keyword[in] identifier[regions_json] ]
keyword[return] identifier[regions]
keyword[else] :
identifier[message] = identifier[json] . identifier[get] ( literal[string] )
keyword[raise] identifier[DOPException] ( literal[string] %( identifier[status] , identifier[message] )) | def regions(self):
"""
This method will return all the available regions within the
DigitalOcean cloud.
"""
json = self.request('/regions', method='GET')
status = json.get('status')
if status == 'OK':
regions_json = json.get('regions', [])
regions = [Region.from_json(region) for region in regions_json]
return regions # depends on [control=['if'], data=[]]
else:
message = json.get('message')
raise DOPException('[%s]: %s' % (status, message)) |
def makeblastdb(self):
"""Makes blast database files from targets as necessary"""
while True: # while daemon
fastapath = self.dqueue.get() # grabs fastapath from dqueue
# remove the path and the file extension for easier future globbing
db = os.path.splitext(fastapath)[0]
nhr = '{}.nhr'.format(db) # add nhr for searching
# fnull = open(os.devnull, 'w') # define /dev/null
if not os.path.isfile(str(nhr)): # if check for already existing dbs
# Create the databases
# TODO use MakeBLASTdb class
threadlock = threading.Lock()
command = 'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'.format(fastapath, db)
# subprocess.call(shlex.split('makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'
# .format(fastapath, db)), stdout=fnull, stderr=fnull)
out, err = run_subprocess(command)
threadlock.acquire()
write_to_logfile(command, command, self.logfile, None, None, None, None)
write_to_logfile(out, err, self.logfile, None, None, None, None)
threadlock.release()
self.dqueue.task_done() | def function[makeblastdb, parameter[self]]:
constant[Makes blast database files from targets as necessary]
while constant[True] begin[:]
variable[fastapath] assign[=] call[name[self].dqueue.get, parameter[]]
variable[db] assign[=] call[call[name[os].path.splitext, parameter[name[fastapath]]]][constant[0]]
variable[nhr] assign[=] call[constant[{}.nhr].format, parameter[name[db]]]
if <ast.UnaryOp object at 0x7da2047ea4d0> begin[:]
variable[threadlock] assign[=] call[name[threading].Lock, parameter[]]
variable[command] assign[=] call[constant[makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}].format, parameter[name[fastapath], name[db]]]
<ast.Tuple object at 0x7da18f09fd90> assign[=] call[name[run_subprocess], parameter[name[command]]]
call[name[threadlock].acquire, parameter[]]
call[name[write_to_logfile], parameter[name[command], name[command], name[self].logfile, constant[None], constant[None], constant[None], constant[None]]]
call[name[write_to_logfile], parameter[name[out], name[err], name[self].logfile, constant[None], constant[None], constant[None], constant[None]]]
call[name[threadlock].release, parameter[]]
call[name[self].dqueue.task_done, parameter[]] | keyword[def] identifier[makeblastdb] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
identifier[fastapath] = identifier[self] . identifier[dqueue] . identifier[get] ()
identifier[db] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fastapath] )[ literal[int] ]
identifier[nhr] = literal[string] . identifier[format] ( identifier[db] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[str] ( identifier[nhr] )):
identifier[threadlock] = identifier[threading] . identifier[Lock] ()
identifier[command] = literal[string] . identifier[format] ( identifier[fastapath] , identifier[db] )
identifier[out] , identifier[err] = identifier[run_subprocess] ( identifier[command] )
identifier[threadlock] . identifier[acquire] ()
identifier[write_to_logfile] ( identifier[command] , identifier[command] , identifier[self] . identifier[logfile] , keyword[None] , keyword[None] , keyword[None] , keyword[None] )
identifier[write_to_logfile] ( identifier[out] , identifier[err] , identifier[self] . identifier[logfile] , keyword[None] , keyword[None] , keyword[None] , keyword[None] )
identifier[threadlock] . identifier[release] ()
identifier[self] . identifier[dqueue] . identifier[task_done] () | def makeblastdb(self):
"""Makes blast database files from targets as necessary"""
while True: # while daemon
fastapath = self.dqueue.get() # grabs fastapath from dqueue
# remove the path and the file extension for easier future globbing
db = os.path.splitext(fastapath)[0]
nhr = '{}.nhr'.format(db) # add nhr for searching
# fnull = open(os.devnull, 'w') # define /dev/null
if not os.path.isfile(str(nhr)): # if check for already existing dbs
# Create the databases
# TODO use MakeBLASTdb class
threadlock = threading.Lock()
command = 'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'.format(fastapath, db)
# subprocess.call(shlex.split('makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'
# .format(fastapath, db)), stdout=fnull, stderr=fnull)
(out, err) = run_subprocess(command)
threadlock.acquire()
write_to_logfile(command, command, self.logfile, None, None, None, None)
write_to_logfile(out, err, self.logfile, None, None, None, None)
threadlock.release() # depends on [control=['if'], data=[]]
self.dqueue.task_done() # depends on [control=['while'], data=[]] |
def signatures_to_bytes(signatures: List[Tuple[int, int, int]]) -> bytes:
"""
Convert signatures to bytes
:param signatures: list of tuples(v, r, s)
:return: 65 bytes per signature
"""
return b''.join([signature_to_bytes(vrs) for vrs in signatures]) | def function[signatures_to_bytes, parameter[signatures]]:
constant[
Convert signatures to bytes
:param signatures: list of tuples(v, r, s)
:return: 65 bytes per signature
]
return[call[constant[b''].join, parameter[<ast.ListComp object at 0x7da18dc06f80>]]] | keyword[def] identifier[signatures_to_bytes] ( identifier[signatures] : identifier[List] [ identifier[Tuple] [ identifier[int] , identifier[int] , identifier[int] ]])-> identifier[bytes] :
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[signature_to_bytes] ( identifier[vrs] ) keyword[for] identifier[vrs] keyword[in] identifier[signatures] ]) | def signatures_to_bytes(signatures: List[Tuple[int, int, int]]) -> bytes:
"""
Convert signatures to bytes
:param signatures: list of tuples(v, r, s)
:return: 65 bytes per signature
"""
return b''.join([signature_to_bytes(vrs) for vrs in signatures]) |
def rating_score(obj, user):
"""
Returns the score a user has given an object
"""
if not user.is_authenticated() or not hasattr(obj, '_ratings_field'):
return False
ratings_descriptor = getattr(obj, obj._ratings_field)
try:
rating = ratings_descriptor.get(user=user).score
except ratings_descriptor.model.DoesNotExist:
rating = None
return rating | def function[rating_score, parameter[obj, user]]:
constant[
Returns the score a user has given an object
]
if <ast.BoolOp object at 0x7da20e9b0070> begin[:]
return[constant[False]]
variable[ratings_descriptor] assign[=] call[name[getattr], parameter[name[obj], name[obj]._ratings_field]]
<ast.Try object at 0x7da20e74a890>
return[name[rating]] | keyword[def] identifier[rating_score] ( identifier[obj] , identifier[user] ):
literal[string]
keyword[if] keyword[not] identifier[user] . identifier[is_authenticated] () keyword[or] keyword[not] identifier[hasattr] ( identifier[obj] , literal[string] ):
keyword[return] keyword[False]
identifier[ratings_descriptor] = identifier[getattr] ( identifier[obj] , identifier[obj] . identifier[_ratings_field] )
keyword[try] :
identifier[rating] = identifier[ratings_descriptor] . identifier[get] ( identifier[user] = identifier[user] ). identifier[score]
keyword[except] identifier[ratings_descriptor] . identifier[model] . identifier[DoesNotExist] :
identifier[rating] = keyword[None]
keyword[return] identifier[rating] | def rating_score(obj, user):
"""
Returns the score a user has given an object
"""
if not user.is_authenticated() or not hasattr(obj, '_ratings_field'):
return False # depends on [control=['if'], data=[]]
ratings_descriptor = getattr(obj, obj._ratings_field)
try:
rating = ratings_descriptor.get(user=user).score # depends on [control=['try'], data=[]]
except ratings_descriptor.model.DoesNotExist:
rating = None # depends on [control=['except'], data=[]]
return rating |
def validate(self, value):
"""Return a boolean if the choice is a number in the enumeration"""
if value in list(self.choices.keys()):
self._choice = value
return True
try:
self._choice = list(self.choices.keys())[int(value)]
return True
except (ValueError, IndexError):
self.error_message = '%s is not a valid choice.' % value
return False | def function[validate, parameter[self, value]]:
constant[Return a boolean if the choice is a number in the enumeration]
if compare[name[value] in call[name[list], parameter[call[name[self].choices.keys, parameter[]]]]] begin[:]
name[self]._choice assign[=] name[value]
return[constant[True]]
<ast.Try object at 0x7da1b1f36500> | keyword[def] identifier[validate] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[in] identifier[list] ( identifier[self] . identifier[choices] . identifier[keys] ()):
identifier[self] . identifier[_choice] = identifier[value]
keyword[return] keyword[True]
keyword[try] :
identifier[self] . identifier[_choice] = identifier[list] ( identifier[self] . identifier[choices] . identifier[keys] ())[ identifier[int] ( identifier[value] )]
keyword[return] keyword[True]
keyword[except] ( identifier[ValueError] , identifier[IndexError] ):
identifier[self] . identifier[error_message] = literal[string] % identifier[value]
keyword[return] keyword[False] | def validate(self, value):
"""Return a boolean if the choice is a number in the enumeration"""
if value in list(self.choices.keys()):
self._choice = value
return True # depends on [control=['if'], data=['value']]
try:
self._choice = list(self.choices.keys())[int(value)]
return True # depends on [control=['try'], data=[]]
except (ValueError, IndexError):
self.error_message = '%s is not a valid choice.' % value
return False # depends on [control=['except'], data=[]] |
def run(self): # No "_" in the name, but nevertheless, running in the backed
"""After the fork. Now the process starts running
"""
self.preRun_()
self.running=True
while(self.running):
self.cycle_()
self.handleSignal_()
self.postRun_() | def function[run, parameter[self]]:
constant[After the fork. Now the process starts running
]
call[name[self].preRun_, parameter[]]
name[self].running assign[=] constant[True]
while name[self].running begin[:]
call[name[self].cycle_, parameter[]]
call[name[self].handleSignal_, parameter[]]
call[name[self].postRun_, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[preRun_] ()
identifier[self] . identifier[running] = keyword[True]
keyword[while] ( identifier[self] . identifier[running] ):
identifier[self] . identifier[cycle_] ()
identifier[self] . identifier[handleSignal_] ()
identifier[self] . identifier[postRun_] () | def run(self): # No "_" in the name, but nevertheless, running in the backed
'After the fork. Now the process starts running\n '
self.preRun_()
self.running = True
while self.running:
self.cycle_()
self.handleSignal_() # depends on [control=['while'], data=[]]
self.postRun_() |
def prune_to_subset(self, subset, inplace=False):
""" Prunes the Tree to just the taxon set given in `subset` """
if not subset.issubset(self.labels):
print('"subset" is not a subset')
return
if not inplace:
t = self.copy()
else:
t = self
t._tree.retain_taxa_with_labels(subset)
t._tree.encode_bipartitions()
t._dirty = True
return t | def function[prune_to_subset, parameter[self, subset, inplace]]:
constant[ Prunes the Tree to just the taxon set given in `subset` ]
if <ast.UnaryOp object at 0x7da18bc711e0> begin[:]
call[name[print], parameter[constant["subset" is not a subset]]]
return[None]
if <ast.UnaryOp object at 0x7da18bc719f0> begin[:]
variable[t] assign[=] call[name[self].copy, parameter[]]
call[name[t]._tree.retain_taxa_with_labels, parameter[name[subset]]]
call[name[t]._tree.encode_bipartitions, parameter[]]
name[t]._dirty assign[=] constant[True]
return[name[t]] | keyword[def] identifier[prune_to_subset] ( identifier[self] , identifier[subset] , identifier[inplace] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[subset] . identifier[issubset] ( identifier[self] . identifier[labels] ):
identifier[print] ( literal[string] )
keyword[return]
keyword[if] keyword[not] identifier[inplace] :
identifier[t] = identifier[self] . identifier[copy] ()
keyword[else] :
identifier[t] = identifier[self]
identifier[t] . identifier[_tree] . identifier[retain_taxa_with_labels] ( identifier[subset] )
identifier[t] . identifier[_tree] . identifier[encode_bipartitions] ()
identifier[t] . identifier[_dirty] = keyword[True]
keyword[return] identifier[t] | def prune_to_subset(self, subset, inplace=False):
""" Prunes the Tree to just the taxon set given in `subset` """
if not subset.issubset(self.labels):
print('"subset" is not a subset')
return # depends on [control=['if'], data=[]]
if not inplace:
t = self.copy() # depends on [control=['if'], data=[]]
else:
t = self
t._tree.retain_taxa_with_labels(subset)
t._tree.encode_bipartitions()
t._dirty = True
return t |
def _post_tags(self, fileobj):
"""Raises ogg.error"""
page = OggPage.find_last(fileobj, self.serial, finishing=True)
if page is None:
raise OggVorbisHeaderError
self.length = page.position / float(self.sample_rate) | def function[_post_tags, parameter[self, fileobj]]:
constant[Raises ogg.error]
variable[page] assign[=] call[name[OggPage].find_last, parameter[name[fileobj], name[self].serial]]
if compare[name[page] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b20b7310>
name[self].length assign[=] binary_operation[name[page].position / call[name[float], parameter[name[self].sample_rate]]] | keyword[def] identifier[_post_tags] ( identifier[self] , identifier[fileobj] ):
literal[string]
identifier[page] = identifier[OggPage] . identifier[find_last] ( identifier[fileobj] , identifier[self] . identifier[serial] , identifier[finishing] = keyword[True] )
keyword[if] identifier[page] keyword[is] keyword[None] :
keyword[raise] identifier[OggVorbisHeaderError]
identifier[self] . identifier[length] = identifier[page] . identifier[position] / identifier[float] ( identifier[self] . identifier[sample_rate] ) | def _post_tags(self, fileobj):
"""Raises ogg.error"""
page = OggPage.find_last(fileobj, self.serial, finishing=True)
if page is None:
raise OggVorbisHeaderError # depends on [control=['if'], data=[]]
self.length = page.position / float(self.sample_rate) |
def list_orders(self, params=None):
"""Lists existing order transactions."""
request = self._get('transactions/orders', params)
return self.responder(request) | def function[list_orders, parameter[self, params]]:
constant[Lists existing order transactions.]
variable[request] assign[=] call[name[self]._get, parameter[constant[transactions/orders], name[params]]]
return[call[name[self].responder, parameter[name[request]]]] | keyword[def] identifier[list_orders] ( identifier[self] , identifier[params] = keyword[None] ):
literal[string]
identifier[request] = identifier[self] . identifier[_get] ( literal[string] , identifier[params] )
keyword[return] identifier[self] . identifier[responder] ( identifier[request] ) | def list_orders(self, params=None):
"""Lists existing order transactions."""
request = self._get('transactions/orders', params)
return self.responder(request) |
def _get_indicators_for_report_page_generator(self, report_id, start_page=0, page_size=None):
"""
Creates a generator from the |get_indicators_for_report_page| method that returns each successive page.
:param str report_id: The ID of the report to get indicators for.
:param int start_page: The page to start on.
:param int page_size: The size of each page.
:return: The generator.
"""
get_page = functools.partial(self.get_indicators_for_report_page, report_id=report_id)
return Page.get_page_generator(get_page, start_page, page_size) | def function[_get_indicators_for_report_page_generator, parameter[self, report_id, start_page, page_size]]:
constant[
Creates a generator from the |get_indicators_for_report_page| method that returns each successive page.
:param str report_id: The ID of the report to get indicators for.
:param int start_page: The page to start on.
:param int page_size: The size of each page.
:return: The generator.
]
variable[get_page] assign[=] call[name[functools].partial, parameter[name[self].get_indicators_for_report_page]]
return[call[name[Page].get_page_generator, parameter[name[get_page], name[start_page], name[page_size]]]] | keyword[def] identifier[_get_indicators_for_report_page_generator] ( identifier[self] , identifier[report_id] , identifier[start_page] = literal[int] , identifier[page_size] = keyword[None] ):
literal[string]
identifier[get_page] = identifier[functools] . identifier[partial] ( identifier[self] . identifier[get_indicators_for_report_page] , identifier[report_id] = identifier[report_id] )
keyword[return] identifier[Page] . identifier[get_page_generator] ( identifier[get_page] , identifier[start_page] , identifier[page_size] ) | def _get_indicators_for_report_page_generator(self, report_id, start_page=0, page_size=None):
"""
Creates a generator from the |get_indicators_for_report_page| method that returns each successive page.
:param str report_id: The ID of the report to get indicators for.
:param int start_page: The page to start on.
:param int page_size: The size of each page.
:return: The generator.
"""
get_page = functools.partial(self.get_indicators_for_report_page, report_id=report_id)
return Page.get_page_generator(get_page, start_page, page_size) |
def check_url_accessibility(url, timeout=10):
'''
Check whether the URL accessible and returns HTTP 200 OK or not
if not raises ValidationError
'''
if(url=='localhost'):
url = 'http://127.0.0.1'
try:
req = urllib2.urlopen(url, timeout=timeout)
if (req.getcode()==200):
return True
except Exception:
pass
fail("URL '%s' is not accessible from this machine" % url) | def function[check_url_accessibility, parameter[url, timeout]]:
constant[
Check whether the URL accessible and returns HTTP 200 OK or not
if not raises ValidationError
]
if compare[name[url] equal[==] constant[localhost]] begin[:]
variable[url] assign[=] constant[http://127.0.0.1]
<ast.Try object at 0x7da204345780>
call[name[fail], parameter[binary_operation[constant[URL '%s' is not accessible from this machine] <ast.Mod object at 0x7da2590d6920> name[url]]]] | keyword[def] identifier[check_url_accessibility] ( identifier[url] , identifier[timeout] = literal[int] ):
literal[string]
keyword[if] ( identifier[url] == literal[string] ):
identifier[url] = literal[string]
keyword[try] :
identifier[req] = identifier[urllib2] . identifier[urlopen] ( identifier[url] , identifier[timeout] = identifier[timeout] )
keyword[if] ( identifier[req] . identifier[getcode] ()== literal[int] ):
keyword[return] keyword[True]
keyword[except] identifier[Exception] :
keyword[pass]
identifier[fail] ( literal[string] % identifier[url] ) | def check_url_accessibility(url, timeout=10):
"""
Check whether the URL accessible and returns HTTP 200 OK or not
if not raises ValidationError
"""
if url == 'localhost':
url = 'http://127.0.0.1' # depends on [control=['if'], data=['url']]
try:
req = urllib2.urlopen(url, timeout=timeout)
if req.getcode() == 200:
return True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
fail("URL '%s' is not accessible from this machine" % url) |
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly) | def function[manage_git_folder, parameter[gh_token, temp_dir, git_id]]:
constant[Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
]
call[name[_LOGGER].debug, parameter[constant[Git ID %s], name[git_id]]]
if call[call[name[Path], parameter[name[git_id]]].exists, parameter[]] begin[:]
<ast.Yield object at 0x7da1b2436b90>
return[None]
variable[split_git_id] assign[=] call[name[git_id].split, parameter[constant[@]]]
variable[branch] assign[=] <ast.IfExp object at 0x7da1b2435f00>
call[name[clone_to_path], parameter[name[gh_token], name[temp_dir], call[name[split_git_id]][constant[0]]]]
<ast.Try object at 0x7da1b25d8190> | keyword[def] identifier[manage_git_folder] ( identifier[gh_token] , identifier[temp_dir] , identifier[git_id] ,*, identifier[pr_number] = keyword[None] ):
literal[string]
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[git_id] )
keyword[if] identifier[Path] ( identifier[git_id] ). identifier[exists] ():
keyword[yield] identifier[git_id]
keyword[return]
identifier[split_git_id] = identifier[git_id] . identifier[split] ( literal[string] )
identifier[branch] = identifier[split_git_id] [ literal[int] ] keyword[if] identifier[len] ( identifier[split_git_id] )> literal[int] keyword[else] keyword[None]
identifier[clone_to_path] ( identifier[gh_token] , identifier[temp_dir] , identifier[split_git_id] [ literal[int] ], identifier[branch_or_commit] = identifier[branch] , identifier[pr_number] = identifier[pr_number] )
keyword[try] :
keyword[yield] identifier[temp_dir]
keyword[finally] :
identifier[_LOGGER] . identifier[debug] ( literal[string] )
identifier[shutil] . identifier[rmtree] ( identifier[temp_dir] , identifier[onerror] = identifier[remove_readonly] ) | def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug('Git ID %s', git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here # depends on [control=['if'], data=[]]
# Clone the specific branch
split_git_id = git_id.split('@')
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir # depends on [control=['try'], data=[]]
finally:
# Pre-cleanup for Windows http://bugs.python.org/issue26660
_LOGGER.debug('Preclean Rest folder')
shutil.rmtree(temp_dir, onerror=remove_readonly) |
def _fit_orbit_mlogl(new_vxvv,vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs):
"""The log likelihood for fitting an orbit"""
#Use this _parse_args routine, which does forward and backward integration
iR,ivR,ivT,iz,ivz,iphi= tmockAA._parse_args(True,False,
new_vxvv[0],
new_vxvv[1],
new_vxvv[2],
new_vxvv[3],
new_vxvv[4],
new_vxvv[5])
if radec or lb or customsky:
#Need to transform to (l,b), (ra,dec), or a custom set
#First transform to X,Y,Z,vX,vY,vZ (Galactic)
X,Y,Z = coords.galcencyl_to_XYZ(iR.flatten(),iphi.flatten(),
iz.flatten(),
Xsun=obs[0]/ro,
Zsun=obs[2]/ro).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(ivR.flatten(),ivT.flatten(),
ivz.flatten(),iphi.flatten(),
vsun=nu.array(\
obs[3:6])/vo,Xsun=obs[0]/ro,Zsun=obs[2]/ro).T
bad_indx= (X == 0.)*(Y == 0.)*(Z == 0.)
if True in bad_indx: X[bad_indx]+= ro/10000.
lbdvrpmllpmbb= coords.rectgal_to_sphergal(X*ro,Y*ro,Z*ro,
vX*vo,vY*vo,vZ*vo,
degree=True)
if lb:
orb_vxvv= nu.array([lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
lbdvrpmllpmbb[:,2],
lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,3]]).T
elif radec:
#Further transform to ra,dec,pmra,pmdec
radec= coords.lb_to_radec(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True,
epoch=None)
pmrapmdec= coords.pmllpmbb_to_pmrapmdec(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True,
epoch=None)
orb_vxvv= nu.array([radec[:,0],radec[:,1],
lbdvrpmllpmbb[:,2],
pmrapmdec[:,0],pmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
elif customsky:
#Further transform to ra,dec,pmra,pmdec
customradec= lb_to_customsky(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True)
custompmrapmdec= pmllpmbb_to_customsky(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True)
orb_vxvv= nu.array([customradec[:,0],customradec[:,1],
lbdvrpmllpmbb[:,2],
custompmrapmdec[:,0],custompmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
else:
#shape=(2tintJ-1,6)
orb_vxvv= nu.array([iR.flatten(),ivR.flatten(),ivT.flatten(),
iz.flatten(),ivz.flatten(),iphi.flatten()]).T
out= 0.
for ii in range(vxvv.shape[0]):
sub_vxvv= (orb_vxvv-vxvv[ii,:].flatten())**2.
#print(sub_vxvv[nu.argmin(nu.sum(sub_vxvv,axis=1))])
if not vxvv_err is None:
sub_vxvv/= vxvv_err[ii,:]**2.
else:
sub_vxvv/= 0.01**2.
out+= logsumexp(-0.5*nu.sum(sub_vxvv,axis=1))
return -out | def function[_fit_orbit_mlogl, parameter[new_vxvv, vxvv, vxvv_err, pot, radec, lb, customsky, lb_to_customsky, pmllpmbb_to_customsky, tmockAA, ro, vo, obs]]:
constant[The log likelihood for fitting an orbit]
<ast.Tuple object at 0x7da1b0ec1240> assign[=] call[name[tmockAA]._parse_args, parameter[constant[True], constant[False], call[name[new_vxvv]][constant[0]], call[name[new_vxvv]][constant[1]], call[name[new_vxvv]][constant[2]], call[name[new_vxvv]][constant[3]], call[name[new_vxvv]][constant[4]], call[name[new_vxvv]][constant[5]]]]
if <ast.BoolOp object at 0x7da1b0ec2680> begin[:]
<ast.Tuple object at 0x7da1b0ec1ba0> assign[=] call[name[coords].galcencyl_to_XYZ, parameter[call[name[iR].flatten, parameter[]], call[name[iphi].flatten, parameter[]], call[name[iz].flatten, parameter[]]]].T
<ast.Tuple object at 0x7da1b0ec2440> assign[=] call[name[coords].galcencyl_to_vxvyvz, parameter[call[name[ivR].flatten, parameter[]], call[name[ivT].flatten, parameter[]], call[name[ivz].flatten, parameter[]], call[name[iphi].flatten, parameter[]]]].T
variable[bad_indx] assign[=] binary_operation[binary_operation[compare[name[X] equal[==] constant[0.0]] * compare[name[Y] equal[==] constant[0.0]]] * compare[name[Z] equal[==] constant[0.0]]]
if compare[constant[True] in name[bad_indx]] begin[:]
<ast.AugAssign object at 0x7da18bc73c40>
variable[lbdvrpmllpmbb] assign[=] call[name[coords].rectgal_to_sphergal, parameter[binary_operation[name[X] * name[ro]], binary_operation[name[Y] * name[ro]], binary_operation[name[Z] * name[ro]], binary_operation[name[vX] * name[vo]], binary_operation[name[vY] * name[vo]], binary_operation[name[vZ] * name[vo]]]]
if name[lb] begin[:]
variable[orb_vxvv] assign[=] call[name[nu].array, parameter[list[[<ast.Subscript object at 0x7da18bc71690>, <ast.Subscript object at 0x7da18bc712d0>, <ast.Subscript object at 0x7da18bc70c10>, <ast.Subscript object at 0x7da18bc70e20>, <ast.Subscript object at 0x7da18bc72f50>, <ast.Subscript object at 0x7da18bc70400>]]]].T
variable[out] assign[=] constant[0.0]
for taget[name[ii]] in starred[call[name[range], parameter[call[name[vxvv].shape][constant[0]]]]] begin[:]
variable[sub_vxvv] assign[=] binary_operation[binary_operation[name[orb_vxvv] - call[call[name[vxvv]][tuple[[<ast.Name object at 0x7da1b0e8f190>, <ast.Slice object at 0x7da1b0e8d3f0>]]].flatten, parameter[]]] ** constant[2.0]]
if <ast.UnaryOp object at 0x7da1b0e8eec0> begin[:]
<ast.AugAssign object at 0x7da1b0e8cbe0>
<ast.AugAssign object at 0x7da1b0e8c100>
return[<ast.UnaryOp object at 0x7da1b0e8de40>] | keyword[def] identifier[_fit_orbit_mlogl] ( identifier[new_vxvv] , identifier[vxvv] , identifier[vxvv_err] , identifier[pot] , identifier[radec] , identifier[lb] ,
identifier[customsky] , identifier[lb_to_customsky] , identifier[pmllpmbb_to_customsky] ,
identifier[tmockAA] ,
identifier[ro] , identifier[vo] , identifier[obs] ):
literal[string]
identifier[iR] , identifier[ivR] , identifier[ivT] , identifier[iz] , identifier[ivz] , identifier[iphi] = identifier[tmockAA] . identifier[_parse_args] ( keyword[True] , keyword[False] ,
identifier[new_vxvv] [ literal[int] ],
identifier[new_vxvv] [ literal[int] ],
identifier[new_vxvv] [ literal[int] ],
identifier[new_vxvv] [ literal[int] ],
identifier[new_vxvv] [ literal[int] ],
identifier[new_vxvv] [ literal[int] ])
keyword[if] identifier[radec] keyword[or] identifier[lb] keyword[or] identifier[customsky] :
identifier[X] , identifier[Y] , identifier[Z] = identifier[coords] . identifier[galcencyl_to_XYZ] ( identifier[iR] . identifier[flatten] (), identifier[iphi] . identifier[flatten] (),
identifier[iz] . identifier[flatten] (),
identifier[Xsun] = identifier[obs] [ literal[int] ]/ identifier[ro] ,
identifier[Zsun] = identifier[obs] [ literal[int] ]/ identifier[ro] ). identifier[T]
identifier[vX] , identifier[vY] , identifier[vZ] = identifier[coords] . identifier[galcencyl_to_vxvyvz] ( identifier[ivR] . identifier[flatten] (), identifier[ivT] . identifier[flatten] (),
identifier[ivz] . identifier[flatten] (), identifier[iphi] . identifier[flatten] (),
identifier[vsun] = identifier[nu] . identifier[array] ( identifier[obs] [ literal[int] : literal[int] ])/ identifier[vo] , identifier[Xsun] = identifier[obs] [ literal[int] ]/ identifier[ro] , identifier[Zsun] = identifier[obs] [ literal[int] ]/ identifier[ro] ). identifier[T]
identifier[bad_indx] =( identifier[X] == literal[int] )*( identifier[Y] == literal[int] )*( identifier[Z] == literal[int] )
keyword[if] keyword[True] keyword[in] identifier[bad_indx] : identifier[X] [ identifier[bad_indx] ]+= identifier[ro] / literal[int]
identifier[lbdvrpmllpmbb] = identifier[coords] . identifier[rectgal_to_sphergal] ( identifier[X] * identifier[ro] , identifier[Y] * identifier[ro] , identifier[Z] * identifier[ro] ,
identifier[vX] * identifier[vo] , identifier[vY] * identifier[vo] , identifier[vZ] * identifier[vo] ,
identifier[degree] = keyword[True] )
keyword[if] identifier[lb] :
identifier[orb_vxvv] = identifier[nu] . identifier[array] ([ identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ]]). identifier[T]
keyword[elif] identifier[radec] :
identifier[radec] = identifier[coords] . identifier[lb_to_radec] ( identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ], identifier[degree] = keyword[True] ,
identifier[epoch] = keyword[None] )
identifier[pmrapmdec] = identifier[coords] . identifier[pmllpmbb_to_pmrapmdec] ( identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[degree] = keyword[True] ,
identifier[epoch] = keyword[None] )
identifier[orb_vxvv] = identifier[nu] . identifier[array] ([ identifier[radec] [:, literal[int] ], identifier[radec] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[pmrapmdec] [:, literal[int] ], identifier[pmrapmdec] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ]]). identifier[T]
keyword[elif] identifier[customsky] :
identifier[customradec] = identifier[lb_to_customsky] ( identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ], identifier[degree] = keyword[True] )
identifier[custompmrapmdec] = identifier[pmllpmbb_to_customsky] ( identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[degree] = keyword[True] )
identifier[orb_vxvv] = identifier[nu] . identifier[array] ([ identifier[customradec] [:, literal[int] ], identifier[customradec] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ],
identifier[custompmrapmdec] [:, literal[int] ], identifier[custompmrapmdec] [:, literal[int] ],
identifier[lbdvrpmllpmbb] [:, literal[int] ]]). identifier[T]
keyword[else] :
identifier[orb_vxvv] = identifier[nu] . identifier[array] ([ identifier[iR] . identifier[flatten] (), identifier[ivR] . identifier[flatten] (), identifier[ivT] . identifier[flatten] (),
identifier[iz] . identifier[flatten] (), identifier[ivz] . identifier[flatten] (), identifier[iphi] . identifier[flatten] ()]). identifier[T]
identifier[out] = literal[int]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[vxvv] . identifier[shape] [ literal[int] ]):
identifier[sub_vxvv] =( identifier[orb_vxvv] - identifier[vxvv] [ identifier[ii] ,:]. identifier[flatten] ())** literal[int]
keyword[if] keyword[not] identifier[vxvv_err] keyword[is] keyword[None] :
identifier[sub_vxvv] /= identifier[vxvv_err] [ identifier[ii] ,:]** literal[int]
keyword[else] :
identifier[sub_vxvv] /= literal[int] ** literal[int]
identifier[out] += identifier[logsumexp] (- literal[int] * identifier[nu] . identifier[sum] ( identifier[sub_vxvv] , identifier[axis] = literal[int] ))
keyword[return] - identifier[out] | def _fit_orbit_mlogl(new_vxvv, vxvv, vxvv_err, pot, radec, lb, customsky, lb_to_customsky, pmllpmbb_to_customsky, tmockAA, ro, vo, obs):
"""The log likelihood for fitting an orbit"""
#Use this _parse_args routine, which does forward and backward integration
(iR, ivR, ivT, iz, ivz, iphi) = tmockAA._parse_args(True, False, new_vxvv[0], new_vxvv[1], new_vxvv[2], new_vxvv[3], new_vxvv[4], new_vxvv[5])
if radec or lb or customsky:
#Need to transform to (l,b), (ra,dec), or a custom set
#First transform to X,Y,Z,vX,vY,vZ (Galactic)
(X, Y, Z) = coords.galcencyl_to_XYZ(iR.flatten(), iphi.flatten(), iz.flatten(), Xsun=obs[0] / ro, Zsun=obs[2] / ro).T
(vX, vY, vZ) = coords.galcencyl_to_vxvyvz(ivR.flatten(), ivT.flatten(), ivz.flatten(), iphi.flatten(), vsun=nu.array(obs[3:6]) / vo, Xsun=obs[0] / ro, Zsun=obs[2] / ro).T
bad_indx = (X == 0.0) * (Y == 0.0) * (Z == 0.0)
if True in bad_indx:
X[bad_indx] += ro / 10000.0 # depends on [control=['if'], data=['bad_indx']]
lbdvrpmllpmbb = coords.rectgal_to_sphergal(X * ro, Y * ro, Z * ro, vX * vo, vY * vo, vZ * vo, degree=True)
if lb:
orb_vxvv = nu.array([lbdvrpmllpmbb[:, 0], lbdvrpmllpmbb[:, 1], lbdvrpmllpmbb[:, 2], lbdvrpmllpmbb[:, 4], lbdvrpmllpmbb[:, 5], lbdvrpmllpmbb[:, 3]]).T # depends on [control=['if'], data=[]]
elif radec:
#Further transform to ra,dec,pmra,pmdec
radec = coords.lb_to_radec(lbdvrpmllpmbb[:, 0], lbdvrpmllpmbb[:, 1], degree=True, epoch=None)
pmrapmdec = coords.pmllpmbb_to_pmrapmdec(lbdvrpmllpmbb[:, 4], lbdvrpmllpmbb[:, 5], lbdvrpmllpmbb[:, 0], lbdvrpmllpmbb[:, 1], degree=True, epoch=None)
orb_vxvv = nu.array([radec[:, 0], radec[:, 1], lbdvrpmllpmbb[:, 2], pmrapmdec[:, 0], pmrapmdec[:, 1], lbdvrpmllpmbb[:, 3]]).T # depends on [control=['if'], data=[]]
elif customsky:
#Further transform to ra,dec,pmra,pmdec
customradec = lb_to_customsky(lbdvrpmllpmbb[:, 0], lbdvrpmllpmbb[:, 1], degree=True)
custompmrapmdec = pmllpmbb_to_customsky(lbdvrpmllpmbb[:, 4], lbdvrpmllpmbb[:, 5], lbdvrpmllpmbb[:, 0], lbdvrpmllpmbb[:, 1], degree=True)
orb_vxvv = nu.array([customradec[:, 0], customradec[:, 1], lbdvrpmllpmbb[:, 2], custompmrapmdec[:, 0], custompmrapmdec[:, 1], lbdvrpmllpmbb[:, 3]]).T # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
#shape=(2tintJ-1,6)
orb_vxvv = nu.array([iR.flatten(), ivR.flatten(), ivT.flatten(), iz.flatten(), ivz.flatten(), iphi.flatten()]).T
out = 0.0
for ii in range(vxvv.shape[0]):
sub_vxvv = (orb_vxvv - vxvv[ii, :].flatten()) ** 2.0
#print(sub_vxvv[nu.argmin(nu.sum(sub_vxvv,axis=1))])
if not vxvv_err is None:
sub_vxvv /= vxvv_err[ii, :] ** 2.0 # depends on [control=['if'], data=[]]
else:
sub_vxvv /= 0.01 ** 2.0
out += logsumexp(-0.5 * nu.sum(sub_vxvv, axis=1)) # depends on [control=['for'], data=['ii']]
return -out |
def key_hash_algo(self, value):
"""
A unicode string of the hash algorithm to use when creating the
certificate identifier - "sha1" (default), or "sha256".
"""
if value not in set(['sha1', 'sha256']):
raise ValueError(_pretty_message(
'''
hash_algo must be one of "sha1", "sha256", not %s
''',
repr(value)
))
self._key_hash_algo = value | def function[key_hash_algo, parameter[self, value]]:
constant[
A unicode string of the hash algorithm to use when creating the
certificate identifier - "sha1" (default), or "sha256".
]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> call[name[set], parameter[list[[<ast.Constant object at 0x7da20c7c8250>, <ast.Constant object at 0x7da20c7c9990>]]]]] begin[:]
<ast.Raise object at 0x7da20c7c8bb0>
name[self]._key_hash_algo assign[=] name[value] | keyword[def] identifier[key_hash_algo] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[not] keyword[in] identifier[set] ([ literal[string] , literal[string] ]):
keyword[raise] identifier[ValueError] ( identifier[_pretty_message] (
literal[string] ,
identifier[repr] ( identifier[value] )
))
identifier[self] . identifier[_key_hash_algo] = identifier[value] | def key_hash_algo(self, value):
"""
A unicode string of the hash algorithm to use when creating the
certificate identifier - "sha1" (default), or "sha256".
"""
if value not in set(['sha1', 'sha256']):
raise ValueError(_pretty_message('\n hash_algo must be one of "sha1", "sha256", not %s\n ', repr(value))) # depends on [control=['if'], data=['value']]
self._key_hash_algo = value |
def connect_paragraph(self, paragraph, paragraphs):
""" Create parent/child links to other paragraphs.
The paragraphs parameters is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragraphs() method.
"""
if paragraph.depth > 0:
n = range(len(paragraphs))
n.reverse()
for i in n:
if paragraphs[i].depth == paragraph.depth-1:
paragraph.parent = paragraphs[i]
paragraphs[i].children.append(paragraph)
break
return paragraph | def function[connect_paragraph, parameter[self, paragraph, paragraphs]]:
constant[ Create parent/child links to other paragraphs.
The paragraphs parameters is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragraphs() method.
]
if compare[name[paragraph].depth greater[>] constant[0]] begin[:]
variable[n] assign[=] call[name[range], parameter[call[name[len], parameter[name[paragraphs]]]]]
call[name[n].reverse, parameter[]]
for taget[name[i]] in starred[name[n]] begin[:]
if compare[call[name[paragraphs]][name[i]].depth equal[==] binary_operation[name[paragraph].depth - constant[1]]] begin[:]
name[paragraph].parent assign[=] call[name[paragraphs]][name[i]]
call[call[name[paragraphs]][name[i]].children.append, parameter[name[paragraph]]]
break
return[name[paragraph]] | keyword[def] identifier[connect_paragraph] ( identifier[self] , identifier[paragraph] , identifier[paragraphs] ):
literal[string]
keyword[if] identifier[paragraph] . identifier[depth] > literal[int] :
identifier[n] = identifier[range] ( identifier[len] ( identifier[paragraphs] ))
identifier[n] . identifier[reverse] ()
keyword[for] identifier[i] keyword[in] identifier[n] :
keyword[if] identifier[paragraphs] [ identifier[i] ]. identifier[depth] == identifier[paragraph] . identifier[depth] - literal[int] :
identifier[paragraph] . identifier[parent] = identifier[paragraphs] [ identifier[i] ]
identifier[paragraphs] [ identifier[i] ]. identifier[children] . identifier[append] ( identifier[paragraph] )
keyword[break]
keyword[return] identifier[paragraph] | def connect_paragraph(self, paragraph, paragraphs):
""" Create parent/child links to other paragraphs.
The paragraphs parameters is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragraphs() method.
"""
if paragraph.depth > 0:
n = range(len(paragraphs))
n.reverse()
for i in n:
if paragraphs[i].depth == paragraph.depth - 1:
paragraph.parent = paragraphs[i]
paragraphs[i].children.append(paragraph)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return paragraph |
def bet_place(
self,
betting_market_id,
amount_to_bet,
backer_multiplier,
back_or_lay,
account=None,
**kwargs
):
""" Place a bet
:param str betting_market_id: The identifier for the market to bet
in
:param peerplays.amount.Amount amount_to_bet: Amount to bet with
:param int backer_multiplier: Multipler for backer
:param str back_or_lay: "back" or "lay" the bet
:param str account: (optional) the account to bet (defaults
to ``default_account``)
"""
from . import GRAPHENE_BETTING_ODDS_PRECISION
assert isinstance(amount_to_bet, Amount)
assert back_or_lay in ["back", "lay"]
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
bm = BettingMarket(betting_market_id)
op = operations.Bet_place(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"bettor_id": account["id"],
"betting_market_id": bm["id"],
"amount_to_bet": amount_to_bet.json(),
"backer_multiplier": (
int(backer_multiplier * GRAPHENE_BETTING_ODDS_PRECISION)
),
"back_or_lay": back_or_lay,
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | def function[bet_place, parameter[self, betting_market_id, amount_to_bet, backer_multiplier, back_or_lay, account]]:
constant[ Place a bet
:param str betting_market_id: The identifier for the market to bet
in
:param peerplays.amount.Amount amount_to_bet: Amount to bet with
:param int backer_multiplier: Multipler for backer
:param str back_or_lay: "back" or "lay" the bet
:param str account: (optional) the account to bet (defaults
to ``default_account``)
]
from relative_module[None] import module[GRAPHENE_BETTING_ODDS_PRECISION]
assert[call[name[isinstance], parameter[name[amount_to_bet], name[Amount]]]]
assert[compare[name[back_or_lay] in list[[<ast.Constant object at 0x7da1b1038ca0>, <ast.Constant object at 0x7da1b1038880>]]]]
if <ast.UnaryOp object at 0x7da1b1039ae0> begin[:]
if compare[constant[default_account] in name[self].config] begin[:]
variable[account] assign[=] call[name[self].config][constant[default_account]]
if <ast.UnaryOp object at 0x7da1b1039240> begin[:]
<ast.Raise object at 0x7da1b10383d0>
variable[account] assign[=] call[name[Account], parameter[name[account]]]
variable[bm] assign[=] call[name[BettingMarket], parameter[name[betting_market_id]]]
variable[op] assign[=] call[name[operations].Bet_place, parameter[]]
return[call[name[self].finalizeOp, parameter[name[op], call[name[account]][constant[name]], constant[active]]]] | keyword[def] identifier[bet_place] (
identifier[self] ,
identifier[betting_market_id] ,
identifier[amount_to_bet] ,
identifier[backer_multiplier] ,
identifier[back_or_lay] ,
identifier[account] = keyword[None] ,
** identifier[kwargs]
):
literal[string]
keyword[from] . keyword[import] identifier[GRAPHENE_BETTING_ODDS_PRECISION]
keyword[assert] identifier[isinstance] ( identifier[amount_to_bet] , identifier[Amount] )
keyword[assert] identifier[back_or_lay] keyword[in] [ literal[string] , literal[string] ]
keyword[if] keyword[not] identifier[account] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[config] :
identifier[account] = identifier[self] . identifier[config] [ literal[string] ]
keyword[if] keyword[not] identifier[account] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[account] = identifier[Account] ( identifier[account] )
identifier[bm] = identifier[BettingMarket] ( identifier[betting_market_id] )
identifier[op] = identifier[operations] . identifier[Bet_place] (
**{
literal[string] :{ literal[string] : literal[int] , literal[string] : literal[string] },
literal[string] : identifier[account] [ literal[string] ],
literal[string] : identifier[bm] [ literal[string] ],
literal[string] : identifier[amount_to_bet] . identifier[json] (),
literal[string] :(
identifier[int] ( identifier[backer_multiplier] * identifier[GRAPHENE_BETTING_ODDS_PRECISION] )
),
literal[string] : identifier[back_or_lay] ,
literal[string] : identifier[self] . identifier[prefix] ,
}
)
keyword[return] identifier[self] . identifier[finalizeOp] ( identifier[op] , identifier[account] [ literal[string] ], literal[string] ,** identifier[kwargs] ) | def bet_place(self, betting_market_id, amount_to_bet, backer_multiplier, back_or_lay, account=None, **kwargs):
""" Place a bet
:param str betting_market_id: The identifier for the market to bet
in
:param peerplays.amount.Amount amount_to_bet: Amount to bet with
:param int backer_multiplier: Multipler for backer
:param str back_or_lay: "back" or "lay" the bet
:param str account: (optional) the account to bet (defaults
to ``default_account``)
"""
from . import GRAPHENE_BETTING_ODDS_PRECISION
assert isinstance(amount_to_bet, Amount)
assert back_or_lay in ['back', 'lay']
if not account:
if 'default_account' in self.config:
account = self.config['default_account'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not account:
raise ValueError('You need to provide an account') # depends on [control=['if'], data=[]]
account = Account(account)
bm = BettingMarket(betting_market_id)
op = operations.Bet_place(**{'fee': {'amount': 0, 'asset_id': '1.3.0'}, 'bettor_id': account['id'], 'betting_market_id': bm['id'], 'amount_to_bet': amount_to_bet.json(), 'backer_multiplier': int(backer_multiplier * GRAPHENE_BETTING_ODDS_PRECISION), 'back_or_lay': back_or_lay, 'prefix': self.prefix})
return self.finalizeOp(op, account['name'], 'active', **kwargs) |
def batch_accumulate(max_batch_size, a_generator, cooperator=None):
"""
Start a Deferred whose callBack arg is a deque of the accumulation
of the values yielded from a_generator which is iterated over
in batches the size of max_batch_size.
It should be more efficient to iterate over the generator in
batches and still provide enough speed for non-blocking execution.
:param max_batch_size: The number of iterations of the generator
to consume at a time.
:param a_generator: An iterator which yields some not None values.
:return: A Deferred to which the next callback will be called with
the yielded contents of the generator function.
"""
if cooperator:
own_cooperate = cooperator.cooperate
else:
own_cooperate = cooperate
spigot = ValueBucket()
items = stream_tap((spigot,), a_generator)
d = own_cooperate(i_batch(max_batch_size, items)).whenDone()
d.addCallback(accumulation_handler, spigot)
return d | def function[batch_accumulate, parameter[max_batch_size, a_generator, cooperator]]:
constant[
Start a Deferred whose callBack arg is a deque of the accumulation
of the values yielded from a_generator which is iterated over
in batches the size of max_batch_size.
It should be more efficient to iterate over the generator in
batches and still provide enough speed for non-blocking execution.
:param max_batch_size: The number of iterations of the generator
to consume at a time.
:param a_generator: An iterator which yields some not None values.
:return: A Deferred to which the next callback will be called with
the yielded contents of the generator function.
]
if name[cooperator] begin[:]
variable[own_cooperate] assign[=] name[cooperator].cooperate
variable[spigot] assign[=] call[name[ValueBucket], parameter[]]
variable[items] assign[=] call[name[stream_tap], parameter[tuple[[<ast.Name object at 0x7da18bccbca0>]], name[a_generator]]]
variable[d] assign[=] call[call[name[own_cooperate], parameter[call[name[i_batch], parameter[name[max_batch_size], name[items]]]]].whenDone, parameter[]]
call[name[d].addCallback, parameter[name[accumulation_handler], name[spigot]]]
return[name[d]] | keyword[def] identifier[batch_accumulate] ( identifier[max_batch_size] , identifier[a_generator] , identifier[cooperator] = keyword[None] ):
literal[string]
keyword[if] identifier[cooperator] :
identifier[own_cooperate] = identifier[cooperator] . identifier[cooperate]
keyword[else] :
identifier[own_cooperate] = identifier[cooperate]
identifier[spigot] = identifier[ValueBucket] ()
identifier[items] = identifier[stream_tap] (( identifier[spigot] ,), identifier[a_generator] )
identifier[d] = identifier[own_cooperate] ( identifier[i_batch] ( identifier[max_batch_size] , identifier[items] )). identifier[whenDone] ()
identifier[d] . identifier[addCallback] ( identifier[accumulation_handler] , identifier[spigot] )
keyword[return] identifier[d] | def batch_accumulate(max_batch_size, a_generator, cooperator=None):
"""
Start a Deferred whose callBack arg is a deque of the accumulation
of the values yielded from a_generator which is iterated over
in batches the size of max_batch_size.
It should be more efficient to iterate over the generator in
batches and still provide enough speed for non-blocking execution.
:param max_batch_size: The number of iterations of the generator
to consume at a time.
:param a_generator: An iterator which yields some not None values.
:return: A Deferred to which the next callback will be called with
the yielded contents of the generator function.
"""
if cooperator:
own_cooperate = cooperator.cooperate # depends on [control=['if'], data=[]]
else:
own_cooperate = cooperate
spigot = ValueBucket()
items = stream_tap((spigot,), a_generator)
d = own_cooperate(i_batch(max_batch_size, items)).whenDone()
d.addCallback(accumulation_handler, spigot)
return d |
def _set_interface_dynamic_bypass(self, v, load=False):
"""
Setter method for interface_dynamic_bypass, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_dynamic_bypass.interface_dynamic_bypass, is_container='container', presence=True, yang_name="interface-dynamic-bypass", rest_name="dynamic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'MplsInterfaceDynamicBypass', u'info': u'Configure Dynamic bypass interface level parameters', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'hidden': u'full', u'alt-name': u'dynamic-bypass', u'cli-mode-name': u'config-router-mpls-interface-$(interface-name)-dynamic-bypass'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_dynamic_bypass must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_dynamic_bypass.interface_dynamic_bypass, is_container='container', presence=True, yang_name="interface-dynamic-bypass", rest_name="dynamic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'MplsInterfaceDynamicBypass', u'info': u'Configure Dynamic bypass interface level parameters', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'hidden': u'full', u'alt-name': u'dynamic-bypass', u'cli-mode-name': u'config-router-mpls-interface-$(interface-name)-dynamic-bypass'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__interface_dynamic_bypass = t
if hasattr(self, '_set'):
self._set() | def function[_set_interface_dynamic_bypass, parameter[self, v, load]]:
constant[
Setter method for interface_dynamic_bypass, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da1b2594ee0>
name[self].__interface_dynamic_bypass assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_interface_dynamic_bypass] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[interface_dynamic_bypass] . identifier[interface_dynamic_bypass] , identifier[is_container] = literal[string] , identifier[presence] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__interface_dynamic_bypass] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_interface_dynamic_bypass(self, v, load=False):
"""
Setter method for interface_dynamic_bypass, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_dynamic_bypass is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_dynamic_bypass() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=interface_dynamic_bypass.interface_dynamic_bypass, is_container='container', presence=True, yang_name='interface-dynamic-bypass', rest_name='dynamic-bypass', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'MplsInterfaceDynamicBypass', u'info': u'Configure Dynamic bypass interface level parameters', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'hidden': u'full', u'alt-name': u'dynamic-bypass', u'cli-mode-name': u'config-router-mpls-interface-$(interface-name)-dynamic-bypass'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'interface_dynamic_bypass must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=interface_dynamic_bypass.interface_dynamic_bypass, is_container=\'container\', presence=True, yang_name="interface-dynamic-bypass", rest_name="dynamic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'MplsInterfaceDynamicBypass\', u\'info\': u\'Configure Dynamic bypass interface level parameters\', u\'cli-full-no\': None, u\'cli-add-mode\': None, u\'cli-full-command\': None, u\'hidden\': u\'full\', u\'alt-name\': u\'dynamic-bypass\', u\'cli-mode-name\': u\'config-router-mpls-interface-$(interface-name)-dynamic-bypass\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__interface_dynamic_bypass = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def make_move(self, d, left):
'''
Plays a domino from the hand of the player whose turn it is onto one
end of the game board. If the game does not end, the turn is advanced
to the next player who has a valid move.
Making a move is transactional - if the operation fails at any point,
the game will return to its state before the operation began.
:param Domino d: domino to be played
:param bool left: end of the board on which to play the
domino (True for left, False for right)
:return: a Result object if the game ends; None otherwise
:raises GameOverException: if the game has already ended
:raises NoSuchDominoException: if the domino to be played is not in
the hand of the player whose turn it is
:raises EndsMismatchException: if the domino cannot be placed on
the specified position in the board
'''
if self.result is not None:
raise dominoes.GameOverException('Cannot make a move - the game is over!')
i = self.hands[self.turn].play(d)
try:
self.board.add(d, left)
except dominoes.EndsMismatchException as error:
# return the domino to the hand if it cannot be placed on the board
self.hands[self.turn].draw(d, i)
raise error
# record the move
self.moves.append((d, left))
# check if the game ended due to a player running out of dominoes
if not self.hands[self.turn]:
self.valid_moves = ()
self.result = dominoes.Result(
self.turn, True, pow(-1, self.turn) * sum(_remaining_points(self.hands))
)
return self.result
# advance the turn to the next player with a valid move.
# if no player has a valid move, the game is stuck. also,
# record all the passes.
passes = []
stuck = True
for _ in self.hands:
self.turn = next_player(self.turn)
self._update_valid_moves()
if self.valid_moves:
self.moves.extend(passes)
stuck = False
break
else:
passes.append(None)
if stuck:
player_points = _remaining_points(self.hands)
team_points = [player_points[0] + player_points[2],
player_points[1] + player_points[3]]
if team_points[0] < team_points[1]:
self.result = dominoes.Result(self.turn, False, sum(team_points))
elif team_points[0] == team_points[1]:
self.result = dominoes.Result(self.turn, False, 0)
else:
self.result = dominoes.Result(self.turn, False, -sum(team_points))
return self.result | def function[make_move, parameter[self, d, left]]:
constant[
Plays a domino from the hand of the player whose turn it is onto one
end of the game board. If the game does not end, the turn is advanced
to the next player who has a valid move.
Making a move is transactional - if the operation fails at any point,
the game will return to its state before the operation began.
:param Domino d: domino to be played
:param bool left: end of the board on which to play the
domino (True for left, False for right)
:return: a Result object if the game ends; None otherwise
:raises GameOverException: if the game has already ended
:raises NoSuchDominoException: if the domino to be played is not in
the hand of the player whose turn it is
:raises EndsMismatchException: if the domino cannot be placed on
the specified position in the board
]
if compare[name[self].result is_not constant[None]] begin[:]
<ast.Raise object at 0x7da18bcc9780>
variable[i] assign[=] call[call[name[self].hands][name[self].turn].play, parameter[name[d]]]
<ast.Try object at 0x7da18bcc9330>
call[name[self].moves.append, parameter[tuple[[<ast.Name object at 0x7da18bcc9e40>, <ast.Name object at 0x7da18bcc92a0>]]]]
if <ast.UnaryOp object at 0x7da18bccab90> begin[:]
name[self].valid_moves assign[=] tuple[[]]
name[self].result assign[=] call[name[dominoes].Result, parameter[name[self].turn, constant[True], binary_operation[call[name[pow], parameter[<ast.UnaryOp object at 0x7da18bcc9120>, name[self].turn]] * call[name[sum], parameter[call[name[_remaining_points], parameter[name[self].hands]]]]]]]
return[name[self].result]
variable[passes] assign[=] list[[]]
variable[stuck] assign[=] constant[True]
for taget[name[_]] in starred[name[self].hands] begin[:]
name[self].turn assign[=] call[name[next_player], parameter[name[self].turn]]
call[name[self]._update_valid_moves, parameter[]]
if name[self].valid_moves begin[:]
call[name[self].moves.extend, parameter[name[passes]]]
variable[stuck] assign[=] constant[False]
break
if name[stuck] begin[:]
variable[player_points] assign[=] call[name[_remaining_points], parameter[name[self].hands]]
variable[team_points] assign[=] list[[<ast.BinOp object at 0x7da18bcc9d50>, <ast.BinOp object at 0x7da18bcca0b0>]]
if compare[call[name[team_points]][constant[0]] less[<] call[name[team_points]][constant[1]]] begin[:]
name[self].result assign[=] call[name[dominoes].Result, parameter[name[self].turn, constant[False], call[name[sum], parameter[name[team_points]]]]]
return[name[self].result] | keyword[def] identifier[make_move] ( identifier[self] , identifier[d] , identifier[left] ):
literal[string]
keyword[if] identifier[self] . identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[dominoes] . identifier[GameOverException] ( literal[string] )
identifier[i] = identifier[self] . identifier[hands] [ identifier[self] . identifier[turn] ]. identifier[play] ( identifier[d] )
keyword[try] :
identifier[self] . identifier[board] . identifier[add] ( identifier[d] , identifier[left] )
keyword[except] identifier[dominoes] . identifier[EndsMismatchException] keyword[as] identifier[error] :
identifier[self] . identifier[hands] [ identifier[self] . identifier[turn] ]. identifier[draw] ( identifier[d] , identifier[i] )
keyword[raise] identifier[error]
identifier[self] . identifier[moves] . identifier[append] (( identifier[d] , identifier[left] ))
keyword[if] keyword[not] identifier[self] . identifier[hands] [ identifier[self] . identifier[turn] ]:
identifier[self] . identifier[valid_moves] =()
identifier[self] . identifier[result] = identifier[dominoes] . identifier[Result] (
identifier[self] . identifier[turn] , keyword[True] , identifier[pow] (- literal[int] , identifier[self] . identifier[turn] )* identifier[sum] ( identifier[_remaining_points] ( identifier[self] . identifier[hands] ))
)
keyword[return] identifier[self] . identifier[result]
identifier[passes] =[]
identifier[stuck] = keyword[True]
keyword[for] identifier[_] keyword[in] identifier[self] . identifier[hands] :
identifier[self] . identifier[turn] = identifier[next_player] ( identifier[self] . identifier[turn] )
identifier[self] . identifier[_update_valid_moves] ()
keyword[if] identifier[self] . identifier[valid_moves] :
identifier[self] . identifier[moves] . identifier[extend] ( identifier[passes] )
identifier[stuck] = keyword[False]
keyword[break]
keyword[else] :
identifier[passes] . identifier[append] ( keyword[None] )
keyword[if] identifier[stuck] :
identifier[player_points] = identifier[_remaining_points] ( identifier[self] . identifier[hands] )
identifier[team_points] =[ identifier[player_points] [ literal[int] ]+ identifier[player_points] [ literal[int] ],
identifier[player_points] [ literal[int] ]+ identifier[player_points] [ literal[int] ]]
keyword[if] identifier[team_points] [ literal[int] ]< identifier[team_points] [ literal[int] ]:
identifier[self] . identifier[result] = identifier[dominoes] . identifier[Result] ( identifier[self] . identifier[turn] , keyword[False] , identifier[sum] ( identifier[team_points] ))
keyword[elif] identifier[team_points] [ literal[int] ]== identifier[team_points] [ literal[int] ]:
identifier[self] . identifier[result] = identifier[dominoes] . identifier[Result] ( identifier[self] . identifier[turn] , keyword[False] , literal[int] )
keyword[else] :
identifier[self] . identifier[result] = identifier[dominoes] . identifier[Result] ( identifier[self] . identifier[turn] , keyword[False] ,- identifier[sum] ( identifier[team_points] ))
keyword[return] identifier[self] . identifier[result] | def make_move(self, d, left):
"""
Plays a domino from the hand of the player whose turn it is onto one
end of the game board. If the game does not end, the turn is advanced
to the next player who has a valid move.
Making a move is transactional - if the operation fails at any point,
the game will return to its state before the operation began.
:param Domino d: domino to be played
:param bool left: end of the board on which to play the
domino (True for left, False for right)
:return: a Result object if the game ends; None otherwise
:raises GameOverException: if the game has already ended
:raises NoSuchDominoException: if the domino to be played is not in
the hand of the player whose turn it is
:raises EndsMismatchException: if the domino cannot be placed on
the specified position in the board
"""
if self.result is not None:
raise dominoes.GameOverException('Cannot make a move - the game is over!') # depends on [control=['if'], data=[]]
i = self.hands[self.turn].play(d)
try:
self.board.add(d, left) # depends on [control=['try'], data=[]]
except dominoes.EndsMismatchException as error:
# return the domino to the hand if it cannot be placed on the board
self.hands[self.turn].draw(d, i)
raise error # depends on [control=['except'], data=['error']]
# record the move
self.moves.append((d, left))
# check if the game ended due to a player running out of dominoes
if not self.hands[self.turn]:
self.valid_moves = ()
self.result = dominoes.Result(self.turn, True, pow(-1, self.turn) * sum(_remaining_points(self.hands)))
return self.result # depends on [control=['if'], data=[]]
# advance the turn to the next player with a valid move.
# if no player has a valid move, the game is stuck. also,
# record all the passes.
passes = []
stuck = True
for _ in self.hands:
self.turn = next_player(self.turn)
self._update_valid_moves()
if self.valid_moves:
self.moves.extend(passes)
stuck = False
break # depends on [control=['if'], data=[]]
else:
passes.append(None) # depends on [control=['for'], data=[]]
if stuck:
player_points = _remaining_points(self.hands)
team_points = [player_points[0] + player_points[2], player_points[1] + player_points[3]]
if team_points[0] < team_points[1]:
self.result = dominoes.Result(self.turn, False, sum(team_points)) # depends on [control=['if'], data=[]]
elif team_points[0] == team_points[1]:
self.result = dominoes.Result(self.turn, False, 0) # depends on [control=['if'], data=[]]
else:
self.result = dominoes.Result(self.turn, False, -sum(team_points))
return self.result # depends on [control=['if'], data=[]] |
def qget(self, name, index):
    """
    Retrieve the element stored at position ``index`` in the queue ``name``.

    :param string name: the queue name
    :param int index: position to read; may be negative to count from the
        tail of the queue
    :return: the value at ``index`` within queue ``name``, or ``None`` when
        no element exists at that position
    :rtype: string
    """
    # Validate/coerce the index first, then issue the command in one step.
    return self.execute_command('qget', name, get_integer('index', index))
constant[
Get the element of ``index`` within the queue ``name``
:param string name: the queue name
:param int index: the specified index, can < 0
:return: the value at ``index`` within queue ``name`` , or ``None`` if the
element doesn't exist
:rtype: string
]
variable[index] assign[=] call[name[get_integer], parameter[constant[index], name[index]]]
return[call[name[self].execute_command, parameter[constant[qget], name[name], name[index]]]] | keyword[def] identifier[qget] ( identifier[self] , identifier[name] , identifier[index] ):
literal[string]
identifier[index] = identifier[get_integer] ( literal[string] , identifier[index] )
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[index] ) | def qget(self, name, index):
"""
Get the element of ``index`` within the queue ``name``
:param string name: the queue name
:param int index: the specified index, can < 0
:return: the value at ``index`` within queue ``name`` , or ``None`` if the
element doesn't exist
:rtype: string
"""
index = get_integer('index', index)
return self.execute_command('qget', name, index) |
def first(self, predicate=None):
    '''Return the first element of the sequence, optionally filtered.

    When *predicate* is omitted or None, the very first element of the
    source sequence is returned; otherwise the first element for which
    *predicate* evaluates to True is returned. An exception is raised
    when no such element exists.

    Note: This method uses immediate execution.

    Args:
        predicate: An optional unary predicate function, the only argument
            to which is the element. The return value should be True for
            matching elements, otherwise False. If the predicate is
            omitted or None the first element of the source sequence will
            be returned.

    Returns:
        The first element of the sequence if predicate is None, otherwise
        the first element for which the predicate returns True.

    Raises:
        ValueError: If the Queryable is closed.
        ValueError: If the source sequence is empty.
        ValueError: If there are no elements matching the predicate.
        TypeError: If the predicate is not callable.
    '''
    # Guard clause: a closed Queryable can no longer be consumed.
    if self.closed():
        raise ValueError("Attempt to call first() on a closed Queryable.")
    if predicate is None:
        return self._first()
    return self._first_predicate(predicate)
constant[The first element in a sequence (optionally satisfying a predicate).
If the predicate is omitted or is None this query returns the first
element in the sequence; otherwise, it returns the first element in
the sequence for which the predicate evaluates to True. Exceptions are
raised if there is no such element.
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the first element of the source sequence will
be returned.
Returns:
The first element of the sequence if predicate is None, otherwise
the first element for which the predicate returns True.
Raises:
ValueError: If the Queryable is closed.
ValueError: If the source sequence is empty.
ValueError: If there are no elements matching the predicate.
TypeError: If the predicate is not callable.
]
if call[name[self].closed, parameter[]] begin[:]
<ast.Raise object at 0x7da1b1a3eb60>
return[<ast.IfExp object at 0x7da1b1b0e8f0>] | keyword[def] identifier[first] ( identifier[self] , identifier[predicate] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[closed] ():
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[_first] () keyword[if] identifier[predicate] keyword[is] keyword[None] keyword[else] identifier[self] . identifier[_first_predicate] ( identifier[predicate] ) | def first(self, predicate=None):
"""The first element in a sequence (optionally satisfying a predicate).
If the predicate is omitted or is None this query returns the first
element in the sequence; otherwise, it returns the first element in
the sequence for which the predicate evaluates to True. Exceptions are
raised if there is no such element.
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the first element of the source sequence will
be returned.
Returns:
The first element of the sequence if predicate is None, otherwise
the first element for which the predicate returns True.
Raises:
ValueError: If the Queryable is closed.
ValueError: If the source sequence is empty.
ValueError: If there are no elements matching the predicate.
TypeError: If the predicate is not callable.
"""
if self.closed():
raise ValueError('Attempt to call first() on a closed Queryable.') # depends on [control=['if'], data=[]]
return self._first() if predicate is None else self._first_predicate(predicate) |
def createCellsList (self):
    '''Create population cells based on a list of individual cells.

    Each entry of ``self.tags['cellsList']`` describes one cell; its tags are
    merged on top of the population-level tags. Only the cells assigned to
    this MPI rank by ``_distributeCells()`` are instantiated here.

    Returns:
        list: the Cell objects created on this node.
    '''
    from .. import sim
    cells = []
    numCells = len(self.tags['cellsList'])  # hoisted: used three times below
    self.tags['numCells'] = numCells
    for i in self._distributeCells(numCells)[sim.rank]:
        gid = sim.net.lastGid + i  # global cell id, offset from last assigned gid
        self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
        # Copy pop tags to cell tags, restricted to the configured subset
        # (popTagsCopiedToCells), i.e. excluding pop-specific tags.
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}
        cellTags['pop'] = self.tags['pop']
        cellTags.update(self.tags['cellsList'][i])  # add tags specific to this cell
        # Keep absolute and normalized coordinates consistent: derive the
        # missing one from whichever was supplied; default both to 0.
        for coord in ['x', 'y', 'z']:
            if coord in cellTags:  # absolute coord given -> compute normalized
                cellTags[coord + 'norm'] = cellTags[coord] / getattr(sim.net.params, 'size' + coord.upper())
            elif coord + 'norm' in cellTags:  # normalized coord given -> compute absolute
                cellTags[coord] = cellTags[coord + 'norm'] * getattr(sim.net.params, 'size' + coord.upper())
            else:
                cellTags[coord + 'norm'] = cellTags[coord] = 0
        if 'cellModel' in self.tags and self.tags['cellModel'] == 'Vecstim':  # if VecStim, copy spike times to params
            cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
        cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
        if sim.cfg.verbose:
            # Fixed: the pop placeholder previously printed the loop index
            # ('%d' with i) instead of the population label.
            print(('Cell %d/%d (gid=%d) of pop %s, on node %d, ' % (i, numCells - 1, gid, self.tags['pop'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + numCells
    return cells
constant[ Create population cells based on list of individual cells]
from relative_module[None] import module[sim]
variable[cells] assign[=] list[[]]
call[name[self].tags][constant[numCells]] assign[=] call[name[len], parameter[call[name[self].tags][constant[cellsList]]]]
for taget[name[i]] in starred[call[call[name[self]._distributeCells, parameter[call[name[len], parameter[call[name[self].tags][constant[cellsList]]]]]]][name[sim].rank]] begin[:]
variable[gid] assign[=] binary_operation[name[sim].net.lastGid + name[i]]
call[name[self].cellGids.append, parameter[name[gid]]]
variable[cellTags] assign[=] <ast.DictComp object at 0x7da20c6aada0>
call[name[cellTags]][constant[pop]] assign[=] call[name[self].tags][constant[pop]]
call[name[cellTags].update, parameter[call[call[name[self].tags][constant[cellsList]]][name[i]]]]
for taget[name[coord]] in starred[list[[<ast.Constant object at 0x7da20c6a9e10>, <ast.Constant object at 0x7da20c6ab820>, <ast.Constant object at 0x7da20c6a8bb0>]]] begin[:]
if compare[name[coord] in name[cellTags]] begin[:]
call[name[cellTags]][binary_operation[name[coord] + constant[norm]]] assign[=] binary_operation[call[name[cellTags]][name[coord]] / call[name[getattr], parameter[name[sim].net.params, binary_operation[constant[size] + call[name[coord].upper, parameter[]]]]]]
if <ast.BoolOp object at 0x7da20c6aa860> begin[:]
call[call[name[cellTags]][constant[params]]][constant[spkTimes]] assign[=] call[call[call[name[self].tags][constant[cellsList]]][name[i]]][constant[spkTimes]]
call[name[cells].append, parameter[call[name[self].cellModelClass, parameter[name[gid], name[cellTags]]]]]
if name[sim].cfg.verbose begin[:]
call[name[print], parameter[binary_operation[constant[Cell %d/%d (gid=%d) of pop %d, on node %d, ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6ab670>, <ast.BinOp object at 0x7da20c6ab190>, <ast.Name object at 0x7da20c6ab3a0>, <ast.Name object at 0x7da20c6a85e0>, <ast.Attribute object at 0x7da20c6aa350>]]]]]
name[sim].net.lastGid assign[=] binary_operation[name[sim].net.lastGid + call[name[len], parameter[call[name[self].tags][constant[cellsList]]]]]
return[name[cells]] | keyword[def] identifier[createCellsList] ( identifier[self] ):
literal[string]
keyword[from] .. keyword[import] identifier[sim]
identifier[cells] =[]
identifier[self] . identifier[tags] [ literal[string] ]= identifier[len] ( identifier[self] . identifier[tags] [ literal[string] ])
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_distributeCells] ( identifier[len] ( identifier[self] . identifier[tags] [ literal[string] ]))[ identifier[sim] . identifier[rank] ]:
identifier[gid] = identifier[sim] . identifier[net] . identifier[lastGid] + identifier[i]
identifier[self] . identifier[cellGids] . identifier[append] ( identifier[gid] )
identifier[cellTags] ={ identifier[k] : identifier[v] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[self] . identifier[tags] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[sim] . identifier[net] . identifier[params] . identifier[popTagsCopiedToCells] }
identifier[cellTags] [ literal[string] ]= identifier[self] . identifier[tags] [ literal[string] ]
identifier[cellTags] . identifier[update] ( identifier[self] . identifier[tags] [ literal[string] ][ identifier[i] ])
keyword[for] identifier[coord] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[coord] keyword[in] identifier[cellTags] :
identifier[cellTags] [ identifier[coord] + literal[string] ]= identifier[cellTags] [ identifier[coord] ]/ identifier[getattr] ( identifier[sim] . identifier[net] . identifier[params] , literal[string] + identifier[coord] . identifier[upper] ())
keyword[elif] identifier[coord] + literal[string] keyword[in] identifier[cellTags] :
identifier[cellTags] [ identifier[coord] ]= identifier[cellTags] [ identifier[coord] + literal[string] ]* identifier[getattr] ( identifier[sim] . identifier[net] . identifier[params] , literal[string] + identifier[coord] . identifier[upper] ())
keyword[else] :
identifier[cellTags] [ identifier[coord] + literal[string] ]= identifier[cellTags] [ identifier[coord] ]= literal[int]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[tags] . identifier[keys] () keyword[and] identifier[self] . identifier[tags] [ literal[string] ]== literal[string] :
identifier[cellTags] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[tags] [ literal[string] ][ identifier[i] ][ literal[string] ]
identifier[cells] . identifier[append] ( identifier[self] . identifier[cellModelClass] ( identifier[gid] , identifier[cellTags] ))
keyword[if] identifier[sim] . identifier[cfg] . identifier[verbose] : identifier[print] (( literal[string] %( identifier[i] , identifier[self] . identifier[tags] [ literal[string] ]- literal[int] , identifier[gid] , identifier[i] , identifier[sim] . identifier[rank] )))
identifier[sim] . identifier[net] . identifier[lastGid] = identifier[sim] . identifier[net] . identifier[lastGid] + identifier[len] ( identifier[self] . identifier[tags] [ literal[string] ])
keyword[return] identifier[cells] | def createCellsList(self):
""" Create population cells based on list of individual cells"""
from .. import sim
cells = []
self.tags['numCells'] = len(self.tags['cellsList'])
for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
#if 'cellModel' in self.tags['cellsList'][i]:
# self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel']) # select cell class to instantiate cells based on the cellModel tags
gid = sim.net.lastGid + i
self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
cellTags['pop'] = self.tags['pop']
cellTags.update(self.tags['cellsList'][i]) # add tags specific to this cells
for coord in ['x', 'y', 'z']:
if coord in cellTags: # if absolute coord exists
cellTags[coord + 'norm'] = cellTags[coord] / getattr(sim.net.params, 'size' + coord.upper()) # calculate norm coord # depends on [control=['if'], data=['coord', 'cellTags']]
elif coord + 'norm' in cellTags: # elif norm coord exists
cellTags[coord] = cellTags[coord + 'norm'] * getattr(sim.net.params, 'size' + coord.upper()) # calculate norm coord # depends on [control=['if'], data=['cellTags']]
else:
cellTags[coord + 'norm'] = cellTags[coord] = 0 # depends on [control=['for'], data=['coord']]
if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'Vecstim': # if VecStim, copy spike times to params
cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes'] # depends on [control=['if'], data=[]]
cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
if sim.cfg.verbose:
print('Cell %d/%d (gid=%d) of pop %d, on node %d, ' % (i, self.tags['numCells'] - 1, gid, i, sim.rank)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
return cells |
def format(self):
    """Handles the actual behaviour involved with formatting.
    To change the behaviour, this method should be overridden.
    Returns
    --------
    dict
        A mapping with ``title``, ``description`` and ``sections`` keys
        describing the help output; ``sections`` is a list of dicts with
        ``name``/``value`` (and usually ``inline``) entries.
    """
    values = {}
    title = "Description"
    # Cogs use their class docstring; anything else combines the command's
    # description with the ending note.
    description = self.command.description + "\n\n" + self.get_ending_note() if not self.is_cog() else inspect.getdoc(self.command)
    sections = []
    if isinstance(self.command, Command):
        # Help for a single command: its short doc, plus usage and the
        # remainder of the long help with the prefix substituted in.
        description = self.command.short_doc
        sections = [{"name": "Usage", "value": self.get_command_signature()},
                    {"name": "More Info", "value": self.command.help.replace(self.command.short_doc, "").format(prefix=self.clean_prefix),
                     "inline": False}]
    def category(tup):
        # Grouping key for (name, command) tuples: the owning cog's name,
        # or a zero-width-space bucket for commands without a cog.
        cog = tup[1].cog_name
        return cog + ':' if cog is not None else '\u200bNo Category:'
    if self.is_bot():
        title = self.bot.user.display_name + " Help"
        # groupby() requires its input sorted by the same key function.
        data = sorted(self.filter_command_list(), key=category)
        # NOTE(review): the loop variable shadows the category() helper;
        # harmless here because groupby() already captured the function.
        for category, commands in itertools.groupby(data, key=category):
            section = {}
            commands = list(commands)
            if len(commands) > 0:
                section['name'] = category
                section['value'] = self.add_commands(commands)
                section['inline'] = False
            # NOTE(review): an empty {} is still appended when a group has
            # no commands -- confirm downstream renderers tolerate that.
            sections.append(section)
    elif not sections or self.has_subcommands():
        # Cog/group help (when not already handled above): one flat section.
        section = {"name": "Commands:", "inline": False, "value": self.add_commands(self.filter_command_list())}
        sections.append(section)
    values['title'] = title
    values['description'] = description
    values['sections'] = sections
    return values
constant[Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
list
A paginated output of the help command.
]
variable[values] assign[=] dictionary[[], []]
variable[title] assign[=] constant[Description]
variable[description] assign[=] <ast.IfExp object at 0x7da2041db1f0>
variable[sections] assign[=] list[[]]
if call[name[isinstance], parameter[name[self].command, name[Command]]] begin[:]
variable[description] assign[=] name[self].command.short_doc
variable[sections] assign[=] list[[<ast.Dict object at 0x7da1b28f25c0>, <ast.Dict object at 0x7da1b28f28f0>]]
def function[category, parameter[tup]]:
variable[cog] assign[=] call[name[tup]][constant[1]].cog_name
return[<ast.IfExp object at 0x7da1b28f2e90>]
if call[name[self].is_bot, parameter[]] begin[:]
variable[title] assign[=] binary_operation[name[self].bot.user.display_name + constant[ Help]]
variable[data] assign[=] call[name[sorted], parameter[call[name[self].filter_command_list, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b28f1720>, <ast.Name object at 0x7da1b28f1510>]]] in starred[call[name[itertools].groupby, parameter[name[data]]]] begin[:]
variable[section] assign[=] dictionary[[], []]
variable[commands] assign[=] call[name[list], parameter[name[commands]]]
if compare[call[name[len], parameter[name[commands]]] greater[>] constant[0]] begin[:]
call[name[section]][constant[name]] assign[=] name[category]
call[name[section]][constant[value]] assign[=] call[name[self].add_commands, parameter[name[commands]]]
call[name[section]][constant[inline]] assign[=] constant[False]
call[name[sections].append, parameter[name[section]]]
call[name[values]][constant[title]] assign[=] name[title]
call[name[values]][constant[description]] assign[=] name[description]
call[name[values]][constant[sections]] assign[=] name[sections]
return[name[values]] | keyword[def] identifier[format] ( identifier[self] ):
literal[string]
identifier[values] ={}
identifier[title] = literal[string]
identifier[description] = identifier[self] . identifier[command] . identifier[description] + literal[string] + identifier[self] . identifier[get_ending_note] () keyword[if] keyword[not] identifier[self] . identifier[is_cog] () keyword[else] identifier[inspect] . identifier[getdoc] ( identifier[self] . identifier[command] )
identifier[sections] =[]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[command] , identifier[Command] ):
identifier[description] = identifier[self] . identifier[command] . identifier[short_doc]
identifier[sections] =[{ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[get_command_signature] ()},
{ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[command] . identifier[help] . identifier[replace] ( identifier[self] . identifier[command] . identifier[short_doc] , literal[string] ). identifier[format] ( identifier[prefix] = identifier[self] . identifier[clean_prefix] ),
literal[string] : keyword[False] }]
keyword[def] identifier[category] ( identifier[tup] ):
identifier[cog] = identifier[tup] [ literal[int] ]. identifier[cog_name]
keyword[return] identifier[cog] + literal[string] keyword[if] identifier[cog] keyword[is] keyword[not] keyword[None] keyword[else] literal[string]
keyword[if] identifier[self] . identifier[is_bot] ():
identifier[title] = identifier[self] . identifier[bot] . identifier[user] . identifier[display_name] + literal[string]
identifier[data] = identifier[sorted] ( identifier[self] . identifier[filter_command_list] (), identifier[key] = identifier[category] )
keyword[for] identifier[category] , identifier[commands] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[data] , identifier[key] = identifier[category] ):
identifier[section] ={}
identifier[commands] = identifier[list] ( identifier[commands] )
keyword[if] identifier[len] ( identifier[commands] )> literal[int] :
identifier[section] [ literal[string] ]= identifier[category]
identifier[section] [ literal[string] ]= identifier[self] . identifier[add_commands] ( identifier[commands] )
identifier[section] [ literal[string] ]= keyword[False]
identifier[sections] . identifier[append] ( identifier[section] )
keyword[elif] keyword[not] identifier[sections] keyword[or] identifier[self] . identifier[has_subcommands] ():
identifier[section] ={ literal[string] : literal[string] , literal[string] : keyword[False] , literal[string] : identifier[self] . identifier[add_commands] ( identifier[self] . identifier[filter_command_list] ())}
identifier[sections] . identifier[append] ( identifier[section] )
identifier[values] [ literal[string] ]= identifier[title]
identifier[values] [ literal[string] ]= identifier[description]
identifier[values] [ literal[string] ]= identifier[sections]
keyword[return] identifier[values] | def format(self):
"""Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
list
A paginated output of the help command.
"""
values = {}
title = 'Description'
description = self.command.description + '\n\n' + self.get_ending_note() if not self.is_cog() else inspect.getdoc(self.command)
sections = []
if isinstance(self.command, Command):
description = self.command.short_doc
sections = [{'name': 'Usage', 'value': self.get_command_signature()}, {'name': 'More Info', 'value': self.command.help.replace(self.command.short_doc, '').format(prefix=self.clean_prefix), 'inline': False}] # depends on [control=['if'], data=[]]
def category(tup):
cog = tup[1].cog_name
return cog + ':' if cog is not None else '\u200bNo Category:'
if self.is_bot():
title = self.bot.user.display_name + ' Help'
data = sorted(self.filter_command_list(), key=category)
for (category, commands) in itertools.groupby(data, key=category):
section = {}
commands = list(commands)
if len(commands) > 0:
section['name'] = category # depends on [control=['if'], data=[]]
section['value'] = self.add_commands(commands)
section['inline'] = False
sections.append(section) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif not sections or self.has_subcommands():
section = {'name': 'Commands:', 'inline': False, 'value': self.add_commands(self.filter_command_list())}
sections.append(section) # depends on [control=['if'], data=[]]
values['title'] = title
values['description'] = description
values['sections'] = sections
return values |
def unquoted(self):
    """
    Return *key* with one level of double quotes removed.
    Redshift stores some identifiers without quotes in internal tables,
    even though the name must be quoted elsewhere.
    In particular, this happens for tables named as a keyword.
    """
    text = str(self)
    # Strip exactly one surrounding pair of double quotes, if present.
    is_quoted = text.startswith('"') and text.endswith('"')
    return text[1:-1] if is_quoted else text
constant[
Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword.
]
variable[key] assign[=] call[name[str], parameter[name[self]]]
if <ast.BoolOp object at 0x7da18ede4c10> begin[:]
return[call[name[key]][<ast.Slice object at 0x7da18ede5cf0>]]
return[name[key]] | keyword[def] identifier[unquoted] ( identifier[self] ):
literal[string]
identifier[key] = identifier[str] ( identifier[self] )
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ) keyword[and] identifier[key] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[key] [ literal[int] :- literal[int] ]
keyword[return] identifier[key] | def unquoted(self):
"""
Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword.
"""
key = str(self)
if key.startswith('"') and key.endswith('"'):
return key[1:-1] # depends on [control=['if'], data=[]]
return key |
def block(self):
    """While this context manager is active any signals for aborting
    the process will be queued and exit the program once the context
    is left.

    The no-signal flag is reset in a ``finally`` clause so that an
    exception raised inside the ``with`` block cannot leave signal
    handling permanently suppressed (the original code only reset the
    flag on the normal-completion path).
    """
    self._nosig = True
    try:
        yield
    finally:
        # Always re-enable signal handling, even if the body raised.
        self._nosig = False
    if self._interrupted:
        raise SystemExit("Aborted...")
constant[While this context manager is active any signals for aborting
the process will be queued and exit the program once the context
is left.
]
name[self]._nosig assign[=] constant[True]
<ast.Yield object at 0x7da1b20a98d0>
name[self]._nosig assign[=] constant[False]
if name[self]._interrupted begin[:]
<ast.Raise object at 0x7da1b20ab280> | keyword[def] identifier[block] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_nosig] = keyword[True]
keyword[yield]
identifier[self] . identifier[_nosig] = keyword[False]
keyword[if] identifier[self] . identifier[_interrupted] :
keyword[raise] identifier[SystemExit] ( literal[string] ) | def block(self):
"""While this context manager is active any signals for aborting
the process will be queued and exit the program once the context
is left.
"""
self._nosig = True
yield
self._nosig = False
if self._interrupted:
raise SystemExit('Aborted...') # depends on [control=['if'], data=[]] |
def _get_any_translated_model(self, meta=None):
    """
    Return any available translation.
    Returns None if there are no translations at all.

    :param meta: optional entry of ``self._parler_meta`` selecting which
        translated model to inspect; defaults to the root model.
    :return: a translation model instance, or ``None``.
    """
    if meta is None:
        meta = self._parler_meta.root
    tr_model = meta.model
    local_cache = self._translations_cache[tr_model]
    if local_cache:
        # There is already a language available in the cache. No need for queries.
        # Give consistent answers if they exist: prefer the current
        # language, then the configured fallback languages.
        check_languages = [self._current_language] + self.get_fallback_languages()
        try:
            for fallback_lang in check_languages:
                trans = local_cache.get(fallback_lang, None)
                if trans and not is_missing(trans):
                    return trans
            # None of the preferred languages is cached; settle for any
            # cached translation that is not a missing-marker.
            return next(t for t in six.itervalues(local_cache) if not is_missing(t))
        except StopIteration:
            # Cache holds only missing-markers; fall through to a query.
            pass
    try:
        # Use prefetch if available, otherwise perform separate query.
        prefetch = self._get_prefetched_translations(meta=meta)
        if prefetch is not None:
            translation = prefetch[0]  # Already a list
        else:
            translation = self._get_translated_queryset(meta=meta)[0]
    except IndexError:
        # No translations exist for this object at all.
        return None
    else:
        # Store in both the per-instance and the shared translation cache.
        local_cache[translation.language_code] = translation
        _cache_translation(translation)
        return translation
constant[
Return any available translation.
Returns None if there are no translations at all.
]
if compare[name[meta] is constant[None]] begin[:]
variable[meta] assign[=] name[self]._parler_meta.root
variable[tr_model] assign[=] name[meta].model
variable[local_cache] assign[=] call[name[self]._translations_cache][name[tr_model]]
if name[local_cache] begin[:]
variable[check_languages] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da18bc73b20>]] + call[name[self].get_fallback_languages, parameter[]]]
<ast.Try object at 0x7da18bc73be0>
<ast.Try object at 0x7da18bc72da0> | keyword[def] identifier[_get_any_translated_model] ( identifier[self] , identifier[meta] = keyword[None] ):
literal[string]
keyword[if] identifier[meta] keyword[is] keyword[None] :
identifier[meta] = identifier[self] . identifier[_parler_meta] . identifier[root]
identifier[tr_model] = identifier[meta] . identifier[model]
identifier[local_cache] = identifier[self] . identifier[_translations_cache] [ identifier[tr_model] ]
keyword[if] identifier[local_cache] :
identifier[check_languages] =[ identifier[self] . identifier[_current_language] ]+ identifier[self] . identifier[get_fallback_languages] ()
keyword[try] :
keyword[for] identifier[fallback_lang] keyword[in] identifier[check_languages] :
identifier[trans] = identifier[local_cache] . identifier[get] ( identifier[fallback_lang] , keyword[None] )
keyword[if] identifier[trans] keyword[and] keyword[not] identifier[is_missing] ( identifier[trans] ):
keyword[return] identifier[trans]
keyword[return] identifier[next] ( identifier[t] keyword[for] identifier[t] keyword[in] identifier[six] . identifier[itervalues] ( identifier[local_cache] ) keyword[if] keyword[not] identifier[is_missing] ( identifier[t] ))
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[try] :
identifier[prefetch] = identifier[self] . identifier[_get_prefetched_translations] ( identifier[meta] = identifier[meta] )
keyword[if] identifier[prefetch] keyword[is] keyword[not] keyword[None] :
identifier[translation] = identifier[prefetch] [ literal[int] ]
keyword[else] :
identifier[translation] = identifier[self] . identifier[_get_translated_queryset] ( identifier[meta] = identifier[meta] )[ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[return] keyword[None]
keyword[else] :
identifier[local_cache] [ identifier[translation] . identifier[language_code] ]= identifier[translation]
identifier[_cache_translation] ( identifier[translation] )
keyword[return] identifier[translation] | def _get_any_translated_model(self, meta=None):
"""
Return any available translation.
Returns None if there are no translations at all.
"""
if meta is None:
meta = self._parler_meta.root # depends on [control=['if'], data=['meta']]
tr_model = meta.model
local_cache = self._translations_cache[tr_model]
if local_cache:
# There is already a language available in the case. No need for queries.
# Give consistent answers if they exist.
check_languages = [self._current_language] + self.get_fallback_languages()
try:
for fallback_lang in check_languages:
trans = local_cache.get(fallback_lang, None)
if trans and (not is_missing(trans)):
return trans # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fallback_lang']]
return next((t for t in six.itervalues(local_cache) if not is_missing(t))) # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
try:
# Use prefetch if available, otherwise perform separate query.
prefetch = self._get_prefetched_translations(meta=meta)
if prefetch is not None:
translation = prefetch[0] # Already a list # depends on [control=['if'], data=['prefetch']]
else:
translation = self._get_translated_queryset(meta=meta)[0] # depends on [control=['try'], data=[]]
except IndexError:
return None # depends on [control=['except'], data=[]]
else:
local_cache[translation.language_code] = translation
_cache_translation(translation)
return translation |
def create_from_template(self, client_id, subject, name, from_name,
                         from_email, reply_to, list_ids, segment_ids, template_id, template_content):
    """Creates a new campaign for a client, based on a template.
    :param client_id: String ID of the client the campaign is created for.
    :param subject: String subject line of the campaign.
    :param name: String name of the campaign.
    :param from_name: String from name for the campaign.
    :param from_email: String from address for the campaign.
    :param reply_to: String reply-to address for the campaign.
    :param list_ids: List of String IDs of the lists the campaign will be
        sent to.
    :param segment_ids: List of String IDs of the segments the campaign
        will be sent to.
    :param template_id: String ID of the template the campaign is based on.
    :param template_content: Dict with the content for the editable areas
        of the template. See
        campaignmonitor.com/api/campaigns/#creating_a_campaign_from_template
        for full details of the template content format.
    :returns: String ID of the newly created campaign.
    """
    payload = json.dumps({
        "Subject": subject,
        "Name": name,
        "FromName": from_name,
        "FromEmail": from_email,
        "ReplyTo": reply_to,
        "ListIDs": list_ids,
        "SegmentIDs": segment_ids,
        "TemplateID": template_id,
        "TemplateContent": template_content,
    })
    endpoint = "/campaigns/%s/fromtemplate.json" % client_id
    response = self._post(endpoint, payload)
    # Remember the new campaign's ID on the instance and hand it back.
    self.campaign_id = json_to_py(response)
    return self.campaign_id
constant[Creates a new campaign for a client, from a template.
:param client_id: String representing the ID of the client for whom the
campaign will be created.
:param subject: String representing the subject of the campaign.
:param name: String representing the name of the campaign.
:param from_name: String representing the from name for the campaign.
:param from_email: String representing the from address for the campaign.
:param reply_to: String representing the reply-to address for the campaign.
:param list_ids: Array of Strings representing the IDs of the lists to
which the campaign will be sent.
:param segment_ids: Array of Strings representing the IDs of the segments to
which the campaign will be sent.
:param template_id: String representing the ID of the template on which
the campaign will be based.
:param template_content: Hash representing the content to be used for the
editable areas of the template. See documentation at
campaignmonitor.com/api/campaigns/#creating_a_campaign_from_template
for full details of template content format.
:returns String representing the ID of the newly created campaign.
]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f7224a0>, <ast.Constant object at 0x7da18f722fb0>, <ast.Constant object at 0x7da18f7239d0>, <ast.Constant object at 0x7da18f722830>, <ast.Constant object at 0x7da18f723b20>, <ast.Constant object at 0x7da18f723670>, <ast.Constant object at 0x7da18f723820>, <ast.Constant object at 0x7da18f7219c0>, <ast.Constant object at 0x7da18f721450>], [<ast.Name object at 0x7da18f721c90>, <ast.Name object at 0x7da18f723790>, <ast.Name object at 0x7da18f722320>, <ast.Name object at 0x7da18f723e50>, <ast.Name object at 0x7da18f723a00>, <ast.Name object at 0x7da18f7230a0>, <ast.Name object at 0x7da18f722ce0>, <ast.Name object at 0x7da18f723040>, <ast.Name object at 0x7da18f720e50>]]
variable[response] assign[=] call[name[self]._post, parameter[binary_operation[constant[/campaigns/%s/fromtemplate.json] <ast.Mod object at 0x7da2590d6920> name[client_id]], call[name[json].dumps, parameter[name[body]]]]]
name[self].campaign_id assign[=] call[name[json_to_py], parameter[name[response]]]
return[name[self].campaign_id] | keyword[def] identifier[create_from_template] ( identifier[self] , identifier[client_id] , identifier[subject] , identifier[name] , identifier[from_name] ,
identifier[from_email] , identifier[reply_to] , identifier[list_ids] , identifier[segment_ids] , identifier[template_id] , identifier[template_content] ):
literal[string]
identifier[body] ={
literal[string] : identifier[subject] ,
literal[string] : identifier[name] ,
literal[string] : identifier[from_name] ,
literal[string] : identifier[from_email] ,
literal[string] : identifier[reply_to] ,
literal[string] : identifier[list_ids] ,
literal[string] : identifier[segment_ids] ,
literal[string] : identifier[template_id] ,
literal[string] : identifier[template_content] }
identifier[response] = identifier[self] . identifier[_post] ( literal[string] %
identifier[client_id] , identifier[json] . identifier[dumps] ( identifier[body] ))
identifier[self] . identifier[campaign_id] = identifier[json_to_py] ( identifier[response] )
keyword[return] identifier[self] . identifier[campaign_id] | def create_from_template(self, client_id, subject, name, from_name, from_email, reply_to, list_ids, segment_ids, template_id, template_content):
"""Creates a new campaign for a client, from a template.
:param client_id: String representing the ID of the client for whom the
campaign will be created.
:param subject: String representing the subject of the campaign.
:param name: String representing the name of the campaign.
:param from_name: String representing the from name for the campaign.
:param from_email: String representing the from address for the campaign.
:param reply_to: String representing the reply-to address for the campaign.
:param list_ids: Array of Strings representing the IDs of the lists to
which the campaign will be sent.
:param segment_ids: Array of Strings representing the IDs of the segments to
which the campaign will be sent.
:param template_id: String representing the ID of the template on which
the campaign will be based.
:param template_content: Hash representing the content to be used for the
editable areas of the template. See documentation at
campaignmonitor.com/api/campaigns/#creating_a_campaign_from_template
for full details of template content format.
:returns String representing the ID of the newly created campaign.
"""
body = {'Subject': subject, 'Name': name, 'FromName': from_name, 'FromEmail': from_email, 'ReplyTo': reply_to, 'ListIDs': list_ids, 'SegmentIDs': segment_ids, 'TemplateID': template_id, 'TemplateContent': template_content}
response = self._post('/campaigns/%s/fromtemplate.json' % client_id, json.dumps(body))
self.campaign_id = json_to_py(response)
return self.campaign_id |
def gotResolverError(self, failure, protocol, message, address):
    '''
    Reply with an error rCode after a failed lookup.
    Adapted from twisted.names, but logs only the error message rather
    than the whole failure traceback.
    '''
    is_name_error = failure.check(dns.DomainError, dns.AuthoritativeDomainError)
    message.rCode = dns.ENAME if is_name_error else dns.ESERVER
    if not is_name_error:
        # Unexpected server-side failure: keep a short note in the log.
        log.msg(failure.getErrorMessage())
    self.sendReply(protocol, message, address)
    if self.verbose:
        log.msg("Lookup failed")
constant[
Copied from twisted.names.
Removes logging the whole failure traceback.
]
if call[name[failure].check, parameter[name[dns].DomainError, name[dns].AuthoritativeDomainError]] begin[:]
name[message].rCode assign[=] name[dns].ENAME
call[name[self].sendReply, parameter[name[protocol], name[message], name[address]]]
if name[self].verbose begin[:]
call[name[log].msg, parameter[constant[Lookup failed]]] | keyword[def] identifier[gotResolverError] ( identifier[self] , identifier[failure] , identifier[protocol] , identifier[message] , identifier[address] ):
literal[string]
keyword[if] identifier[failure] . identifier[check] ( identifier[dns] . identifier[DomainError] , identifier[dns] . identifier[AuthoritativeDomainError] ):
identifier[message] . identifier[rCode] = identifier[dns] . identifier[ENAME]
keyword[else] :
identifier[message] . identifier[rCode] = identifier[dns] . identifier[ESERVER]
identifier[log] . identifier[msg] ( identifier[failure] . identifier[getErrorMessage] ())
identifier[self] . identifier[sendReply] ( identifier[protocol] , identifier[message] , identifier[address] )
keyword[if] identifier[self] . identifier[verbose] :
identifier[log] . identifier[msg] ( literal[string] ) | def gotResolverError(self, failure, protocol, message, address):
"""
Copied from twisted.names.
Removes logging the whole failure traceback.
"""
if failure.check(dns.DomainError, dns.AuthoritativeDomainError):
message.rCode = dns.ENAME # depends on [control=['if'], data=[]]
else:
message.rCode = dns.ESERVER
log.msg(failure.getErrorMessage())
self.sendReply(protocol, message, address)
if self.verbose:
log.msg('Lookup failed') # depends on [control=['if'], data=[]] |
def surface_2D(num_lat=90, num_lon=180, water_depth=10., lon=None,
               lat=None, **kwargs):
    """Creates a 2D slab ocean Domain in latitude and longitude with uniform water depth.
    Domain has a single heat capacity according to the specified water depth.
    **Function-call argument** \n
    :param int num_lat: number of latitude points [default: 90]
    :param int num_lon: number of longitude points [default: 180]
    :param float water_depth: depth of the slab ocean in meters [default: 10.]
    :param lat: specification for latitude axis (optional)
    :type lat: :class:`~climlab.domain.axis.Axis` or latitude array
    :param lon: specification for longitude axis (optional)
    :type lon: :class:`~climlab.domain.axis.Axis` or longitude array
    :raises: :exc:`ValueError` if `lat` is given but neither Axis nor latitude array.
    :raises: :exc:`ValueError` if `lon` is given but neither Axis nor longitude array.
    :returns: surface domain
    :rtype: :class:`SlabOcean`
    :Example:
        ::
            >>> from climlab import domain
            >>> sfc = domain.surface_2D(num_lat=36, num_lon=72)
            >>> print sfc
            climlab Domain object with domain_type=ocean and shape=(36, 72, 1)
    """
    if lat is None:
        latax = Axis(axis_type='lat', num_points=num_lat)
    elif isinstance(lat, Axis):
        latax = lat
    else:
        # NOTE: except Exception (not a bare except) so KeyboardInterrupt /
        # SystemExit are not swallowed while coercing the array into an Axis.
        try:
            latax = Axis(axis_type='lat', points=lat)
        except Exception:
            raise ValueError('lat must be Axis object or latitude array')
    if lon is None:
        lonax = Axis(axis_type='lon', num_points=num_lon)
    elif isinstance(lon, Axis):
        lonax = lon
    else:
        try:
            lonax = Axis(axis_type='lon', points=lon)
        except Exception:
            raise ValueError('lon must be Axis object or longitude array')
    # Depth axis spans the slab from water_depth (bottom) up to the surface (0).
    depthax = Axis(axis_type='depth', bounds=[water_depth, 0.])
    axes = {'lat': latax, 'lon': lonax, 'depth': depthax}
    slab = SlabOcean(axes=axes, **kwargs)
    return slab
constant[Creates a 2D slab ocean Domain in latitude and longitude with uniform water depth.
Domain has a single heat capacity according to the specified water depth.
**Function-call argument**
:param int num_lat: number of latitude points [default: 90]
:param int num_lon: number of longitude points [default: 180]
:param float water_depth: depth of the slab ocean in meters [default: 10.]
:param lat: specification for latitude axis (optional)
:type lat: :class:`~climlab.domain.axis.Axis` or latitude array
:param lon: specification for longitude axis (optional)
:type lon: :class:`~climlab.domain.axis.Axis` or longitude array
:raises: :exc:`ValueError` if `lat` is given but neither Axis nor latitude array.
:raises: :exc:`ValueError` if `lon` is given but neither Axis nor longitude array.
:returns: surface domain
:rtype: :class:`SlabOcean`
:Example:
::
>>> from climlab import domain
>>> sfc = domain.surface_2D(num_lat=36, num_lat=72)
>>> print sfc
climlab Domain object with domain_type=ocean and shape=(36, 72, 1)
]
if compare[name[lat] is constant[None]] begin[:]
variable[latax] assign[=] call[name[Axis], parameter[]]
if compare[name[lon] is constant[None]] begin[:]
variable[lonax] assign[=] call[name[Axis], parameter[]]
variable[depthax] assign[=] call[name[Axis], parameter[]]
variable[axes] assign[=] dictionary[[<ast.Constant object at 0x7da1b26af8e0>, <ast.Constant object at 0x7da1b26ad090>, <ast.Constant object at 0x7da1b26aead0>], [<ast.Name object at 0x7da1b26ae470>, <ast.Name object at 0x7da1b26acc70>, <ast.Name object at 0x7da1b26ad810>]]
variable[slab] assign[=] call[name[SlabOcean], parameter[]]
return[name[slab]] | keyword[def] identifier[surface_2D] ( identifier[num_lat] = literal[int] , identifier[num_lon] = literal[int] , identifier[water_depth] = literal[int] , identifier[lon] = keyword[None] ,
identifier[lat] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[lat] keyword[is] keyword[None] :
identifier[latax] = identifier[Axis] ( identifier[axis_type] = literal[string] , identifier[num_points] = identifier[num_lat] )
keyword[elif] identifier[isinstance] ( identifier[lat] , identifier[Axis] ):
identifier[latax] = identifier[lat]
keyword[else] :
keyword[try] :
identifier[latax] = identifier[Axis] ( identifier[axis_type] = literal[string] , identifier[points] = identifier[lat] )
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[lon] keyword[is] keyword[None] :
identifier[lonax] = identifier[Axis] ( identifier[axis_type] = literal[string] , identifier[num_points] = identifier[num_lon] )
keyword[elif] identifier[isinstance] ( identifier[lon] , identifier[Axis] ):
identifier[lonax] = identifier[lon]
keyword[else] :
keyword[try] :
identifier[lonax] = identifier[Axis] ( identifier[axis_type] = literal[string] , identifier[points] = identifier[lon] )
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[depthax] = identifier[Axis] ( identifier[axis_type] = literal[string] , identifier[bounds] =[ identifier[water_depth] , literal[int] ])
identifier[axes] ={ literal[string] : identifier[latax] , literal[string] : identifier[lonax] , literal[string] : identifier[depthax] }
identifier[slab] = identifier[SlabOcean] ( identifier[axes] = identifier[axes] ,** identifier[kwargs] )
keyword[return] identifier[slab] | def surface_2D(num_lat=90, num_lon=180, water_depth=10.0, lon=None, lat=None, **kwargs):
"""Creates a 2D slab ocean Domain in latitude and longitude with uniform water depth.
Domain has a single heat capacity according to the specified water depth.
**Function-call argument**
:param int num_lat: number of latitude points [default: 90]
:param int num_lon: number of longitude points [default: 180]
:param float water_depth: depth of the slab ocean in meters [default: 10.]
:param lat: specification for latitude axis (optional)
:type lat: :class:`~climlab.domain.axis.Axis` or latitude array
:param lon: specification for longitude axis (optional)
:type lon: :class:`~climlab.domain.axis.Axis` or longitude array
:raises: :exc:`ValueError` if `lat` is given but neither Axis nor latitude array.
:raises: :exc:`ValueError` if `lon` is given but neither Axis nor longitude array.
:returns: surface domain
:rtype: :class:`SlabOcean`
:Example:
::
>>> from climlab import domain
>>> sfc = domain.surface_2D(num_lat=36, num_lat=72)
>>> print sfc
climlab Domain object with domain_type=ocean and shape=(36, 72, 1)
"""
if lat is None:
latax = Axis(axis_type='lat', num_points=num_lat) # depends on [control=['if'], data=[]]
elif isinstance(lat, Axis):
latax = lat # depends on [control=['if'], data=[]]
else:
try:
latax = Axis(axis_type='lat', points=lat) # depends on [control=['try'], data=[]]
except:
raise ValueError('lat must be Axis object or latitude array') # depends on [control=['except'], data=[]]
if lon is None:
lonax = Axis(axis_type='lon', num_points=num_lon) # depends on [control=['if'], data=[]]
elif isinstance(lon, Axis):
lonax = lon # depends on [control=['if'], data=[]]
else:
try:
lonax = Axis(axis_type='lon', points=lon) # depends on [control=['try'], data=[]]
except:
raise ValueError('lon must be Axis object or longitude array') # depends on [control=['except'], data=[]]
depthax = Axis(axis_type='depth', bounds=[water_depth, 0.0])
axes = {'lat': latax, 'lon': lonax, 'depth': depthax}
slab = SlabOcean(axes=axes, **kwargs)
return slab |
def fetchall(self):
    """Fetch all remaining rows of the current select result set.
    :returns: list of row tuples
    """
    rows = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
    batch = rows
    # A completely full batch, or a result set whose final part has not
    # arrived yet, means the server may still hold more rows for us.
    while len(batch) == self.FETCHALL_BLOCKSIZE or not self._received_last_resultset_part:
        batch = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
        rows.extend(batch)
    return rows
constant[Fetch all available rows from select result set.
:returns: list of row tuples
]
variable[result] assign[=] call[name[self].fetchmany, parameter[]]
while <ast.BoolOp object at 0x7da1b1793af0> begin[:]
variable[r] assign[=] call[name[self].fetchmany, parameter[]]
call[name[result].extend, parameter[name[r]]]
return[name[result]] | keyword[def] identifier[fetchall] ( identifier[self] ):
literal[string]
identifier[result] = identifier[r] = identifier[self] . identifier[fetchmany] ( identifier[size] = identifier[self] . identifier[FETCHALL_BLOCKSIZE] )
keyword[while] identifier[len] ( identifier[r] )== identifier[self] . identifier[FETCHALL_BLOCKSIZE] keyword[or] keyword[not] identifier[self] . identifier[_received_last_resultset_part] :
identifier[r] = identifier[self] . identifier[fetchmany] ( identifier[size] = identifier[self] . identifier[FETCHALL_BLOCKSIZE] )
identifier[result] . identifier[extend] ( identifier[r] )
keyword[return] identifier[result] | def fetchall(self):
"""Fetch all available rows from select result set.
:returns: list of row tuples
"""
result = r = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
while len(r) == self.FETCHALL_BLOCKSIZE or not self._received_last_resultset_part:
r = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
result.extend(r) # depends on [control=['while'], data=[]]
return result |
def load(name, **kwargs):
    '''
    Loads the configuration provided onto the junos device.
    .. code-block:: yaml
        Install the mentioned config:
          junos:
            - load
            - path: salt//configs/interface.set
    .. code-block:: yaml
        Install the mentioned config:
          junos:
            - load
            - template_path: salt//configs/interface.set
            - template_vars:
                interface_name: lo0
                description: Creating interface via SaltStack.
    name
        Path where the configuration/template file is present. If the file has
        a ``*.conf`` extension, the content is treated as text format. If the
        file has a ``*.xml`` extension, the content is treated as XML format. If
        the file has a ``*.set`` extension, the content is treated as Junos OS
        ``set`` commands.
    overwrite : False
        Set to ``True`` if you want this file is to completely replace the
        configuration file.
    replace : False
        Specify whether the configuration file uses "replace:" statements.
        Only those statements under the 'replace' tag will be changed.
    format:
        Determines the format of the contents.
    update : False
        Compare a complete loaded configuration against the candidate
        configuration. For each hierarchy level or configuration object that is
        different in the two configurations, the version in the loaded
        configuration replaces the version in the candidate configuration. When
        the configuration is later committed, only system processes that are
        affected by the changed configuration elements parse the new
        configuration. This action is supported from PyEZ 2.1 (default = False)
    template_vars
        Variables to be passed into the template processing engine in addition
        to those present in __pillar__, __opts__, __grains__, etc.
        You may reference these variables in your template like so:
        {{ template_vars["var_name"] }}
    '''
    # Delegate to the execution module; its return value becomes 'changes'.
    changes = __salt__['junos.load'](name, **kwargs)
    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ''}
constant[
Loads the configuration provided onto the junos device.
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- path: salt//configs/interface.set
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- template_path: salt//configs/interface.set
- template_vars:
interface_name: lo0
description: Creating interface via SaltStack.
name
Path where the configuration/template file is present. If the file has
a ``*.conf`` extension, the content is treated as text format. If the
file has a ``*.xml`` extension, the content is treated as XML format. If
the file has a ``*.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file is to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses "replace:" statements.
Only those statements under the 'replace' tag will be changed.
format:
Determines the format of the contents.
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1 (default = False)
template_vars
Variables to be passed into the template processing engine in addition
to those present in __pillar__, __opts__, __grains__, etc.
You may reference these variables in your template like so:
{{ template_vars["var_name"] }}
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21683d0>, <ast.Constant object at 0x7da1b216add0>, <ast.Constant object at 0x7da1b216b130>, <ast.Constant object at 0x7da1b2168e20>], [<ast.Name object at 0x7da1b21691e0>, <ast.Dict object at 0x7da1b21680d0>, <ast.Constant object at 0x7da1b2169000>, <ast.Constant object at 0x7da1b2169fc0>]]
call[name[ret]][constant[changes]] assign[=] call[call[name[__salt__]][constant[junos.load]], parameter[name[name]]]
return[name[ret]] | keyword[def] identifier[load] ( identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[True] , literal[string] : literal[string] }
identifier[ret] [ literal[string] ]= identifier[__salt__] [ literal[string] ]( identifier[name] ,** identifier[kwargs] )
keyword[return] identifier[ret] | def load(name, **kwargs):
"""
Loads the configuration provided onto the junos device.
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- path: salt//configs/interface.set
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- template_path: salt//configs/interface.set
- template_vars:
interface_name: lo0
description: Creating interface via SaltStack.
name
Path where the configuration/template file is present. If the file has
a ``*.conf`` extension, the content is treated as text format. If the
file has a ``*.xml`` extension, the content is treated as XML format. If
the file has a ``*.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file is to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses "replace:" statements.
Only those statements under the 'replace' tag will be changed.
format:
Determines the format of the contents.
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1 (default = False)
template_vars
Variables to be passed into the template processing engine in addition
to those present in __pillar__, __opts__, __grains__, etc.
You may reference these variables in your template like so:
{{ template_vars["var_name"] }}
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.load'](name, **kwargs)
return ret |
def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):
    """ Resumes processing of an existing job that is presently in the
    STATUS_COMPLETED state.
    NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO
    NOT use it on Hypersearch jobs.
    This prepares an existing job entry to resume processing. The CJM is always
    periodically sweeping the jobs table and when it finds a job that is ready
    to run, it will proceed to start it up on Hadoop.
    Parameters:
    ----------------------------------------------------------------
    conn: Owned connection acquired from ConnectionFactory.get()
    jobID: jobID of the job to resume
    alreadyRunning: Used for unit test purposes only. This inserts the job
                    in the running state. It is used when running a worker
                    in standalone mode without hadoop.
    retval: nothing. If no rows were affected -- either because there was
            no job with a matching jobID, or because the job's status was
            not STATUS_COMPLETED (e.g., another process already resumed it,
            or the operation was retried after a connection failure) --
            the condition is logged at INFO level and the method returns
            normally; no exception is raised.
    """
    # Initial status
    if alreadyRunning:
      # Use STATUS_TESTMODE so scheduler will leave our row alone
      initStatus = self.STATUS_TESTMODE
    else:
      initStatus = self.STATUS_NOTSTARTED
    # NOTE: some of our clients (e.g., StreamMgr) may call us (directly or
    # indirectly) for the same job from different processes (even different
    # machines), so we should be prepared for the update to fail; same holds
    # if the UPDATE succeeds, but connection fails while reading result
    assignments = [
      'status=%s',
      'completion_reason=DEFAULT',
      'completion_msg=DEFAULT',
      'worker_completion_reason=DEFAULT',
      'worker_completion_msg=DEFAULT',
      'end_time=DEFAULT',
      'cancel=DEFAULT',
      '_eng_last_update_time=UTC_TIMESTAMP()',
      '_eng_allocate_new_workers=DEFAULT',
      '_eng_untended_dead_workers=DEFAULT',
      'num_failed_workers=DEFAULT',
      'last_failed_worker_error_msg=DEFAULT',
      '_eng_cleaning_status=DEFAULT',
    ]
    assignmentValues = [initStatus]
    if alreadyRunning:
      # Test mode: claim the row for this connection and stamp start time now.
      assignments += ['_eng_cjm_conn_id=%s', 'start_time=UTC_TIMESTAMP()',
                      '_eng_last_update_time=UTC_TIMESTAMP()']
      assignmentValues.append(self._connectionID)
    else:
      assignments += ['_eng_cjm_conn_id=DEFAULT', 'start_time=DEFAULT']
    assignments = ', '.join(assignments)
    # The status=STATUS_COMPLETED guard makes this UPDATE idempotent across
    # competing processes: only one resume attempt can flip the row.
    query = 'UPDATE %s SET %s ' \
            ' WHERE job_id=%%s AND status=%%s' \
            % (self.jobsTableName, assignments)
    sqlParams = assignmentValues + [jobID, self.STATUS_COMPLETED]
    numRowsAffected = conn.cursor.execute(query, sqlParams)
    # job_id is a key, so at most one row can ever match.
    assert numRowsAffected <= 1, repr(numRowsAffected)
    if numRowsAffected == 0:
      self._logger.info(
        "_resumeJobNoRetries: Redundant job-resume UPDATE: job was not "
        "suspended or was resumed by another process or operation was retried "
        "after connection failure; jobID=%s", jobID)
    return
constant[ Resumes processing of an existing job that is presently in the
STATUS_COMPLETED state.
NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO
NOT use it on Hypersearch jobs.
This prepares an existing job entry to resume processing. The CJM is always
periodically sweeping the jobs table and when it finds a job that is ready
to run, it will proceed to start it up on Hadoop.
Parameters:
----------------------------------------------------------------
conn: Owned connection acquired from ConnectionFactory.get()
jobID: jobID of the job to resume
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop.
raises: Throws a RuntimeError if no rows are affected. This could
either be because:
1) Because there was not matching jobID
2) or if the status of the job was not STATUS_COMPLETED.
retval: nothing
]
if name[alreadyRunning] begin[:]
variable[initStatus] assign[=] name[self].STATUS_TESTMODE
variable[assignments] assign[=] list[[<ast.Constant object at 0x7da18f58f850>, <ast.Constant object at 0x7da18f58e680>, <ast.Constant object at 0x7da18f58cfd0>, <ast.Constant object at 0x7da18f58fc70>, <ast.Constant object at 0x7da18f58de40>, <ast.Constant object at 0x7da18f58c670>, <ast.Constant object at 0x7da18f58ff70>, <ast.Constant object at 0x7da18f58c070>, <ast.Constant object at 0x7da20c7c91e0>, <ast.Constant object at 0x7da20c7c9e40>, <ast.Constant object at 0x7da20c7c8fa0>, <ast.Constant object at 0x7da20c7cb640>, <ast.Constant object at 0x7da20c7c8190>]]
variable[assignmentValues] assign[=] list[[<ast.Name object at 0x7da20c7ca350>]]
if name[alreadyRunning] begin[:]
<ast.AugAssign object at 0x7da20c7cab00>
call[name[assignmentValues].append, parameter[name[self]._connectionID]]
variable[assignments] assign[=] call[constant[, ].join, parameter[name[assignments]]]
variable[query] assign[=] binary_operation[constant[UPDATE %s SET %s WHERE job_id=%%s AND status=%%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c7ca470>, <ast.Name object at 0x7da20c7c90f0>]]]
variable[sqlParams] assign[=] binary_operation[name[assignmentValues] + list[[<ast.Name object at 0x7da20c7cae00>, <ast.Attribute object at 0x7da20c7cbd00>]]]
variable[numRowsAffected] assign[=] call[name[conn].cursor.execute, parameter[name[query], name[sqlParams]]]
assert[compare[name[numRowsAffected] less_or_equal[<=] constant[1]]]
if compare[name[numRowsAffected] equal[==] constant[0]] begin[:]
call[name[self]._logger.info, parameter[constant[_resumeJobNoRetries: Redundant job-resume UPDATE: job was not suspended or was resumed by another process or operation was retried after connection failure; jobID=%s], name[jobID]]]
return[None] | keyword[def] identifier[_resumeJobNoRetries] ( identifier[self] , identifier[conn] , identifier[jobID] , identifier[alreadyRunning] ):
literal[string]
keyword[if] identifier[alreadyRunning] :
identifier[initStatus] = identifier[self] . identifier[STATUS_TESTMODE]
keyword[else] :
identifier[initStatus] = identifier[self] . identifier[STATUS_NOTSTARTED]
identifier[assignments] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[assignmentValues] =[ identifier[initStatus] ]
keyword[if] identifier[alreadyRunning] :
identifier[assignments] +=[ literal[string] , literal[string] ,
literal[string] ]
identifier[assignmentValues] . identifier[append] ( identifier[self] . identifier[_connectionID] )
keyword[else] :
identifier[assignments] +=[ literal[string] , literal[string] ]
identifier[assignments] = literal[string] . identifier[join] ( identifier[assignments] )
identifier[query] = literal[string] literal[string] %( identifier[self] . identifier[jobsTableName] , identifier[assignments] )
identifier[sqlParams] = identifier[assignmentValues] +[ identifier[jobID] , identifier[self] . identifier[STATUS_COMPLETED] ]
identifier[numRowsAffected] = identifier[conn] . identifier[cursor] . identifier[execute] ( identifier[query] , identifier[sqlParams] )
keyword[assert] identifier[numRowsAffected] <= literal[int] , identifier[repr] ( identifier[numRowsAffected] )
keyword[if] identifier[numRowsAffected] == literal[int] :
identifier[self] . identifier[_logger] . identifier[info] (
literal[string]
literal[string]
literal[string] , identifier[jobID] )
keyword[return] | def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):
""" Resumes processing of an existing job that is presently in the
STATUS_COMPLETED state.
NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO
NOT use it on Hypersearch jobs.
This prepares an existing job entry to resume processing. The CJM is always
periodically sweeping the jobs table and when it finds a job that is ready
to run, it will proceed to start it up on Hadoop.
Parameters:
----------------------------------------------------------------
conn: Owned connection acquired from ConnectionFactory.get()
jobID: jobID of the job to resume
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop.
raises: Throws a RuntimeError if no rows are affected. This could
either be because:
1) Because there was not matching jobID
2) or if the status of the job was not STATUS_COMPLETED.
retval: nothing
"""
# Initial status
if alreadyRunning:
# Use STATUS_TESTMODE so scheduler will leave our row alone
initStatus = self.STATUS_TESTMODE # depends on [control=['if'], data=[]]
else:
initStatus = self.STATUS_NOTSTARTED
# NOTE: some of our clients (e.g., StreamMgr) may call us (directly or
# indirectly) for the same job from different processes (even different
# machines), so we should be prepared for the update to fail; same holds
# if the UPDATE succeeds, but connection fails while reading result
assignments = ['status=%s', 'completion_reason=DEFAULT', 'completion_msg=DEFAULT', 'worker_completion_reason=DEFAULT', 'worker_completion_msg=DEFAULT', 'end_time=DEFAULT', 'cancel=DEFAULT', '_eng_last_update_time=UTC_TIMESTAMP()', '_eng_allocate_new_workers=DEFAULT', '_eng_untended_dead_workers=DEFAULT', 'num_failed_workers=DEFAULT', 'last_failed_worker_error_msg=DEFAULT', '_eng_cleaning_status=DEFAULT']
assignmentValues = [initStatus]
if alreadyRunning:
assignments += ['_eng_cjm_conn_id=%s', 'start_time=UTC_TIMESTAMP()', '_eng_last_update_time=UTC_TIMESTAMP()']
assignmentValues.append(self._connectionID) # depends on [control=['if'], data=[]]
else:
assignments += ['_eng_cjm_conn_id=DEFAULT', 'start_time=DEFAULT']
assignments = ', '.join(assignments)
query = 'UPDATE %s SET %s WHERE job_id=%%s AND status=%%s' % (self.jobsTableName, assignments)
sqlParams = assignmentValues + [jobID, self.STATUS_COMPLETED]
numRowsAffected = conn.cursor.execute(query, sqlParams)
assert numRowsAffected <= 1, repr(numRowsAffected)
if numRowsAffected == 0:
self._logger.info('_resumeJobNoRetries: Redundant job-resume UPDATE: job was not suspended or was resumed by another process or operation was retried after connection failure; jobID=%s', jobID) # depends on [control=['if'], data=[]]
return |
def generate_image_beacon(event_collection, body, timestamp=None):
    """Generate an image beacon URL for a single event.

    :param event_collection: name of the collection the event belongs to
    :param body: dict, the payload of the event
    :param timestamp: datetime, optional timestamp to attach to the event
    """
    # Make sure the module-level client exists before delegating to it.
    _initialize_client_from_environment()
    return _client.generate_image_beacon(
        event_collection, body, timestamp=timestamp)
constant[ Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
]
call[name[_initialize_client_from_environment], parameter[]]
return[call[name[_client].generate_image_beacon, parameter[name[event_collection], name[body]]]] | keyword[def] identifier[generate_image_beacon] ( identifier[event_collection] , identifier[body] , identifier[timestamp] = keyword[None] ):
literal[string]
identifier[_initialize_client_from_environment] ()
keyword[return] identifier[_client] . identifier[generate_image_beacon] ( identifier[event_collection] , identifier[body] , identifier[timestamp] = identifier[timestamp] ) | def generate_image_beacon(event_collection, body, timestamp=None):
""" Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
"""
_initialize_client_from_environment()
return _client.generate_image_beacon(event_collection, body, timestamp=timestamp) |
def _verify_shape_compatibility(self, img, img_type):
    """Raise ValueError unless ``img`` matches the input image's spatial shape.

    The input image's last axis is dropped before comparing, so ``img`` is
    expected to have one fewer dimension than ``self.input_image``.
    """
    expected_shape = self.input_image.shape[:-1]
    if expected_shape == img.shape:
        return
    raise ValueError('Shape of the {} ({}) is not compatible '
                     'with input image shape: {} '
                     ''.format(img_type, img.shape, expected_shape))
constant[Checks mask shape against input image shape.]
if compare[call[name[self].input_image.shape][<ast.Slice object at 0x7da18bcc91e0>] not_equal[!=] name[img].shape] begin[:]
<ast.Raise object at 0x7da18bcc83a0> | keyword[def] identifier[_verify_shape_compatibility] ( identifier[self] , identifier[img] , identifier[img_type] ):
literal[string]
keyword[if] identifier[self] . identifier[input_image] . identifier[shape] [:- literal[int] ]!= identifier[img] . identifier[shape] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[img_type] , identifier[img] . identifier[shape] , identifier[self] . identifier[input_image] . identifier[shape] [:- literal[int] ])) | def _verify_shape_compatibility(self, img, img_type):
"""Checks mask shape against input image shape."""
if self.input_image.shape[:-1] != img.shape:
raise ValueError('Shape of the {} ({}) is not compatible with input image shape: {} '.format(img_type, img.shape, self.input_image.shape[:-1])) # depends on [control=['if'], data=[]] |
def textbox(text, width=78, boxchar='#', indent=0):
    """Draw *text*, line-wrapped, inside a box of repeated characters.

    Example::

        >>> print(textbox('Text to wrap', width=16))
        ################
        #              #
        # Text to wrap #
        #              #
        ################

    Parameters
    ----------
    text : string
        The text to wrap inside the box.
    width : int
        Total width of the box, including the border characters and the
        indentation space.  Widths below the minimum (the border plus one
        padded column; 5 for a single-character ``boxchar``) are raised to
        that minimum. (default: 78)
    boxchar : string
        Character -- or multi-character string -- used to draw the border.
        (default: '#')
    indent : int
        Number of spaces the whole box is shifted to the right. (default: 0)
    """
    unit = len(boxchar)
    minimum = unit * 2 + 3
    box_width = max(width - indent, minimum)
    margin = ' ' * indent
    text_width = box_width - minimum + 1
    repeats, remainder = divmod(box_width, unit)
    # Solid border line, and a blank padding line that sits just inside it.
    border = margin + boxchar * repeats + boxchar[:remainder]
    padding = margin + boxchar + ' ' * (box_width - unit * 2) + boxchar
    body = ['%s%s %s %s' % (margin, boxchar, row.ljust(text_width), boxchar)
            for row in wrap(text, text_width)]
    return '\n'.join([border, padding] + body + [padding, border])
constant[
Outputs line-wrapped text wrapped in a box drawn with a repeated (usually
ASCII) character.
For example:
>>> print(textbox('Text to wrap', width=16))
################
# #
# Text to wrap #
# #
################
Parameters
----------
text : string
The text to wrap
width : int
The width of the entire box, including the perimeter and
the indentation space. Because the
wrapped text is padded with an additional column of whitespace on each
side, the minimum width is 5--any width less than that is
is automatically increased to 5 (default: 78)
boxchar : string
(No pun intended.) The character to draw the box with. May also
be a string of multiple characters (default: '#')
indent : int
Amount of space by which the box should be indented. (default: 0)
]
variable[min_width] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[boxchar]]] * constant[2]] + constant[3]]
variable[width] assign[=] call[name[max], parameter[binary_operation[name[width] - name[indent]], name[min_width]]]
variable[indentspace] assign[=] binary_operation[name[indent] * constant[ ]]
variable[wrap_width] assign[=] binary_operation[binary_operation[name[width] - name[min_width]] + constant[1]]
<ast.Tuple object at 0x7da1b0e82a10> assign[=] call[name[divmod], parameter[name[width], call[name[len], parameter[name[boxchar]]]]]
variable[top_border] assign[=] binary_operation[binary_operation[name[indentspace] + binary_operation[name[boxchar] * name[q]]] + call[name[boxchar]][<ast.Slice object at 0x7da1b0e81fc0>]]
variable[top_padding] assign[=] binary_operation[binary_operation[binary_operation[name[indentspace] + name[boxchar]] + binary_operation[constant[ ] * binary_operation[name[width] - binary_operation[call[name[len], parameter[name[boxchar]]] * constant[2]]]]] + name[boxchar]]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b0e81060>
variable[top] assign[=] list[[<ast.Name object at 0x7da1b0e822f0>, <ast.Name object at 0x7da1b0e81f90>]]
variable[bottom] assign[=] list[[<ast.Name object at 0x7da1b0e81090>, <ast.Name object at 0x7da1b0e831f0>]]
return[call[constant[
].join, parameter[binary_operation[binary_operation[name[top] + name[lines]] + name[bottom]]]]] | keyword[def] identifier[textbox] ( identifier[text] , identifier[width] = literal[int] , identifier[boxchar] = literal[string] , identifier[indent] = literal[int] ):
literal[string]
identifier[min_width] = identifier[len] ( identifier[boxchar] )* literal[int] + literal[int]
identifier[width] = identifier[max] ( identifier[width] - identifier[indent] , identifier[min_width] )
identifier[indentspace] = identifier[indent] * literal[string]
identifier[wrap_width] = identifier[width] - identifier[min_width] + literal[int]
identifier[q] , identifier[r] = identifier[divmod] ( identifier[width] , identifier[len] ( identifier[boxchar] ))
identifier[top_border] = identifier[indentspace] + identifier[boxchar] * identifier[q] + identifier[boxchar] [: identifier[r] ]
identifier[top_padding] = identifier[indentspace] + identifier[boxchar] + literal[string] *( identifier[width] - identifier[len] ( identifier[boxchar] )* literal[int] )+ identifier[boxchar]
identifier[lines] =[ literal[string] %( identifier[indentspace] , identifier[boxchar] , identifier[line] . identifier[ljust] ( identifier[wrap_width] ),
identifier[boxchar] )
keyword[for] identifier[line] keyword[in] identifier[wrap] ( identifier[text] , identifier[wrap_width] )]
identifier[top] =[ identifier[top_border] , identifier[top_padding] ]
identifier[bottom] =[ identifier[top_padding] , identifier[top_border] ]
keyword[return] literal[string] . identifier[join] ( identifier[top] + identifier[lines] + identifier[bottom] ) | def textbox(text, width=78, boxchar='#', indent=0):
"""
Outputs line-wrapped text wrapped in a box drawn with a repeated (usually
ASCII) character.
For example:
>>> print(textbox('Text to wrap', width=16))
################
# #
# Text to wrap #
# #
################
Parameters
----------
text : string
The text to wrap
width : int
The width of the entire box, including the perimeter and
the indentation space. Because the
wrapped text is padded with an additional column of whitespace on each
side, the minimum width is 5--any width less than that is
is automatically increased to 5 (default: 78)
boxchar : string
(No pun intended.) The character to draw the box with. May also
be a string of multiple characters (default: '#')
indent : int
Amount of space by which the box should be indented. (default: 0)
"""
min_width = len(boxchar) * 2 + 3
width = max(width - indent, min_width)
indentspace = indent * ' '
wrap_width = width - min_width + 1
(q, r) = divmod(width, len(boxchar))
# The top/bottom border
top_border = indentspace + boxchar * q + boxchar[:r]
top_padding = indentspace + boxchar + ' ' * (width - len(boxchar) * 2) + boxchar
lines = ['%s%s %s %s' % (indentspace, boxchar, line.ljust(wrap_width), boxchar) for line in wrap(text, wrap_width)]
top = [top_border, top_padding]
bottom = [top_padding, top_border]
return '\n'.join(top + lines + bottom) |
def inner(a,b):
    '''
    inner(a,b) yields the dot product of a and b, computed in a way that
    handles sparse matrices wherever they appear. No dimensionality error
    checking is performed.
    If either argument is a scalar the result is simply a*b; two vectors or
    two matrices yield dot(a,b); a vector with a matrix behaves as if the
    vector were a one-row matrix; a matrix with a vector behaves as if the
    vector were a one-column matrix.
    '''
    # Sparse left operand: its own dot handles dense or sparse right sides.
    if sps.issparse(a):
        return a.dot(b)
    a = np.asarray(a)
    if a.ndim == 0:
        return a * b
    if sps.issparse(b):
        # Transpose so the sparse dot contracts along the correct axis.
        return b.T.dot(a) if a.ndim == 1 else b.T.dot(a.T).T
    b = np.asarray(b)
    if b.ndim == 0:
        return a * b
    if a.ndim == 1 and b.ndim == 2:
        return np.dot(b.T, a)
    return np.dot(a, b)
constant[
inner(a,b) yields the dot product of a and b, doing so in a fashion that respects sparse
matrices when encountered. This does not error check for bad dimensionality.
If a or b are constants, then the result is just the a*b; if a and b are both vectors or both
matrices, then the inner product is dot(a,b); if a is a vector and b is a matrix, this is
equivalent to as if a were a matrix with 1 row; and if a is a matrix and b a vector, this is
equivalent to as if b were a matrix with 1 column.
]
if call[name[sps].issparse, parameter[name[a]]] begin[:]
return[call[name[a].dot, parameter[name[b]]]]
if compare[call[name[len], parameter[name[a].shape]] equal[==] constant[0]] begin[:]
return[binary_operation[name[a] * name[b]]]
if call[name[sps].issparse, parameter[name[b]]] begin[:]
if compare[call[name[len], parameter[name[a].shape]] equal[==] constant[1]] begin[:]
return[call[name[b].T.dot, parameter[name[a]]]]
if compare[call[name[len], parameter[name[b].shape]] equal[==] constant[0]] begin[:]
return[binary_operation[name[a] * name[b]]]
if <ast.BoolOp object at 0x7da1b0b449a0> begin[:]
return[call[name[np].dot, parameter[name[b].T, name[a]]]] | keyword[def] identifier[inner] ( identifier[a] , identifier[b] ):
literal[string]
keyword[if] identifier[sps] . identifier[issparse] ( identifier[a] ): keyword[return] identifier[a] . identifier[dot] ( identifier[b] )
keyword[else] : identifier[a] = identifier[np] . identifier[asarray] ( identifier[a] )
keyword[if] identifier[len] ( identifier[a] . identifier[shape] )== literal[int] : keyword[return] identifier[a] * identifier[b]
keyword[if] identifier[sps] . identifier[issparse] ( identifier[b] ):
keyword[if] identifier[len] ( identifier[a] . identifier[shape] )== literal[int] : keyword[return] identifier[b] . identifier[T] . identifier[dot] ( identifier[a] )
keyword[else] : keyword[return] identifier[b] . identifier[T] . identifier[dot] ( identifier[a] . identifier[T] ). identifier[T]
keyword[else] : identifier[b] = identifier[np] . identifier[asarray] ( identifier[b] )
keyword[if] identifier[len] ( identifier[b] . identifier[shape] )== literal[int] : keyword[return] identifier[a] * identifier[b]
keyword[if] identifier[len] ( identifier[a] . identifier[shape] )== literal[int] keyword[and] identifier[len] ( identifier[b] . identifier[shape] )== literal[int] : keyword[return] identifier[np] . identifier[dot] ( identifier[b] . identifier[T] , identifier[a] )
keyword[else] : keyword[return] identifier[np] . identifier[dot] ( identifier[a] , identifier[b] ) | def inner(a, b):
"""
inner(a,b) yields the dot product of a and b, doing so in a fashion that respects sparse
matrices when encountered. This does not error check for bad dimensionality.
If a or b are constants, then the result is just the a*b; if a and b are both vectors or both
matrices, then the inner product is dot(a,b); if a is a vector and b is a matrix, this is
equivalent to as if a were a matrix with 1 row; and if a is a matrix and b a vector, this is
equivalent to as if b were a matrix with 1 column.
"""
if sps.issparse(a):
return a.dot(b) # depends on [control=['if'], data=[]]
else:
a = np.asarray(a)
if len(a.shape) == 0:
return a * b # depends on [control=['if'], data=[]]
if sps.issparse(b):
if len(a.shape) == 1:
return b.T.dot(a) # depends on [control=['if'], data=[]]
else:
return b.T.dot(a.T).T # depends on [control=['if'], data=[]]
else:
b = np.asarray(b)
if len(b.shape) == 0:
return a * b # depends on [control=['if'], data=[]]
if len(a.shape) == 1 and len(b.shape) == 2:
return np.dot(b.T, a) # depends on [control=['if'], data=[]]
else:
return np.dot(a, b) |
def cache(func, memory, func_memory_level=None, memory_level=None,
          **kwargs):
    """ Wrap ``func`` with a joblib.Memory cache, honouring memory levels.

    Caching is enabled only when the user-supplied ``memory_level`` reaches
    ``func_memory_level``; otherwise ``func`` is wrapped with a disabled
    Memory object so the caller always gets a joblib-style callable back.

    Parameters
    ----------
    func: function
        The function whose output may be cached.
    memory: instance of joblib.Memory or string
        The cache backend, or a cache directory path.
    func_memory_level: int, optional
        Minimum memory level at which caching is enabled for ``func``.
    memory_level: int, optional
        The user-requested memory level; the call is cached when it is
        greater than or equal to ``func_memory_level``.
    kwargs: keyword arguments
        Extra keyword arguments forwarded to ``memory.cache``.

    Returns
    -------
    mem: joblib.MemorizedFunc
        Object wrapping ``func``.  It may be a no-op wrapper when the
        requested level is below ``func_memory_level``; for consistency a
        joblib.Memory-backed object is always returned.
    """
    verbose = kwargs.get('verbose', 0)

    # The two levels must be supplied together: both None or both integers.
    levels = (memory_level, func_memory_level)
    levels_are_ints = all(isinstance(level, int) for level in levels)
    levels_are_none = all(level is None for level in levels)
    if not (levels_are_ints or levels_are_none):
        raise ValueError('Reference and user memory levels must be both None '
                         'or both integers.')

    should_cache = memory is not None and (
        func_memory_level is None or memory_level >= func_memory_level)
    if not should_cache:
        # Requested level is too low (or no backend given): disabled cache.
        return _safe_cache(Memory(cachedir=None, verbose=verbose), func,
                           **kwargs)

    if isinstance(memory, _basestring):
        memory = Memory(cachedir=memory, verbose=verbose)
    if not isinstance(memory, MEMORY_CLASSES):
        raise TypeError("'memory' argument must be a string or a "
                        "joblib.Memory object. "
                        "%s %s was given." % (memory, type(memory)))
    if (memory.cachedir is None and memory_level is not None
            and memory_level > 1):
        warnings.warn("Caching has been enabled (memory_level = %d) "
                      "but no Memory object or path has been provided"
                      " (parameter memory). Caching deactivated for "
                      "function %s." %
                      (memory_level, func.__name__),
                      stacklevel=2)
    return _safe_cache(memory, func, **kwargs)
constant[ Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
this level, the user can to control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function which output is to be cached.
memory: instance of joblib.Memory or string
Used to cache the function call.
func_memory_level: int, optional
The memory_level from which caching must be enabled for the wrapped
function.
memory_level: int, optional
The memory_level used to determine if function call must
be cached or not (if user_memory_level is equal of greater than
func_memory_level the function is cached)
kwargs: keyword arguments
The keyword arguments passed to memory.cache
Returns
-------
mem: joblib.MemorizedFunc
object that wraps the function func. This object may be
a no-op, if the requested level is lower than the value given
to _cache()). For consistency, a joblib.Memory object is always
returned.
]
variable[verbose] assign[=] call[name[kwargs].get, parameter[constant[verbose], constant[0]]]
variable[memory_levels] assign[=] list[[<ast.Name object at 0x7da1b004d240>, <ast.Name object at 0x7da1b004ee30>]]
variable[both_params_integers] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b004dc90>]]
variable[both_params_none] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b004e380>]]
if <ast.UnaryOp object at 0x7da1b004d930> begin[:]
<ast.Raise object at 0x7da1b004d0c0>
if <ast.BoolOp object at 0x7da1b004caf0> begin[:]
if call[name[isinstance], parameter[name[memory], name[_basestring]]] begin[:]
variable[memory] assign[=] call[name[Memory], parameter[]]
if <ast.UnaryOp object at 0x7da1b004fac0> begin[:]
<ast.Raise object at 0x7da1b004fa90>
if <ast.BoolOp object at 0x7da1b004cc40> begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Caching has been enabled (memory_level = %d) but no Memory object or path has been provided (parameter memory). Caching deactivated for function %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b004fa30>, <ast.Attribute object at 0x7da1b004ca60>]]]]]
return[call[name[_safe_cache], parameter[name[memory], name[func]]]] | keyword[def] identifier[cache] ( identifier[func] , identifier[memory] , identifier[func_memory_level] = keyword[None] , identifier[memory_level] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[verbose] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[memory_levels] =[ identifier[memory_level] , identifier[func_memory_level] ]
identifier[both_params_integers] = identifier[all] ( identifier[isinstance] ( identifier[lvl] , identifier[int] ) keyword[for] identifier[lvl] keyword[in] identifier[memory_levels] )
identifier[both_params_none] = identifier[all] ( identifier[lvl] keyword[is] keyword[None] keyword[for] identifier[lvl] keyword[in] identifier[memory_levels] )
keyword[if] keyword[not] ( identifier[both_params_integers] keyword[or] identifier[both_params_none] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[memory] keyword[is] keyword[not] keyword[None] keyword[and] ( identifier[func_memory_level] keyword[is] keyword[None] keyword[or]
identifier[memory_level] >= identifier[func_memory_level] ):
keyword[if] identifier[isinstance] ( identifier[memory] , identifier[_basestring] ):
identifier[memory] = identifier[Memory] ( identifier[cachedir] = identifier[memory] , identifier[verbose] = identifier[verbose] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[memory] , identifier[MEMORY_CLASSES] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string] %( identifier[memory] , identifier[type] ( identifier[memory] )))
keyword[if] ( identifier[memory] . identifier[cachedir] keyword[is] keyword[None] keyword[and] identifier[memory_level] keyword[is] keyword[not] keyword[None]
keyword[and] identifier[memory_level] > literal[int] ):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string]
literal[string] %
( identifier[memory_level] , identifier[func] . identifier[__name__] ),
identifier[stacklevel] = literal[int] )
keyword[else] :
identifier[memory] = identifier[Memory] ( identifier[cachedir] = keyword[None] , identifier[verbose] = identifier[verbose] )
keyword[return] identifier[_safe_cache] ( identifier[memory] , identifier[func] ,** identifier[kwargs] ) | def cache(func, memory, func_memory_level=None, memory_level=None, **kwargs):
""" Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
this level, the user can to control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function which output is to be cached.
memory: instance of joblib.Memory or string
Used to cache the function call.
func_memory_level: int, optional
The memory_level from which caching must be enabled for the wrapped
function.
memory_level: int, optional
The memory_level used to determine if function call must
be cached or not (if user_memory_level is equal of greater than
func_memory_level the function is cached)
kwargs: keyword arguments
The keyword arguments passed to memory.cache
Returns
-------
mem: joblib.MemorizedFunc
object that wraps the function func. This object may be
a no-op, if the requested level is lower than the value given
to _cache()). For consistency, a joblib.Memory object is always
returned.
"""
verbose = kwargs.get('verbose', 0)
# memory_level and func_memory_level must be both None or both integers.
memory_levels = [memory_level, func_memory_level]
both_params_integers = all((isinstance(lvl, int) for lvl in memory_levels))
both_params_none = all((lvl is None for lvl in memory_levels))
if not (both_params_integers or both_params_none):
raise ValueError('Reference and user memory levels must be both None or both integers.') # depends on [control=['if'], data=[]]
if memory is not None and (func_memory_level is None or memory_level >= func_memory_level):
if isinstance(memory, _basestring):
memory = Memory(cachedir=memory, verbose=verbose) # depends on [control=['if'], data=[]]
if not isinstance(memory, MEMORY_CLASSES):
raise TypeError("'memory' argument must be a string or a joblib.Memory object. %s %s was given." % (memory, type(memory))) # depends on [control=['if'], data=[]]
if memory.cachedir is None and memory_level is not None and (memory_level > 1):
warnings.warn('Caching has been enabled (memory_level = %d) but no Memory object or path has been provided (parameter memory). Caching deactivated for function %s.' % (memory_level, func.__name__), stacklevel=2) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
memory = Memory(cachedir=None, verbose=verbose)
return _safe_cache(memory, func, **kwargs) |
def scheme(name, bins, bin_method='quantiles'):
    """Build a custom scheme dict based on CARTOColors.

    Args:
        name (str): Name of a CARTOColor.
        bins (int or iterable): An `int` gives the number of bins used to
            classify data (CARTOColors support at most 7 bins for
            quantitative data and 11 for qualitative data).  A `list` gives
            the explicit upper bounds for classification, e.g. ``(10, 20,
            30, 40, 50)``.
        bin_method (str, optional): One of the methods in :obj:`BinMethod`.
            Defaults to ``quantiles``.  Ignored when `bins` is an iterable,
            since the bounds themselves define the classification.

    .. Warning::
        Input types are particularly sensitive in this function, and little
        feedback is given for errors. ``name`` and ``bin_method`` arguments
        are case-sensitive.
    """
    # An explicit bin list overrides any named classification method.
    if not isinstance(bins, int):
        bin_method = ''
    return {'name': name, 'bins': bins, 'bin_method': bin_method}
constant[Return a custom scheme based on CARTOColors.
Args:
name (str): Name of a CARTOColor.
bins (int or iterable): If an `int`, the number of bins for classifying
data. CARTOColors have 7 bins max for quantitative data, and 11 max
for qualitative data. If `bins` is a `list`, it is the upper range
for classifying data. E.g., `bins` can be of the form ``(10, 20, 30,
40, 50)``.
bin_method (str, optional): One of methods in :obj:`BinMethod`.
Defaults to ``quantiles``. If `bins` is an interable, then that is
the bin method that will be used and this will be ignored.
.. Warning::
Input types are particularly sensitive in this function, and little
feedback is given for errors. ``name`` and ``bin_method`` arguments
are case-sensitive.
]
return[dictionary[[<ast.Constant object at 0x7da2044c2ef0>, <ast.Constant object at 0x7da2044c3dc0>, <ast.Constant object at 0x7da2044c0820>], [<ast.Name object at 0x7da2044c1120>, <ast.Name object at 0x7da2044c3ac0>, <ast.IfExp object at 0x7da2044c13f0>]]] | keyword[def] identifier[scheme] ( identifier[name] , identifier[bins] , identifier[bin_method] = literal[string] ):
literal[string]
keyword[return] {
literal[string] : identifier[name] ,
literal[string] : identifier[bins] ,
literal[string] :( identifier[bin_method] keyword[if] identifier[isinstance] ( identifier[bins] , identifier[int] ) keyword[else] literal[string] ),
} | def scheme(name, bins, bin_method='quantiles'):
"""Return a custom scheme based on CARTOColors.
Args:
name (str): Name of a CARTOColor.
bins (int or iterable): If an `int`, the number of bins for classifying
data. CARTOColors have 7 bins max for quantitative data, and 11 max
for qualitative data. If `bins` is a `list`, it is the upper range
for classifying data. E.g., `bins` can be of the form ``(10, 20, 30,
40, 50)``.
bin_method (str, optional): One of methods in :obj:`BinMethod`.
Defaults to ``quantiles``. If `bins` is an interable, then that is
the bin method that will be used and this will be ignored.
.. Warning::
Input types are particularly sensitive in this function, and little
feedback is given for errors. ``name`` and ``bin_method`` arguments
are case-sensitive.
"""
return {'name': name, 'bins': bins, 'bin_method': bin_method if isinstance(bins, int) else ''} |
def __convert_string_list(node):
    """Convert a StringListProperty node into its JSON representation."""
    result = __convert_node(node)
    # String lists always accept user-supplied values.
    string_list_flags = vsflags(VSFlags.UserValue)
    # A ';' separator (the assumed default when absent) means entries can be
    # appended semicolon-delimited.
    if __get_attribute(node, 'Separator', default_value=';') == ';':
        string_list_flags = vsflags(string_list_flags,
                                    VSFlags.SemicolonAppendable)
    result['flags'] = string_list_flags
    return __check_for_flag(result)
constant[Converts a StringListProperty node to JSON format.]
variable[converted] assign[=] call[name[__convert_node], parameter[name[node]]]
variable[flags] assign[=] call[name[vsflags], parameter[name[VSFlags].UserValue]]
variable[separator] assign[=] call[name[__get_attribute], parameter[name[node], constant[Separator]]]
if compare[name[separator] equal[==] constant[;]] begin[:]
variable[flags] assign[=] call[name[vsflags], parameter[name[flags], name[VSFlags].SemicolonAppendable]]
call[name[converted]][constant[flags]] assign[=] name[flags]
return[call[name[__check_for_flag], parameter[name[converted]]]] | keyword[def] identifier[__convert_string_list] ( identifier[node] ):
literal[string]
identifier[converted] = identifier[__convert_node] ( identifier[node] )
identifier[flags] = identifier[vsflags] ( identifier[VSFlags] . identifier[UserValue] )
identifier[separator] = identifier[__get_attribute] ( identifier[node] , literal[string] , identifier[default_value] = literal[string] )
keyword[if] identifier[separator] == literal[string] :
identifier[flags] = identifier[vsflags] ( identifier[flags] , identifier[VSFlags] . identifier[SemicolonAppendable] )
identifier[converted] [ literal[string] ]= identifier[flags]
keyword[return] identifier[__check_for_flag] ( identifier[converted] ) | def __convert_string_list(node):
"""Converts a StringListProperty node to JSON format."""
converted = __convert_node(node)
# Determine flags for the string list
flags = vsflags(VSFlags.UserValue)
# Check for a separator to determine if it is semicolon appendable
# If not present assume the value should be ;
separator = __get_attribute(node, 'Separator', default_value=';')
if separator == ';':
flags = vsflags(flags, VSFlags.SemicolonAppendable) # depends on [control=['if'], data=[]]
converted['flags'] = flags
return __check_for_flag(converted) |
def broadcast_1d_array(arr, ndim, axis=1):
    """
    Broadcast the 1-d array `arr` up to `ndim` dimensions, adding the new
    axes at the front (`axis`=0) or at the back (`axis`=1).
    Useful for 'outer' calculations involving 1-d arrays that are related to
    different axes on a multidimensional grid.
    """
    result = arr
    extra_dims = ndim - 1
    # Add one singleton axis per missing dimension; with extra_dims == 0 the
    # input is returned unchanged.
    while extra_dims > 0:
        result = np.expand_dims(result, axis=axis)
        extra_dims -= 1
    return result
constant[
Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
(`axis`=0) or on the last axis (`axis`=1).
Useful for 'outer' calculations involving 1-d arrays that are related to
different axes on a multidimensional grid.
]
variable[ext_arr] assign[=] name[arr]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[ndim] - constant[1]]]]] begin[:]
variable[ext_arr] assign[=] call[name[np].expand_dims, parameter[name[ext_arr]]]
return[name[ext_arr]] | keyword[def] identifier[broadcast_1d_array] ( identifier[arr] , identifier[ndim] , identifier[axis] = literal[int] ):
literal[string]
identifier[ext_arr] = identifier[arr]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ndim] - literal[int] ):
identifier[ext_arr] = identifier[np] . identifier[expand_dims] ( identifier[ext_arr] , identifier[axis] = identifier[axis] )
keyword[return] identifier[ext_arr] | def broadcast_1d_array(arr, ndim, axis=1):
"""
Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
(`axis`=0) or on the last axis (`axis`=1).
Useful for 'outer' calculations involving 1-d arrays that are related to
different axes on a multidimensional grid.
"""
ext_arr = arr
for i in range(ndim - 1):
ext_arr = np.expand_dims(ext_arr, axis=axis) # depends on [control=['for'], data=[]]
return ext_arr |
def get(self, ip_address):
    """Return the record for the ip_address in the MaxMind DB
    Arguments:
    ip_address -- an IP address in the standard string notation
    """
    address = ipaddress.ip_address(ip_address)
    # An IPv4-only database cannot answer IPv6 queries.
    if address.version == 6 and self._metadata.ip_version == 4:
        raise ValueError('Error looking up {0}. You attempted to look up '
                         'an IPv6 address in an IPv4-only database.'.format(
                             ip_address))
    pointer = self._find_address_in_tree(address)
    if not pointer:
        return None
    return self._resolve_data_pointer(pointer)
constant[Return the record for the ip_address in the MaxMind DB
Arguments:
ip_address -- an IP address in the standard string notation
]
variable[address] assign[=] call[name[ipaddress].ip_address, parameter[name[ip_address]]]
if <ast.BoolOp object at 0x7da20c7c9540> begin[:]
<ast.Raise object at 0x7da18f811420>
variable[pointer] assign[=] call[name[self]._find_address_in_tree, parameter[name[address]]]
return[<ast.IfExp object at 0x7da18f8133a0>] | keyword[def] identifier[get] ( identifier[self] , identifier[ip_address] ):
literal[string]
identifier[address] = identifier[ipaddress] . identifier[ip_address] ( identifier[ip_address] )
keyword[if] identifier[address] . identifier[version] == literal[int] keyword[and] identifier[self] . identifier[_metadata] . identifier[ip_version] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] (
identifier[ip_address] ))
identifier[pointer] = identifier[self] . identifier[_find_address_in_tree] ( identifier[address] )
keyword[return] identifier[self] . identifier[_resolve_data_pointer] ( identifier[pointer] ) keyword[if] identifier[pointer] keyword[else] keyword[None] | def get(self, ip_address):
"""Return the record for the ip_address in the MaxMind DB
Arguments:
ip_address -- an IP address in the standard string notation
"""
address = ipaddress.ip_address(ip_address)
if address.version == 6 and self._metadata.ip_version == 4:
raise ValueError('Error looking up {0}. You attempted to look up an IPv6 address in an IPv4-only database.'.format(ip_address)) # depends on [control=['if'], data=[]]
pointer = self._find_address_in_tree(address)
return self._resolve_data_pointer(pointer) if pointer else None |
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
    """
    This is a wrapper around _concat()/_concat_ixes() that checks for
    the existence of prefixes or suffixes on list items and strips them
    where it finds them. This is used by tools (like the GNU linker)
    that need to turn something like 'libfoo.a' into '-lfoo'.
    """
    if not itms:
        return itms
    if not callable(c):
        # Honor a custom _concat() installed in the construction
        # environment; that has historically been allowed (see
        # test/custom-concat.py), so keep the backwards compatibility.
        env_concat = env['_concat']
        if env_concat != _concat and callable(env_concat):
            c = env_concat
        else:
            c = _concat_ixes
    stripprefixes = [env.subst(p) for p in SCons.Util.flatten(stripprefixes)]
    stripsuffixes = [env.subst(s) for s in SCons.Util.flatten(stripsuffixes)]
    stripped = []
    for item in SCons.PathList.PathList(itms).subst_path(env, None, None):
        # File nodes pass through untouched.
        if isinstance(item, SCons.Node.FS.File):
            stripped.append(item)
            continue
        if not SCons.Util.is_String(item):
            item = str(item)
        for pfx in stripprefixes:
            plen = len(pfx)
            if item[:plen] == pfx:
                item = item[plen:]
                break  # strip at most one prefix
        for sfx in stripsuffixes:
            slen = len(sfx)
            if item[-slen:] == sfx:
                item = item[:-slen]
                break  # strip at most one suffix
        stripped.append(item)
    return c(prefix, stripped, suffix, env)
constant[
This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'.
]
if <ast.UnaryOp object at 0x7da20c6c5a80> begin[:]
return[name[itms]]
if <ast.UnaryOp object at 0x7da20c6c79a0> begin[:]
variable[env_c] assign[=] call[name[env]][constant[_concat]]
if <ast.BoolOp object at 0x7da18f00eb30> begin[:]
variable[c] assign[=] name[env_c]
variable[stripprefixes] assign[=] call[name[list], parameter[call[name[map], parameter[name[env].subst, call[name[SCons].Util.flatten, parameter[name[stripprefixes]]]]]]]
variable[stripsuffixes] assign[=] call[name[list], parameter[call[name[map], parameter[name[env].subst, call[name[SCons].Util.flatten, parameter[name[stripsuffixes]]]]]]]
variable[stripped] assign[=] list[[]]
for taget[name[l]] in starred[call[call[name[SCons].PathList.PathList, parameter[name[itms]]].subst_path, parameter[name[env], constant[None], constant[None]]]] begin[:]
if call[name[isinstance], parameter[name[l], name[SCons].Node.FS.File]] begin[:]
call[name[stripped].append, parameter[name[l]]]
continue
if <ast.UnaryOp object at 0x7da2045678e0> begin[:]
variable[l] assign[=] call[name[str], parameter[name[l]]]
for taget[name[stripprefix]] in starred[name[stripprefixes]] begin[:]
variable[lsp] assign[=] call[name[len], parameter[name[stripprefix]]]
if compare[call[name[l]][<ast.Slice object at 0x7da204567ac0>] equal[==] name[stripprefix]] begin[:]
variable[l] assign[=] call[name[l]][<ast.Slice object at 0x7da2045648b0>]
break
for taget[name[stripsuffix]] in starred[name[stripsuffixes]] begin[:]
variable[lss] assign[=] call[name[len], parameter[name[stripsuffix]]]
if compare[call[name[l]][<ast.Slice object at 0x7da2045662f0>] equal[==] name[stripsuffix]] begin[:]
variable[l] assign[=] call[name[l]][<ast.Slice object at 0x7da204567df0>]
break
call[name[stripped].append, parameter[name[l]]]
return[call[name[c], parameter[name[prefix], name[stripped], name[suffix], name[env]]]] | keyword[def] identifier[_stripixes] ( identifier[prefix] , identifier[itms] , identifier[suffix] , identifier[stripprefixes] , identifier[stripsuffixes] , identifier[env] , identifier[c] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[itms] :
keyword[return] identifier[itms]
keyword[if] keyword[not] identifier[callable] ( identifier[c] ):
identifier[env_c] = identifier[env] [ literal[string] ]
keyword[if] identifier[env_c] != identifier[_concat] keyword[and] identifier[callable] ( identifier[env_c] ):
identifier[c] = identifier[env_c]
keyword[else] :
identifier[c] = identifier[_concat_ixes]
identifier[stripprefixes] = identifier[list] ( identifier[map] ( identifier[env] . identifier[subst] , identifier[SCons] . identifier[Util] . identifier[flatten] ( identifier[stripprefixes] )))
identifier[stripsuffixes] = identifier[list] ( identifier[map] ( identifier[env] . identifier[subst] , identifier[SCons] . identifier[Util] . identifier[flatten] ( identifier[stripsuffixes] )))
identifier[stripped] =[]
keyword[for] identifier[l] keyword[in] identifier[SCons] . identifier[PathList] . identifier[PathList] ( identifier[itms] ). identifier[subst_path] ( identifier[env] , keyword[None] , keyword[None] ):
keyword[if] identifier[isinstance] ( identifier[l] , identifier[SCons] . identifier[Node] . identifier[FS] . identifier[File] ):
identifier[stripped] . identifier[append] ( identifier[l] )
keyword[continue]
keyword[if] keyword[not] identifier[SCons] . identifier[Util] . identifier[is_String] ( identifier[l] ):
identifier[l] = identifier[str] ( identifier[l] )
keyword[for] identifier[stripprefix] keyword[in] identifier[stripprefixes] :
identifier[lsp] = identifier[len] ( identifier[stripprefix] )
keyword[if] identifier[l] [: identifier[lsp] ]== identifier[stripprefix] :
identifier[l] = identifier[l] [ identifier[lsp] :]
keyword[break]
keyword[for] identifier[stripsuffix] keyword[in] identifier[stripsuffixes] :
identifier[lss] = identifier[len] ( identifier[stripsuffix] )
keyword[if] identifier[l] [- identifier[lss] :]== identifier[stripsuffix] :
identifier[l] = identifier[l] [:- identifier[lss] ]
keyword[break]
identifier[stripped] . identifier[append] ( identifier[l] )
keyword[return] identifier[c] ( identifier[prefix] , identifier[stripped] , identifier[suffix] , identifier[env] ) | def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
"""
This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'.
"""
if not itms:
return itms # depends on [control=['if'], data=[]]
if not callable(c):
env_c = env['_concat']
if env_c != _concat and callable(env_c):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
# backwards compatibility.
c = env_c # depends on [control=['if'], data=[]]
else:
c = _concat_ixes # depends on [control=['if'], data=[]]
stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
stripped = []
for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
if isinstance(l, SCons.Node.FS.File):
stripped.append(l)
continue # depends on [control=['if'], data=[]]
if not SCons.Util.is_String(l):
l = str(l) # depends on [control=['if'], data=[]]
for stripprefix in stripprefixes:
lsp = len(stripprefix)
if l[:lsp] == stripprefix:
l = l[lsp:]
# Do not strip more than one prefix
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stripprefix']]
for stripsuffix in stripsuffixes:
lss = len(stripsuffix)
if l[-lss:] == stripsuffix:
l = l[:-lss]
# Do not strip more than one suffix
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stripsuffix']]
stripped.append(l) # depends on [control=['for'], data=['l']]
return c(prefix, stripped, suffix, env) |
def parse_date(record):
    """Parse a date from sqlite. Assumes the date is in US/Pacific time zone.

    Pops the 'datetime' string out of ``record``, parses it (with or
    without fractional seconds), localizes it to US/Pacific and stores the
    aware datetime back under 'datetime'.

    Raises:
        ValueError: if the string matches none of the supported formats.
    """
    dt = record.pop('datetime')
    fmts = [
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%d %H:%M:%S',
    ]
    for fmt in fmts:
        try:
            dt = datetime.datetime.strptime(dt, fmt)
            break
        except ValueError:
            pass
    else:
        # A bare ``raise`` here had no active exception (the ValueError was
        # already handled) and produced a confusing
        # "RuntimeError: No active exception to re-raise"; raise the
        # intended error explicitly instead.
        raise ValueError('unrecognized datetime format: {0!r}'.format(dt))
    tz = pytz.timezone('US/Pacific')
    loc_dt = tz.localize(dt)
    record['datetime'] = loc_dt
    return record
constant[Parse a date from sqlite. Assumes the date is in US/Pacific time zone.]
variable[dt] assign[=] call[name[record].pop, parameter[constant[datetime]]]
variable[fmts] assign[=] list[[<ast.Constant object at 0x7da1b04d4b80>, <ast.Constant object at 0x7da1b04d73a0>]]
for taget[name[fmt]] in starred[name[fmts]] begin[:]
<ast.Try object at 0x7da1b04d41c0>
variable[tz] assign[=] call[name[pytz].timezone, parameter[constant[US/Pacific]]]
variable[loc_dt] assign[=] call[name[tz].localize, parameter[name[dt]]]
call[name[record]][constant[datetime]] assign[=] name[loc_dt]
return[name[record]] | keyword[def] identifier[parse_date] ( identifier[record] ):
literal[string]
identifier[dt] = identifier[record] . identifier[pop] ( literal[string] )
identifier[fmts] =[
literal[string] ,
literal[string] ,
]
keyword[for] identifier[fmt] keyword[in] identifier[fmts] :
keyword[try] :
identifier[dt] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[dt] , identifier[fmt] )
keyword[break]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[else] :
keyword[raise]
identifier[tz] = identifier[pytz] . identifier[timezone] ( literal[string] )
identifier[loc_dt] = identifier[tz] . identifier[localize] ( identifier[dt] )
identifier[record] [ literal[string] ]= identifier[loc_dt]
keyword[return] identifier[record] | def parse_date(record):
"""Parse a date from sqlite. Assumes the date is in US/Pacific time zone."""
dt = record.pop('datetime')
fmts = ['%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S']
for fmt in fmts:
try:
dt = datetime.datetime.strptime(dt, fmt)
break # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['fmt']]
else:
raise
tz = pytz.timezone('US/Pacific')
loc_dt = tz.localize(dt)
record['datetime'] = loc_dt
return record |
def __make_hash(cls, innermsg, token, seqnum):
    """return the hash for this innermsg, token, seqnum
    return digest bytes
    """
    # Feed the message and the packed sequence number into a single
    # hmacNew(token) object and return its raw digest.
    mac = hmacNew(token, digestmod=hashfunc)
    for piece in (innermsg, cls.__byte_packer(seqnum)):
        mac.update(piece)
    return mac.digest()
return hobj.digest() | def function[__make_hash, parameter[cls, innermsg, token, seqnum]]:
constant[return the hash for this innermsg, token, seqnum
return digest bytes
]
variable[hobj] assign[=] call[name[hmacNew], parameter[name[token]]]
call[name[hobj].update, parameter[name[innermsg]]]
call[name[hobj].update, parameter[call[name[cls].__byte_packer, parameter[name[seqnum]]]]]
return[call[name[hobj].digest, parameter[]]] | keyword[def] identifier[__make_hash] ( identifier[cls] , identifier[innermsg] , identifier[token] , identifier[seqnum] ):
literal[string]
identifier[hobj] = identifier[hmacNew] ( identifier[token] , identifier[digestmod] = identifier[hashfunc] )
identifier[hobj] . identifier[update] ( identifier[innermsg] )
identifier[hobj] . identifier[update] ( identifier[cls] . identifier[__byte_packer] ( identifier[seqnum] ))
keyword[return] identifier[hobj] . identifier[digest] () | def __make_hash(cls, innermsg, token, seqnum):
"""return the hash for this innermsg, token, seqnum
return digest bytes
"""
hobj = hmacNew(token, digestmod=hashfunc)
hobj.update(innermsg)
hobj.update(cls.__byte_packer(seqnum))
return hobj.digest() |
def campaign(self, name, **kwargs):
    """Add Campaign data to Batch object.
    Args:
        name (str): The name for this Group.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        first_seen (str, kwargs): The first seen datetime expression for this Group.
        xid (str, kwargs): The external id for this Group.
    Returns:
        obj: An instance of Campaign.
    """
    # Build the group object and register it with the batch in one step.
    return self._group(Campaign(name, **kwargs))
constant[Add Campaign data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
first_seen (str, kwargs): The first seen datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Campaign.
]
variable[group_obj] assign[=] call[name[Campaign], parameter[name[name]]]
return[call[name[self]._group, parameter[name[group_obj]]]] | keyword[def] identifier[campaign] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[group_obj] = identifier[Campaign] ( identifier[name] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[_group] ( identifier[group_obj] ) | def campaign(self, name, **kwargs):
"""Add Campaign data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
first_seen (str, kwargs): The first seen datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Campaign.
"""
group_obj = Campaign(name, **kwargs)
return self._group(group_obj) |
def harvest_repo(root_url, archive_path, tag=None, archive_format='tar.gz'):
    """
    Archives a specific tag in a specific Git repository.
    :param root_url: The URL to the Git repo
        - Supported protocols: git, ssh, http[s].
    :param archive_path: A temporary path to clone the repo to
        - Must end in .git
    :param tag: The path to which the .tar.gz will go to
        - Must end in the same as format (NOT inside clone_path)
    :param format: One of the following: tar.gz / tar / zip
    """
    if not git_exists():
        raise Exception("Git not found. It probably needs installing.")
    clone_path = mkdtemp(dir=cfg['CFG_TMPDIR'])
    git = get_which_git()
    call([git, 'clone', root_url, clone_path])
    chdir(clone_path)
    # Archive the requested tag, or HEAD when no tag was given.
    treeish = tag if tag else 'HEAD'
    call([git, 'archive', '--format=' + archive_format, '-o',
          archive_path, treeish])
    try:
        rmtree(clone_path)
    except OSError as e:
        # Reraise unless ENOENT: No such file or directory
        # (ok if directory has already been deleted)
        if e.errno != errno.ENOENT:
            raise
constant[
Archives a specific tag in a specific Git repository.
:param root_url: The URL to the Git repo
- Supported protocols: git, ssh, http[s].
:param archive_path: A temporary path to clone the repo to
- Must end in .git
:param tag: The path to which the .tar.gz will go to
- Must end in the same as format (NOT inside clone_path)
:param format: One of the following: tar.gz / tar / zip
]
if <ast.UnaryOp object at 0x7da20c6a92a0> begin[:]
<ast.Raise object at 0x7da20c6a8070>
variable[clone_path] assign[=] call[name[mkdtemp], parameter[]]
variable[git] assign[=] call[name[get_which_git], parameter[]]
call[name[call], parameter[list[[<ast.Name object at 0x7da20c6a8c70>, <ast.Constant object at 0x7da20c6a9150>, <ast.Name object at 0x7da20c6a95a0>, <ast.Name object at 0x7da20c6a9c00>]]]]
call[name[chdir], parameter[name[clone_path]]]
if name[tag] begin[:]
call[name[call], parameter[list[[<ast.Name object at 0x7da20c6a8af0>, <ast.Constant object at 0x7da20c6a98d0>, <ast.BinOp object at 0x7da20c6a85b0>, <ast.Constant object at 0x7da20c6aac50>, <ast.Name object at 0x7da20c6ab100>, <ast.Name object at 0x7da20c6a8a30>]]]]
<ast.Try object at 0x7da20c6a8310> | keyword[def] identifier[harvest_repo] ( identifier[root_url] , identifier[archive_path] , identifier[tag] = keyword[None] , identifier[archive_format] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[git_exists] ():
keyword[raise] identifier[Exception] ( literal[string] )
identifier[clone_path] = identifier[mkdtemp] ( identifier[dir] = identifier[cfg] [ literal[string] ])
identifier[git] = identifier[get_which_git] ()
identifier[call] ([ identifier[git] , literal[string] , identifier[root_url] , identifier[clone_path] ])
identifier[chdir] ( identifier[clone_path] )
keyword[if] identifier[tag] :
identifier[call] ([ identifier[git] , literal[string] , literal[string] + identifier[archive_format] , literal[string] ,
identifier[archive_path] , identifier[tag] ])
keyword[else] :
identifier[call] ([ identifier[git] , literal[string] , literal[string] + identifier[archive_format] , literal[string] ,
identifier[archive_path] , literal[string] ])
keyword[try] :
identifier[rmtree] ( identifier[clone_path] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] != identifier[errno] . identifier[ENOENT] :
keyword[raise] | def harvest_repo(root_url, archive_path, tag=None, archive_format='tar.gz'):
"""
Archives a specific tag in a specific Git repository.
:param root_url: The URL to the Git repo
- Supported protocols: git, ssh, http[s].
:param archive_path: A temporary path to clone the repo to
- Must end in .git
:param tag: The path to which the .tar.gz will go to
- Must end in the same as format (NOT inside clone_path)
:param format: One of the following: tar.gz / tar / zip
"""
if not git_exists():
raise Exception('Git not found. It probably needs installing.') # depends on [control=['if'], data=[]]
clone_path = mkdtemp(dir=cfg['CFG_TMPDIR'])
git = get_which_git()
call([git, 'clone', root_url, clone_path])
chdir(clone_path)
if tag:
call([git, 'archive', '--format=' + archive_format, '-o', archive_path, tag]) # depends on [control=['if'], data=[]]
else:
call([git, 'archive', '--format=' + archive_format, '-o', archive_path, 'HEAD'])
try:
rmtree(clone_path) # depends on [control=['try'], data=[]]
except OSError as e:
# Reraise unless ENOENT: No such file or directory
# (ok if directory has already been deleted)
if e.errno != errno.ENOENT:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] |
def open(self, path_spec=None, mode='rb'):
    """Opens the file-like object defined by path specification.
    Args:
        path_spec (Optional[PathSpec]): path specification.
        mode (Optional[str]): file access mode.
    Raises:
        AccessError: if the access to open the file was denied.
        IOError: if the file-like object was already opened or the open failed.
        OSError: if the file-like object was already opened or the open failed.
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification or mode is invalid.
    """
    was_open = self._is_open
    if was_open and not self._is_cached:
        raise IOError('Already open.')
    if mode != 'rb':
        raise ValueError('Unsupported mode: {0:s}.'.format(mode))
    if not was_open:
        self._Open(path_spec=path_spec, mode=mode)
        self._is_open = True
        # Register this object with the resolver cache on first open.
        cacheable = path_spec and not self._resolver_context.GetFileObject(path_spec)
        if cacheable:
            self._resolver_context.CacheFileObject(path_spec, self)
            self._is_cached = True
    if self._is_cached:
        self._resolver_context.GrabFileObject(path_spec)
self._resolver_context.GrabFileObject(path_spec) | def function[open, parameter[self, path_spec, mode]]:
constant[Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object was already opened or the open failed.
OSError: if the file-like object was already opened or the open failed.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification or mode is invalid.
]
if <ast.BoolOp object at 0x7da1b065bd30> begin[:]
<ast.Raise object at 0x7da1b0659960>
if compare[name[mode] not_equal[!=] constant[rb]] begin[:]
<ast.Raise object at 0x7da1b0658850>
if <ast.UnaryOp object at 0x7da1b07f7100> begin[:]
call[name[self]._Open, parameter[]]
name[self]._is_open assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b07f7f10> begin[:]
call[name[self]._resolver_context.CacheFileObject, parameter[name[path_spec], name[self]]]
name[self]._is_cached assign[=] constant[True]
if name[self]._is_cached begin[:]
call[name[self]._resolver_context.GrabFileObject, parameter[name[path_spec]]] | keyword[def] identifier[open] ( identifier[self] , identifier[path_spec] = keyword[None] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[_is_open] keyword[and] keyword[not] identifier[self] . identifier[_is_cached] :
keyword[raise] identifier[IOError] ( literal[string] )
keyword[if] identifier[mode] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mode] ))
keyword[if] keyword[not] identifier[self] . identifier[_is_open] :
identifier[self] . identifier[_Open] ( identifier[path_spec] = identifier[path_spec] , identifier[mode] = identifier[mode] )
identifier[self] . identifier[_is_open] = keyword[True]
keyword[if] identifier[path_spec] keyword[and] keyword[not] identifier[self] . identifier[_resolver_context] . identifier[GetFileObject] ( identifier[path_spec] ):
identifier[self] . identifier[_resolver_context] . identifier[CacheFileObject] ( identifier[path_spec] , identifier[self] )
identifier[self] . identifier[_is_cached] = keyword[True]
keyword[if] identifier[self] . identifier[_is_cached] :
identifier[self] . identifier[_resolver_context] . identifier[GrabFileObject] ( identifier[path_spec] ) | def open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object was already opened or the open failed.
OSError: if the file-like object was already opened or the open failed.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification or mode is invalid.
"""
if self._is_open and (not self._is_cached):
raise IOError('Already open.') # depends on [control=['if'], data=[]]
if mode != 'rb':
raise ValueError('Unsupported mode: {0:s}.'.format(mode)) # depends on [control=['if'], data=['mode']]
if not self._is_open:
self._Open(path_spec=path_spec, mode=mode)
self._is_open = True
if path_spec and (not self._resolver_context.GetFileObject(path_spec)):
self._resolver_context.CacheFileObject(path_spec, self)
self._is_cached = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self._is_cached:
self._resolver_context.GrabFileObject(path_spec) # depends on [control=['if'], data=[]] |
def phase_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype='l',**kwargs):
    '''
    Calculates phase difference between X and Y feeds given U and V (U and Q for circular basis)
    data from a noise diode measurement on the target
    '''
    # Fold the noise-diode data and take the ON-OFF differences; which
    # Stokes pair is used depends on the feed basis.
    if feedtype=='l':
        U_OFF,U_ON = foldcal(Udat,tsamp,**kwargs)
        V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
        poffset = np.arctan2(-1*(V_ON-V_OFF),U_ON-U_OFF)
    if feedtype=='c':
        U_OFF,U_ON = foldcal(Udat,tsamp,**kwargs)
        Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
        poffset = np.arctan2(U_ON-U_OFF,Q_ON-Q_OFF)
    coarse_p = convert_to_coarse(poffset,chan_per_coarse)
    # arctan2 is discontinuous at +/-pi; estimate the expected slope from
    # the first few coarse channels and repair points that jump against it.
    head = coarse_p[:6]
    slope = np.polyfit(np.arange(head.size),head,1)[0]
    for i in range(coarse_p.size-3):
        drops_on_rise = slope>0 and coarse_p[i+1]<coarse_p[i]
        rises_on_drop = slope<0 and coarse_p[i+1]>coarse_p[i]
        if drops_on_rise or rises_on_drop:
            # Replace the outlier by linear extrapolation from the next
            # two points.
            coarse_p[i+1] = 2*coarse_p[i+2]-coarse_p[i+3]
    return coarse_p
constant[
Calculates phase difference between X and Y feeds given U and V (U and Q for circular basis)
data from a noise diode measurement on the target
]
if compare[name[feedtype] equal[==] constant[l]] begin[:]
<ast.Tuple object at 0x7da2041dac80> assign[=] call[name[foldcal], parameter[name[Udat], name[tsamp]]]
<ast.Tuple object at 0x7da2041d9a80> assign[=] call[name[foldcal], parameter[name[Vdat], name[tsamp]]]
variable[Udiff] assign[=] binary_operation[name[U_ON] - name[U_OFF]]
variable[Vdiff] assign[=] binary_operation[name[V_ON] - name[V_OFF]]
variable[poffset] assign[=] call[name[np].arctan2, parameter[binary_operation[<ast.UnaryOp object at 0x7da2041db370> * name[Vdiff]], name[Udiff]]]
if compare[name[feedtype] equal[==] constant[c]] begin[:]
<ast.Tuple object at 0x7da2041db550> assign[=] call[name[foldcal], parameter[name[Udat], name[tsamp]]]
<ast.Tuple object at 0x7da2041da350> assign[=] call[name[foldcal], parameter[name[Qdat], name[tsamp]]]
variable[Udiff] assign[=] binary_operation[name[U_ON] - name[U_OFF]]
variable[Qdiff] assign[=] binary_operation[name[Q_ON] - name[Q_OFF]]
variable[poffset] assign[=] call[name[np].arctan2, parameter[name[Udiff], name[Qdiff]]]
variable[coarse_p] assign[=] call[name[convert_to_coarse], parameter[name[poffset], name[chan_per_coarse]]]
variable[y] assign[=] call[name[coarse_p]][<ast.Slice object at 0x7da2041dab90>]
variable[x] assign[=] call[name[np].arange, parameter[name[y].size]]
variable[m] assign[=] call[call[name[np].polyfit, parameter[name[x], name[y], constant[1]]]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[coarse_p].size - constant[3]]]]] begin[:]
if <ast.BoolOp object at 0x7da18f00d180> begin[:]
call[name[coarse_p]][binary_operation[name[i] + constant[1]]] assign[=] binary_operation[binary_operation[constant[2] * call[name[coarse_p]][binary_operation[name[i] + constant[2]]]] - call[name[coarse_p]][binary_operation[name[i] + constant[3]]]]
return[name[coarse_p]] | keyword[def] identifier[phase_offsets] ( identifier[Idat] , identifier[Qdat] , identifier[Udat] , identifier[Vdat] , identifier[tsamp] , identifier[chan_per_coarse] , identifier[feedtype] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[feedtype] == literal[string] :
identifier[U_OFF] , identifier[U_ON] = identifier[foldcal] ( identifier[Udat] , identifier[tsamp] ,** identifier[kwargs] )
identifier[V_OFF] , identifier[V_ON] = identifier[foldcal] ( identifier[Vdat] , identifier[tsamp] ,** identifier[kwargs] )
identifier[Udiff] = identifier[U_ON] - identifier[U_OFF]
identifier[Vdiff] = identifier[V_ON] - identifier[V_OFF]
identifier[poffset] = identifier[np] . identifier[arctan2] (- literal[int] * identifier[Vdiff] , identifier[Udiff] )
keyword[if] identifier[feedtype] == literal[string] :
identifier[U_OFF] , identifier[U_ON] = identifier[foldcal] ( identifier[Udat] , identifier[tsamp] ,** identifier[kwargs] )
identifier[Q_OFF] , identifier[Q_ON] = identifier[foldcal] ( identifier[Qdat] , identifier[tsamp] ,** identifier[kwargs] )
identifier[Udiff] = identifier[U_ON] - identifier[U_OFF]
identifier[Qdiff] = identifier[Q_ON] - identifier[Q_OFF]
identifier[poffset] = identifier[np] . identifier[arctan2] ( identifier[Udiff] , identifier[Qdiff] )
identifier[coarse_p] = identifier[convert_to_coarse] ( identifier[poffset] , identifier[chan_per_coarse] )
identifier[y] = identifier[coarse_p] [: literal[int] ]
identifier[x] = identifier[np] . identifier[arange] ( identifier[y] . identifier[size] )
identifier[m] = identifier[np] . identifier[polyfit] ( identifier[x] , identifier[y] , literal[int] )[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[coarse_p] . identifier[size] - literal[int] ):
keyword[if] ( identifier[m] > literal[int] keyword[and] identifier[coarse_p] [ identifier[i] + literal[int] ]< identifier[coarse_p] [ identifier[i] ]) keyword[or] ( identifier[m] < literal[int] keyword[and] identifier[coarse_p] [ identifier[i] + literal[int] ]> identifier[coarse_p] [ identifier[i] ]):
identifier[coarse_p] [ identifier[i] + literal[int] ]= literal[int] * identifier[coarse_p] [ identifier[i] + literal[int] ]- identifier[coarse_p] [ identifier[i] + literal[int] ]
keyword[return] identifier[coarse_p] | def phase_offsets(Idat, Qdat, Udat, Vdat, tsamp, chan_per_coarse, feedtype='l', **kwargs):
"""
Calculates phase difference between X and Y feeds given U and V (U and Q for circular basis)
data from a noise diode measurement on the target
"""
#Fold noise diode data and calculate ON OFF diferences for U and V
if feedtype == 'l':
(U_OFF, U_ON) = foldcal(Udat, tsamp, **kwargs)
(V_OFF, V_ON) = foldcal(Vdat, tsamp, **kwargs)
Udiff = U_ON - U_OFF
Vdiff = V_ON - V_OFF
poffset = np.arctan2(-1 * Vdiff, Udiff) # depends on [control=['if'], data=[]]
if feedtype == 'c':
(U_OFF, U_ON) = foldcal(Udat, tsamp, **kwargs)
(Q_OFF, Q_ON) = foldcal(Qdat, tsamp, **kwargs)
Udiff = U_ON - U_OFF
Qdiff = Q_ON - Q_OFF
poffset = np.arctan2(Udiff, Qdiff) # depends on [control=['if'], data=[]]
coarse_p = convert_to_coarse(poffset, chan_per_coarse)
#Correct for problems created by discontinuity in arctan
#Find whether phase offsets have increasing or decreasing slope
y = coarse_p[:6]
x = np.arange(y.size)
m = np.polyfit(x, y, 1)[0]
for i in range(coarse_p.size - 3):
if m > 0 and coarse_p[i + 1] < coarse_p[i] or (m < 0 and coarse_p[i + 1] > coarse_p[i]):
coarse_p[i + 1] = 2 * coarse_p[i + 2] - coarse_p[i + 3] #Move problem point near the next # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return coarse_p |
def _on_loop_start(self, variables):
    """
    Run the registered callbacks' on-loop-start hooks.
    Every callback exposing an ``on_loop_start`` method is invoked with the
    caller's local namespace, and its return value is appended to the log
    bucket keyed by the callback's string representation.
    Parameters
    ---------
    variables : dict of available variables
    Returns
    -------
    None
    """
    hooks = (cb for cb in self.callbacks if hasattr(cb, 'on_loop_start'))
    for cb in hooks:
        self.logs_[str(cb)].append(cb.on_loop_start(**variables))
constant[
performs on-loop-start actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None
]
for taget[name[callback]] in starred[name[self].callbacks] begin[:]
if call[name[hasattr], parameter[name[callback], constant[on_loop_start]]] begin[:]
call[call[name[self].logs_][call[name[str], parameter[name[callback]]]].append, parameter[call[name[callback].on_loop_start, parameter[]]]] | keyword[def] identifier[_on_loop_start] ( identifier[self] , identifier[variables] ):
literal[string]
keyword[for] identifier[callback] keyword[in] identifier[self] . identifier[callbacks] :
keyword[if] identifier[hasattr] ( identifier[callback] , literal[string] ):
identifier[self] . identifier[logs_] [ identifier[str] ( identifier[callback] )]. identifier[append] ( identifier[callback] . identifier[on_loop_start] (** identifier[variables] )) | def _on_loop_start(self, variables):
"""
performs on-loop-start actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None
"""
for callback in self.callbacks:
if hasattr(callback, 'on_loop_start'):
self.logs_[str(callback)].append(callback.on_loop_start(**variables)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['callback']] |
def process_fields(self, fields):
    """Process a list of simple string field definitions and assign their order based on prefix."""
    prefix_chars = ''.join(self.PREFIX_MAP)

    def parse(token):
        # The empty-prefix entry supplies the default ordering.
        order = self.PREFIX_MAP['']
        if token[0] in self.PREFIX_MAP:
            order = self.PREFIX_MAP[token[0]]
            token = token.lstrip(prefix_chars)
        return (token, order)

    return [parse(token) for token in fields]
return result | def function[process_fields, parameter[self, fields]]:
constant[Process a list of simple string field definitions and assign their order based on prefix.]
variable[result] assign[=] list[[]]
variable[strip] assign[=] call[constant[].join, parameter[name[self].PREFIX_MAP]]
for taget[name[field]] in starred[name[fields]] begin[:]
variable[direction] assign[=] call[name[self].PREFIX_MAP][constant[]]
if compare[call[name[field]][constant[0]] in name[self].PREFIX_MAP] begin[:]
variable[direction] assign[=] call[name[self].PREFIX_MAP][call[name[field]][constant[0]]]
variable[field] assign[=] call[name[field].lstrip, parameter[name[strip]]]
call[name[result].append, parameter[tuple[[<ast.Name object at 0x7da2047eb4c0>, <ast.Name object at 0x7da2047e97b0>]]]]
return[name[result]] | keyword[def] identifier[process_fields] ( identifier[self] , identifier[fields] ):
literal[string]
identifier[result] =[]
identifier[strip] = literal[string] . identifier[join] ( identifier[self] . identifier[PREFIX_MAP] )
keyword[for] identifier[field] keyword[in] identifier[fields] :
identifier[direction] = identifier[self] . identifier[PREFIX_MAP] [ literal[string] ]
keyword[if] identifier[field] [ literal[int] ] keyword[in] identifier[self] . identifier[PREFIX_MAP] :
identifier[direction] = identifier[self] . identifier[PREFIX_MAP] [ identifier[field] [ literal[int] ]]
identifier[field] = identifier[field] . identifier[lstrip] ( identifier[strip] )
identifier[result] . identifier[append] (( identifier[field] , identifier[direction] ))
keyword[return] identifier[result] | def process_fields(self, fields):
"""Process a list of simple string field definitions and assign their order based on prefix."""
result = []
strip = ''.join(self.PREFIX_MAP)
for field in fields:
direction = self.PREFIX_MAP['']
if field[0] in self.PREFIX_MAP:
direction = self.PREFIX_MAP[field[0]]
field = field.lstrip(strip) # depends on [control=['if'], data=[]]
result.append((field, direction)) # depends on [control=['for'], data=['field']]
return result |
def get_version(root):
    """
    Load and return the contents of version.json.
    :param root: The root path that the ``version.json`` file will be opened
    :type root: str
    :returns: Content of ``version.json`` or None
    :rtype: dict or None
    """
    version_json = os.path.join(root, 'version.json')
    # EAFP: open directly instead of checking os.path.exists() first, which
    # avoids the race window between the existence check and the open call.
    # NotADirectoryError covers a path component being a regular file, which
    # the old exists() check also reported as "missing".
    try:
        with open(version_json, 'r') as version_json_file:
            return json.load(version_json_file)
    except (FileNotFoundError, NotADirectoryError):
        return None
return None | def function[get_version, parameter[root]]:
constant[
Load and return the contents of version.json.
:param root: The root path that the ``version.json`` file will be opened
:type root: str
:returns: Content of ``version.json`` or None
:rtype: dict or None
]
variable[version_json] assign[=] call[name[os].path.join, parameter[name[root], constant[version.json]]]
if call[name[os].path.exists, parameter[name[version_json]]] begin[:]
with call[name[open], parameter[name[version_json], constant[r]]] begin[:]
return[call[name[json].load, parameter[name[version_json_file]]]]
return[constant[None]] | keyword[def] identifier[get_version] ( identifier[root] ):
literal[string]
identifier[version_json] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[version_json] ):
keyword[with] identifier[open] ( identifier[version_json] , literal[string] ) keyword[as] identifier[version_json_file] :
keyword[return] identifier[json] . identifier[load] ( identifier[version_json_file] )
keyword[return] keyword[None] | def get_version(root):
"""
Load and return the contents of version.json.
:param root: The root path that the ``version.json`` file will be opened
:type root: str
:returns: Content of ``version.json`` or None
:rtype: dict or None
"""
version_json = os.path.join(root, 'version.json')
if os.path.exists(version_json):
with open(version_json, 'r') as version_json_file:
return json.load(version_json_file) # depends on [control=['with'], data=['version_json_file']] # depends on [control=['if'], data=[]]
return None |
def get_object_references(tb, source, max_string_length=1000):
    """
    Find the values of referenced attributes of objects within the traceback scope.
    :param tb: traceback
    :return: list of tuples containing (variable name, value)
    """
    global obj_ref_regex
    # Collect every attribute-reference expression appearing in the source.
    referenced_attr = set()
    for line in source.split('\n'):
        referenced_attr |= set(re.findall(obj_ref_regex, line))
    info = []
    for attr in sorted(referenced_attr):
        value = string_variable_lookup(tb, attr)
        # string_variable_lookup signals "not resolvable" by returning the
        # ValueError class itself; skip those entries.
        if value is ValueError:
            continue
        info.append((attr, format_reference(value, max_string_length=max_string_length)))
    return info
constant[
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
]
<ast.Global object at 0x7da20c76ead0>
variable[referenced_attr] assign[=] call[name[set], parameter[]]
for taget[name[line]] in starred[call[name[source].split, parameter[constant[
]]]] begin[:]
call[name[referenced_attr].update, parameter[call[name[set], parameter[call[name[re].findall, parameter[name[obj_ref_regex], name[line]]]]]]]
variable[referenced_attr] assign[=] call[name[sorted], parameter[name[referenced_attr]]]
variable[info] assign[=] list[[]]
for taget[name[attr]] in starred[name[referenced_attr]] begin[:]
variable[v] assign[=] call[name[string_variable_lookup], parameter[name[tb], name[attr]]]
if compare[name[v] is_not name[ValueError]] begin[:]
variable[ref_string] assign[=] call[name[format_reference], parameter[name[v]]]
call[name[info].append, parameter[tuple[[<ast.Name object at 0x7da20c76fdc0>, <ast.Name object at 0x7da20c76c640>]]]]
return[name[info]] | keyword[def] identifier[get_object_references] ( identifier[tb] , identifier[source] , identifier[max_string_length] = literal[int] ):
literal[string]
keyword[global] identifier[obj_ref_regex]
identifier[referenced_attr] = identifier[set] ()
keyword[for] identifier[line] keyword[in] identifier[source] . identifier[split] ( literal[string] ):
identifier[referenced_attr] . identifier[update] ( identifier[set] ( identifier[re] . identifier[findall] ( identifier[obj_ref_regex] , identifier[line] )))
identifier[referenced_attr] = identifier[sorted] ( identifier[referenced_attr] )
identifier[info] =[]
keyword[for] identifier[attr] keyword[in] identifier[referenced_attr] :
identifier[v] = identifier[string_variable_lookup] ( identifier[tb] , identifier[attr] )
keyword[if] identifier[v] keyword[is] keyword[not] identifier[ValueError] :
identifier[ref_string] = identifier[format_reference] ( identifier[v] , identifier[max_string_length] = identifier[max_string_length] )
identifier[info] . identifier[append] (( identifier[attr] , identifier[ref_string] ))
keyword[return] identifier[info] | def get_object_references(tb, source, max_string_length=1000):
"""
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
global obj_ref_regex
referenced_attr = set()
for line in source.split('\n'):
referenced_attr.update(set(re.findall(obj_ref_regex, line))) # depends on [control=['for'], data=['line']]
referenced_attr = sorted(referenced_attr)
info = []
for attr in referenced_attr:
v = string_variable_lookup(tb, attr)
if v is not ValueError:
ref_string = format_reference(v, max_string_length=max_string_length)
info.append((attr, ref_string)) # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=['attr']]
return info |
def get_collection_name(cls):
    '''
    Gets the full name of the collection, as declared by the ModelOptions class like so:
    namespace.name
    If no namespace or name is provided, the class's lowercase name is used
    '''
    meta = getattr(cls, '_meta', None)
    if meta is None:
        return cls.__name__.lower()
    namespace = getattr(meta, 'namespace', None)
    name = getattr(meta, 'name', None) or cls.__name__.lower()
    if namespace:
        return '{}.{}'.format(namespace, name)
    return name
constant[
Gets the full name of the collection, as declared by the ModelOptions class like so:
namespace.name
If no namespace or name is provided, the class's lowercase name is used
]
if call[name[hasattr], parameter[name[cls], constant[_meta]]] begin[:]
variable[np] assign[=] call[name[getattr], parameter[name[cls]._meta, constant[namespace], constant[None]]]
variable[cname] assign[=] call[name[getattr], parameter[name[cls]._meta, constant[name], constant[None]]]
if name[np] begin[:]
return[call[constant[{}.{}].format, parameter[name[np], <ast.BoolOp object at 0x7da18eb54eb0>]]]
return[<ast.BoolOp object at 0x7da18eb57460>] | keyword[def] identifier[get_collection_name] ( identifier[cls] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[cls] , literal[string] ):
identifier[np] = identifier[getattr] ( identifier[cls] . identifier[_meta] , literal[string] , keyword[None] )
identifier[cname] = identifier[getattr] ( identifier[cls] . identifier[_meta] , literal[string] , keyword[None] )
keyword[if] identifier[np] :
keyword[return] literal[string] . identifier[format] ( identifier[np] , identifier[cname] keyword[or] identifier[cls] . identifier[__name__] . identifier[lower] ())
keyword[return] identifier[cname] keyword[or] identifier[cls] . identifier[__name__] . identifier[lower] () | def get_collection_name(cls):
"""
Gets the full name of the collection, as declared by the ModelOptions class like so:
namespace.name
If no namespace or name is provided, the class's lowercase name is used
"""
if hasattr(cls, '_meta'):
np = getattr(cls._meta, 'namespace', None)
cname = getattr(cls._meta, 'name', None)
if np:
return '{}.{}'.format(np, cname or cls.__name__.lower()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return cname or cls.__name__.lower() |
def report_final_result(metric):
    """Reports final result to tuner.
    metric: serializable object.
    """
    assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result'
    # Build the FINAL metric record first, then serialize and send it.
    payload = {
        'parameter_id': _params['parameter_id'],
        'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
        'type': 'FINAL',
        'sequence': 0,  # TODO: may be unnecessary
        'value': metric,
    }
    platform.send_metric(json_tricks.dumps(payload))
constant[Reports final result to tuner.
metric: serializable object.
]
assert[compare[name[_params] is_not constant[None]]]
variable[metric] assign[=] call[name[json_tricks].dumps, parameter[dictionary[[<ast.Constant object at 0x7da18f09d870>, <ast.Constant object at 0x7da18dc05a80>, <ast.Constant object at 0x7da18dc047f0>, <ast.Constant object at 0x7da18dc05fc0>, <ast.Constant object at 0x7da18dc07730>], [<ast.Subscript object at 0x7da18dc07520>, <ast.Attribute object at 0x7da18ede6b90>, <ast.Constant object at 0x7da18ede4e80>, <ast.Constant object at 0x7da18ede5390>, <ast.Name object at 0x7da18ede4a00>]]]]
call[name[platform].send_metric, parameter[name[metric]]] | keyword[def] identifier[report_final_result] ( identifier[metric] ):
literal[string]
keyword[assert] identifier[_params] keyword[is] keyword[not] keyword[None] , literal[string]
identifier[metric] = identifier[json_tricks] . identifier[dumps] ({
literal[string] : identifier[_params] [ literal[string] ],
literal[string] : identifier[trial_env_vars] . identifier[NNI_TRIAL_JOB_ID] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : identifier[metric]
})
identifier[platform] . identifier[send_metric] ( identifier[metric] ) | def report_final_result(metric):
"""Reports final result to tuner.
metric: serializable object.
"""
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result' # TODO: may be unnecessary
metric = json_tricks.dumps({'parameter_id': _params['parameter_id'], 'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID, 'type': 'FINAL', 'sequence': 0, 'value': metric})
platform.send_metric(metric) |
async def get_property(self, command):
    """Get property state from device."""
    _LOGGER.debug("Getting property %s", command)
    # Another request is already in flight; report the device as busy.
    if self.__checkLock():
        return BUSY
    response = await self.send_request(
        timeout=self.__get_timeout(command),
        params=EPSON_KEY_COMMANDS[command],
        type='json_query')
    if response:
        try:
            return response['projector']['feature']['reply']
        except KeyError:
            # Response arrived but lacked the expected reply payload.
            return BUSY
    return False
literal[string]
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[command] )
keyword[if] identifier[self] . identifier[__checkLock] ():
keyword[return] identifier[BUSY]
identifier[timeout] = identifier[self] . identifier[__get_timeout] ( identifier[command] )
identifier[response] = keyword[await] identifier[self] . identifier[send_request] (
identifier[timeout] = identifier[timeout] ,
identifier[params] = identifier[EPSON_KEY_COMMANDS] [ identifier[command] ],
identifier[type] = literal[string] )
keyword[if] keyword[not] identifier[response] :
keyword[return] keyword[False]
keyword[try] :
keyword[return] identifier[response] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[return] identifier[BUSY] | async def get_property(self, command):
"""Get property state from device."""
_LOGGER.debug('Getting property %s', command)
if self.__checkLock():
return BUSY # depends on [control=['if'], data=[]]
timeout = self.__get_timeout(command)
response = await self.send_request(timeout=timeout, params=EPSON_KEY_COMMANDS[command], type='json_query')
if not response:
return False # depends on [control=['if'], data=[]]
try:
return response['projector']['feature']['reply'] # depends on [control=['try'], data=[]]
except KeyError:
return BUSY # depends on [control=['except'], data=[]] |
def load_dic28():
    """DIC28 Dataset from Pajek.
    This network represents connections among English words in a dictionary.
    It was generated from Knuth's dictionary. Two words are connected by an
    edge if we can reach one from the other by
    - changing a single character (e. g., work - word)
    - adding / removing a single character (e. g., ever - fever).
    There exist 52,652 words (vertices in a network) having 2 up to 8 characters
    in the dictionary. The obtained network has 89038 edges.
    """
    # NOTE: the docstring above is forwarded to Dataset() below, so it is
    # part of the runtime behaviour and must not be reworded.
    dataset_path = _load('dic28')
    X = _load_csv(dataset_path, 'data')
    y = X.pop('label').values
    # Load the two word graphs shipped with the dataset.
    graphs = {
        part: nx.Graph(nx.read_gml(os.path.join(dataset_path, part + '.gml')))
        for part in ('graph1', 'graph2')
    }
    # Merge both graphs and add the cross-graph edges listed in the data table.
    combined = graphs['graph1'].copy()
    combined.add_nodes_from(graphs['graph2'].nodes(data=True))
    combined.add_edges_from(graphs['graph2'].edges)
    combined.add_edges_from(X[['graph1', 'graph2']].values)
    return Dataset(load_dic28.__doc__, X, y, accuracy_score,
                   stratify=True, graph=combined, graphs=graphs)
constant[DIC28 Dataset from Pajek.
This network represents connections among English words in a dictionary.
It was generated from Knuth's dictionary. Two words are connected by an
edge if we can reach one from the other by
- changing a single character (e. g., work - word)
- adding / removing a single character (e. g., ever - fever).
There exist 52,652 words (vertices in a network) having 2 up to 8 characters
in the dictionary. The obtained network has 89038 edges.
]
variable[dataset_path] assign[=] call[name[_load], parameter[constant[dic28]]]
variable[X] assign[=] call[name[_load_csv], parameter[name[dataset_path], constant[data]]]
variable[y] assign[=] call[name[X].pop, parameter[constant[label]]].values
variable[graph1] assign[=] call[name[nx].Graph, parameter[call[name[nx].read_gml, parameter[call[name[os].path.join, parameter[name[dataset_path], constant[graph1.gml]]]]]]]
variable[graph2] assign[=] call[name[nx].Graph, parameter[call[name[nx].read_gml, parameter[call[name[os].path.join, parameter[name[dataset_path], constant[graph2.gml]]]]]]]
variable[graph] assign[=] call[name[graph1].copy, parameter[]]
call[name[graph].add_nodes_from, parameter[call[name[graph2].nodes, parameter[]]]]
call[name[graph].add_edges_from, parameter[name[graph2].edges]]
call[name[graph].add_edges_from, parameter[call[name[X]][list[[<ast.Constant object at 0x7da18f00ece0>, <ast.Constant object at 0x7da18f00c910>]]].values]]
variable[graphs] assign[=] dictionary[[<ast.Constant object at 0x7da18f00db70>, <ast.Constant object at 0x7da18f00ca00>], [<ast.Name object at 0x7da18f00d6f0>, <ast.Name object at 0x7da18f00e170>]]
return[call[name[Dataset], parameter[name[load_dic28].__doc__, name[X], name[y], name[accuracy_score]]]] | keyword[def] identifier[load_dic28] ():
literal[string]
identifier[dataset_path] = identifier[_load] ( literal[string] )
identifier[X] = identifier[_load_csv] ( identifier[dataset_path] , literal[string] )
identifier[y] = identifier[X] . identifier[pop] ( literal[string] ). identifier[values]
identifier[graph1] = identifier[nx] . identifier[Graph] ( identifier[nx] . identifier[read_gml] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dataset_path] , literal[string] )))
identifier[graph2] = identifier[nx] . identifier[Graph] ( identifier[nx] . identifier[read_gml] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dataset_path] , literal[string] )))
identifier[graph] = identifier[graph1] . identifier[copy] ()
identifier[graph] . identifier[add_nodes_from] ( identifier[graph2] . identifier[nodes] ( identifier[data] = keyword[True] ))
identifier[graph] . identifier[add_edges_from] ( identifier[graph2] . identifier[edges] )
identifier[graph] . identifier[add_edges_from] ( identifier[X] [[ literal[string] , literal[string] ]]. identifier[values] )
identifier[graphs] ={
literal[string] : identifier[graph1] ,
literal[string] : identifier[graph2] ,
}
keyword[return] identifier[Dataset] ( identifier[load_dic28] . identifier[__doc__] , identifier[X] , identifier[y] , identifier[accuracy_score] ,
identifier[stratify] = keyword[True] , identifier[graph] = identifier[graph] , identifier[graphs] = identifier[graphs] ) | def load_dic28():
"""DIC28 Dataset from Pajek.
This network represents connections among English words in a dictionary.
It was generated from Knuth's dictionary. Two words are connected by an
edge if we can reach one from the other by
- changing a single character (e. g., work - word)
- adding / removing a single character (e. g., ever - fever).
There exist 52,652 words (vertices in a network) having 2 up to 8 characters
in the dictionary. The obtained network has 89038 edges.
"""
dataset_path = _load('dic28')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml')))
graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml')))
graph = graph1.copy()
graph.add_nodes_from(graph2.nodes(data=True))
graph.add_edges_from(graph2.edges)
graph.add_edges_from(X[['graph1', 'graph2']].values)
graphs = {'graph1': graph1, 'graph2': graph2}
return Dataset(load_dic28.__doc__, X, y, accuracy_score, stratify=True, graph=graph, graphs=graphs) |
def apply_all_link_refs(
    bytecode: bytes, link_refs: List[Dict[str, Any]], attr_dict: Dict[str, str]
) -> bytes:
    """
    Applies all link references corresponding to a valid attr_dict to the bytecode.
    """
    if link_refs is None:
        return bytecode
    # Thread the bytecode through every link-ref transformer in order,
    # one offset at a time (explicit fold instead of toolz.pipe).
    linked = bytecode
    for ref in link_refs:
        for offset in ref["offsets"]:
            linked = apply_link_ref(offset, ref["length"], attr_dict[ref["name"]])(linked)
    return linked
constant[
Applies all link references corresponding to a valid attr_dict to the bytecode.
]
if compare[name[link_refs] is constant[None]] begin[:]
return[name[bytecode]]
variable[link_fns] assign[=] <ast.GeneratorExp object at 0x7da20e960730>
variable[linked_bytecode] assign[=] call[name[pipe], parameter[name[bytecode], <ast.Starred object at 0x7da20e963640>]]
return[name[linked_bytecode]] | keyword[def] identifier[apply_all_link_refs] (
identifier[bytecode] : identifier[bytes] , identifier[link_refs] : identifier[List] [ identifier[Dict] [ identifier[str] , identifier[Any] ]], identifier[attr_dict] : identifier[Dict] [ identifier[str] , identifier[str] ]
)-> identifier[bytes] :
literal[string]
keyword[if] identifier[link_refs] keyword[is] keyword[None] :
keyword[return] identifier[bytecode]
identifier[link_fns] =(
identifier[apply_link_ref] ( identifier[offset] , identifier[ref] [ literal[string] ], identifier[attr_dict] [ identifier[ref] [ literal[string] ]])
keyword[for] identifier[ref] keyword[in] identifier[link_refs]
keyword[for] identifier[offset] keyword[in] identifier[ref] [ literal[string] ]
)
identifier[linked_bytecode] = identifier[pipe] ( identifier[bytecode] ,* identifier[link_fns] )
keyword[return] identifier[linked_bytecode] | def apply_all_link_refs(bytecode: bytes, link_refs: List[Dict[str, Any]], attr_dict: Dict[str, str]) -> bytes:
"""
Applies all link references corresponding to a valid attr_dict to the bytecode.
"""
if link_refs is None:
return bytecode # depends on [control=['if'], data=[]]
link_fns = (apply_link_ref(offset, ref['length'], attr_dict[ref['name']]) for ref in link_refs for offset in ref['offsets'])
linked_bytecode = pipe(bytecode, *link_fns)
return linked_bytecode |
def preprocess(string):
    """
    Preprocess string to transform all diacritics and remove other special characters than appropriate
    :param string:
    :return:
    """
    # Python 2 API: ``unicode`` decodes the incoming UTF-8 byte string.
    decoded = unicode(string, encoding="utf-8")
    # Map each accented character to its simpler form via the accents table.
    simplified = regex1.sub(lambda match: accents[match.group()], decoded)
    # Strip every remaining unwanted character and re-encode to UTF-8 bytes.
    return regex2.sub('', simplified).encode('utf-8')
constant[
Preprocess string to transform all diacritics and remove other special characters than appropriate
:param string:
:return:
]
variable[string] assign[=] call[name[unicode], parameter[name[string]]]
variable[string] assign[=] call[name[regex1].sub, parameter[<ast.Lambda object at 0x7da1b26af910>, name[string]]]
return[call[call[name[regex2].sub, parameter[constant[], name[string]]].encode, parameter[constant[utf-8]]]] | keyword[def] identifier[preprocess] ( identifier[string] ):
literal[string]
identifier[string] = identifier[unicode] ( identifier[string] , identifier[encoding] = literal[string] )
identifier[string] = identifier[regex1] . identifier[sub] ( keyword[lambda] identifier[x] : identifier[accents] [ identifier[x] . identifier[group] ()], identifier[string] )
keyword[return] identifier[regex2] . identifier[sub] ( literal[string] , identifier[string] ). identifier[encode] ( literal[string] ) | def preprocess(string):
"""
Preprocess string to transform all diacritics and remove other special characters than appropriate
:param string:
:return:
"""
string = unicode(string, encoding='utf-8')
# convert diacritics to simpler forms
string = regex1.sub(lambda x: accents[x.group()], string)
# remove all rest of the unwanted characters
return regex2.sub('', string).encode('utf-8') |
def get_shape(cursor, name):
    """Return the shape of the table ``name``.

    The shape is encoded in the last column's name: everything after the
    first character is an underscore-separated list of dimension sizes.
    """
    cursor.execute('select * from [%s]' % name)
    last_column = cursor.description[-1][0]
    return tuple(int(dim) for dim in last_column[1:].split('_'))
constant[Return the shape of the table ``name``.]
call[name[cursor].execute, parameter[binary_operation[constant[select * from [%s]] <ast.Mod object at 0x7da2590d6920> name[name]]]]
variable[inds] assign[=] call[call[call[call[name[cursor].description][<ast.UnaryOp object at 0x7da2041db7c0>]][constant[0]]][<ast.Slice object at 0x7da2041da5f0>].split, parameter[constant[_]]]
return[call[name[tuple], parameter[<ast.ListComp object at 0x7da2041dace0>]]] | keyword[def] identifier[get_shape] ( identifier[cursor] , identifier[name] ):
literal[string]
identifier[cursor] . identifier[execute] ( literal[string] % identifier[name] )
identifier[inds] = identifier[cursor] . identifier[description] [- literal[int] ][ literal[int] ][ literal[int] :]. identifier[split] ( literal[string] )
keyword[return] identifier[tuple] ([ identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[inds] ]) | def get_shape(cursor, name):
"""Return the shape of the table ``name``."""
cursor.execute('select * from [%s]' % name)
inds = cursor.description[-1][0][1:].split('_')
return tuple([int(i) for i in inds]) |
def tplot_restore(filename):
    """
    This function will restore tplot variables that have been saved with the "tplot_save" command.
    .. note::
        This function is compatible with the IDL tplot_save routine.
        If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
        Not all plot options will transfer over at this time.
    Parameters:
        filename : str
            The file name and full path generated by the "tplot_save" command.
    Returns:
        None
    Examples:
        >>> # Restore the saved data from the tplot_save example
        >>> import pytplot
        >>> pytplot.restore('C:/temp/variable1.pytplot')
    """
    # Error check: bail out early when the path does not point to an existing file.
    if not (os.path.isfile(filename)):
        print("Not a valid file name")
        return
    # Check if the restored file was an IDL file (.tplot = IDL save file,
    # anything else = Python pickle produced by pytplot's own tplot_save).
    if filename.endswith('.tplot'):
        temp_tplot = readsav(filename)
        # temp_tplot['dq'] holds one record per stored tplot variable.
        for i in range(len(temp_tplot['dq'])):
            data_name = temp_tplot['dq'][i][0].decode("utf-8")
            temp_x_data = temp_tplot['dq'][i][1][0][0]
            # Pandas reads in data the other way I guess
            # (2-D spectrogram data from IDL arrives transposed).
            if len(temp_tplot['dq'][i][1][0].shape) == 2 if False else len(temp_tplot['dq'][i][1][0][2].shape) == 2:
                temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2])
            else:
                temp_y_data = temp_tplot['dq'][i][1][0][2]
            # If there are more than 4 fields, that means it is a spectrogram
            # (field index 4 carries the 'v' bin values).
            if len(temp_tplot['dq'][i][1][0]) > 4:
                temp_v_data = temp_tplot['dq'][i][1][0][4]
                # Change from little endian to big endian, since pandas apparently hates little endian
                # We might want to move this into the store_data procedure eventually
                # NOTE(review): '>' actually denotes big-endian; this converts
                # big-endian arrays to the native byte order — confirm intent.
                if (temp_x_data.dtype.byteorder == '>'):
                    temp_x_data = temp_x_data.byteswap().newbyteorder()
                if (temp_y_data.dtype.byteorder == '>'):
                    temp_y_data = temp_y_data.byteswap().newbyteorder()
                if (temp_v_data.dtype.byteorder == '>'):
                    temp_v_data = temp_v_data.byteswap().newbyteorder()
                store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data})
            else:
                # Change from little endian to big endian, since pandas apparently hates little endian
                # We might want to move this into the store_data procedure eventually
                if (temp_x_data.dtype.byteorder == '>'):
                    temp_x_data = temp_x_data.byteswap().newbyteorder()
                if (temp_y_data.dtype.byteorder == '>'):
                    temp_y_data = temp_y_data.byteswap().newbyteorder()
                store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data})
            # Field 3 is a record of per-variable plot options (may be empty).
            if temp_tplot['dq'][i][3].dtype.names is not None:
                for option_name in temp_tplot['dq'][i][3].dtype.names:
                    options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0])
            # Fields 4-6 carry the time range, dtype tag, and creation time.
            data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
            data_quants[data_name].dtype = temp_tplot['dq'][i][5]
            data_quants[data_name].create_time = temp_tplot['dq'][i][6]
        # Global tplot options saved alongside the variables.
        for option_name in temp_tplot['tv'][0][0].dtype.names:
            if option_name == 'TRANGE':
                tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0])
            if option_name == 'WSIZE':
                tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0])
            if option_name == 'VAR_LABEL':
                tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0])
        if 'P' in temp_tplot['tv'][0][1].tolist():
            for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
                if option_name == 'TITLE':
                    tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0])
        # temp_tplot['tv'][0][1] is all of the "settings" variables
        # temp_tplot['tv'][0][1]['D'][0] is "device" options
        # temp_tplot['tv'][0][1]['P'][0] is "plot" options
        # temp_tplot['tv'][0][1]['X'][0] is x axis options
        # temp_tplot['tv'][0][1]['Y'][0] is y axis options
        ####################################################################
    else:
        # Pickle layout: [count, variable_1, ..., variable_count, global_opts]
        temp = pickle.load(open(filename,"rb"))
        num_data_quants = temp[0]
        for i in range(0, num_data_quants):
            data_quants[temp[i+1].name] = temp[i+1]
        # NOTE(review): this binds a *local* name only and has no visible
        # effect; presumably the module-level tplot_opt_glob was meant to be
        # updated (e.g. via a global statement) — confirm before changing.
        tplot_opt_glob = temp[num_data_quants+1]
    return
constant[
This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot')
]
if <ast.UnaryOp object at 0x7da1b05bc1c0> begin[:]
call[name[print], parameter[constant[Not a valid file name]]]
return[None]
if call[name[filename].endswith, parameter[constant[.tplot]]] begin[:]
variable[temp_tplot] assign[=] call[name[readsav], parameter[name[filename]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[temp_tplot]][constant[dq]]]]]]] begin[:]
variable[data_name] assign[=] call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[0]].decode, parameter[constant[utf-8]]]
variable[temp_x_data] assign[=] call[call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[1]]][constant[0]]][constant[0]]
if compare[call[name[len], parameter[call[call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[1]]][constant[0]]][constant[2]].shape]] equal[==] constant[2]] begin[:]
variable[temp_y_data] assign[=] call[name[np].transpose, parameter[call[call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[1]]][constant[0]]][constant[2]]]]
if compare[call[name[len], parameter[call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[1]]][constant[0]]]] greater[>] constant[4]] begin[:]
variable[temp_v_data] assign[=] call[call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[1]]][constant[0]]][constant[4]]
if compare[name[temp_x_data].dtype.byteorder equal[==] constant[>]] begin[:]
variable[temp_x_data] assign[=] call[call[name[temp_x_data].byteswap, parameter[]].newbyteorder, parameter[]]
if compare[name[temp_y_data].dtype.byteorder equal[==] constant[>]] begin[:]
variable[temp_y_data] assign[=] call[call[name[temp_y_data].byteswap, parameter[]].newbyteorder, parameter[]]
if compare[name[temp_v_data].dtype.byteorder equal[==] constant[>]] begin[:]
variable[temp_v_data] assign[=] call[call[name[temp_v_data].byteswap, parameter[]].newbyteorder, parameter[]]
call[name[store_data], parameter[name[data_name]]]
if compare[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[3]].dtype.names is_not constant[None]] begin[:]
for taget[name[option_name]] in starred[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[3]].dtype.names] begin[:]
call[name[options], parameter[name[data_name], name[option_name], call[call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[3]]][name[option_name]]][constant[0]]]]
call[name[data_quants]][name[data_name]].trange assign[=] call[call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[4]].tolist, parameter[]]
call[name[data_quants]][name[data_name]].dtype assign[=] call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[5]]
call[name[data_quants]][name[data_name]].create_time assign[=] call[call[call[name[temp_tplot]][constant[dq]]][name[i]]][constant[6]]
for taget[name[option_name]] in starred[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[0]].dtype.names] begin[:]
if compare[name[option_name] equal[==] constant[TRANGE]] begin[:]
call[name[tplot_options], parameter[constant[x_range], call[call[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[0]]][name[option_name]]][constant[0]]]]
if compare[name[option_name] equal[==] constant[WSIZE]] begin[:]
call[name[tplot_options], parameter[constant[wsize], call[call[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[0]]][name[option_name]]][constant[0]]]]
if compare[name[option_name] equal[==] constant[VAR_LABEL]] begin[:]
call[name[tplot_options], parameter[constant[var_label], call[call[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[0]]][name[option_name]]][constant[0]]]]
if compare[constant[P] in call[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[1]].tolist, parameter[]]] begin[:]
for taget[name[option_name]] in starred[call[call[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[1]]][constant[P]]][constant[0]].dtype.names] begin[:]
if compare[name[option_name] equal[==] constant[TITLE]] begin[:]
call[name[tplot_options], parameter[constant[title], call[call[call[call[call[call[call[name[temp_tplot]][constant[tv]]][constant[0]]][constant[1]]][constant[P]]][constant[0]]][name[option_name]]][constant[0]]]]
return[None] | keyword[def] identifier[tplot_restore] ( identifier[filename] ):
literal[string]
keyword[if] keyword[not] ( identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] )):
identifier[print] ( literal[string] )
keyword[return]
keyword[if] identifier[filename] . identifier[endswith] ( literal[string] ):
identifier[temp_tplot] = identifier[readsav] ( identifier[filename] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[temp_tplot] [ literal[string] ])):
identifier[data_name] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ]. identifier[decode] ( literal[string] )
identifier[temp_x_data] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] identifier[len] ( identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ literal[int] ][ literal[int] ]. identifier[shape] )== literal[int] :
identifier[temp_y_data] = identifier[np] . identifier[transpose] ( identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ literal[int] ][ literal[int] ])
keyword[else] :
identifier[temp_y_data] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] identifier[len] ( identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ literal[int] ])> literal[int] :
identifier[temp_v_data] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ literal[int] ][ literal[int] ]
keyword[if] ( identifier[temp_x_data] . identifier[dtype] . identifier[byteorder] == literal[string] ):
identifier[temp_x_data] = identifier[temp_x_data] . identifier[byteswap] (). identifier[newbyteorder] ()
keyword[if] ( identifier[temp_y_data] . identifier[dtype] . identifier[byteorder] == literal[string] ):
identifier[temp_y_data] = identifier[temp_y_data] . identifier[byteswap] (). identifier[newbyteorder] ()
keyword[if] ( identifier[temp_v_data] . identifier[dtype] . identifier[byteorder] == literal[string] ):
identifier[temp_v_data] = identifier[temp_v_data] . identifier[byteswap] (). identifier[newbyteorder] ()
identifier[store_data] ( identifier[data_name] , identifier[data] ={ literal[string] : identifier[temp_x_data] , literal[string] : identifier[temp_y_data] , literal[string] : identifier[temp_v_data] })
keyword[else] :
keyword[if] ( identifier[temp_x_data] . identifier[dtype] . identifier[byteorder] == literal[string] ):
identifier[temp_x_data] = identifier[temp_x_data] . identifier[byteswap] (). identifier[newbyteorder] ()
keyword[if] ( identifier[temp_y_data] . identifier[dtype] . identifier[byteorder] == literal[string] ):
identifier[temp_y_data] = identifier[temp_y_data] . identifier[byteswap] (). identifier[newbyteorder] ()
identifier[store_data] ( identifier[data_name] , identifier[data] ={ literal[string] : identifier[temp_x_data] , literal[string] : identifier[temp_y_data] })
keyword[if] identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ]. identifier[dtype] . identifier[names] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[option_name] keyword[in] identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ]. identifier[dtype] . identifier[names] :
identifier[options] ( identifier[data_name] , identifier[option_name] , identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ][ identifier[option_name] ][ literal[int] ])
identifier[data_quants] [ identifier[data_name] ]. identifier[trange] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ]. identifier[tolist] ()
identifier[data_quants] [ identifier[data_name] ]. identifier[dtype] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ]
identifier[data_quants] [ identifier[data_name] ]. identifier[create_time] = identifier[temp_tplot] [ literal[string] ][ identifier[i] ][ literal[int] ]
keyword[for] identifier[option_name] keyword[in] identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ]. identifier[dtype] . identifier[names] :
keyword[if] identifier[option_name] == literal[string] :
identifier[tplot_options] ( literal[string] , identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ][ identifier[option_name] ][ literal[int] ])
keyword[if] identifier[option_name] == literal[string] :
identifier[tplot_options] ( literal[string] , identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ][ identifier[option_name] ][ literal[int] ])
keyword[if] identifier[option_name] == literal[string] :
identifier[tplot_options] ( literal[string] , identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ][ identifier[option_name] ][ literal[int] ])
keyword[if] literal[string] keyword[in] identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ]. identifier[tolist] ():
keyword[for] identifier[option_name] keyword[in] identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ][ literal[string] ][ literal[int] ]. identifier[dtype] . identifier[names] :
keyword[if] identifier[option_name] == literal[string] :
identifier[tplot_options] ( literal[string] , identifier[temp_tplot] [ literal[string] ][ literal[int] ][ literal[int] ][ literal[string] ][ literal[int] ][ identifier[option_name] ][ literal[int] ])
keyword[else] :
identifier[temp] = identifier[pickle] . identifier[load] ( identifier[open] ( identifier[filename] , literal[string] ))
identifier[num_data_quants] = identifier[temp] [ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[num_data_quants] ):
identifier[data_quants] [ identifier[temp] [ identifier[i] + literal[int] ]. identifier[name] ]= identifier[temp] [ identifier[i] + literal[int] ]
identifier[tplot_opt_glob] = identifier[temp] [ identifier[num_data_quants] + literal[int] ]
keyword[return] | def tplot_restore(filename):
"""
This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot')
"""
#Error check
if not os.path.isfile(filename):
print('Not a valid file name')
return # depends on [control=['if'], data=[]]
#Check if the restored file was an IDL file
if filename.endswith('.tplot'):
temp_tplot = readsav(filename)
for i in range(len(temp_tplot['dq'])):
data_name = temp_tplot['dq'][i][0].decode('utf-8')
temp_x_data = temp_tplot['dq'][i][1][0][0]
#Pandas reads in data the other way I guess
if len(temp_tplot['dq'][i][1][0][2].shape) == 2:
temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2]) # depends on [control=['if'], data=[]]
else:
temp_y_data = temp_tplot['dq'][i][1][0][2] #If there are more than 4 fields, that means it is a spectrogram
if len(temp_tplot['dq'][i][1][0]) > 4:
temp_v_data = temp_tplot['dq'][i][1][0][4]
#Change from little endian to big endian, since pandas apparently hates little endian
#We might want to move this into the store_data procedure eventually
if temp_x_data.dtype.byteorder == '>':
temp_x_data = temp_x_data.byteswap().newbyteorder() # depends on [control=['if'], data=[]]
if temp_y_data.dtype.byteorder == '>':
temp_y_data = temp_y_data.byteswap().newbyteorder() # depends on [control=['if'], data=[]]
if temp_v_data.dtype.byteorder == '>':
temp_v_data = temp_v_data.byteswap().newbyteorder() # depends on [control=['if'], data=[]]
store_data(data_name, data={'x': temp_x_data, 'y': temp_y_data, 'v': temp_v_data}) # depends on [control=['if'], data=[]]
else:
#Change from little endian to big endian, since pandas apparently hates little endian
#We might want to move this into the store_data procedure eventually
if temp_x_data.dtype.byteorder == '>':
temp_x_data = temp_x_data.byteswap().newbyteorder() # depends on [control=['if'], data=[]]
if temp_y_data.dtype.byteorder == '>':
temp_y_data = temp_y_data.byteswap().newbyteorder() # depends on [control=['if'], data=[]]
store_data(data_name, data={'x': temp_x_data, 'y': temp_y_data})
if temp_tplot['dq'][i][3].dtype.names is not None:
for option_name in temp_tplot['dq'][i][3].dtype.names:
options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0]) # depends on [control=['for'], data=['option_name']] # depends on [control=['if'], data=[]]
data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
data_quants[data_name].dtype = temp_tplot['dq'][i][5]
data_quants[data_name].create_time = temp_tplot['dq'][i][6]
for option_name in temp_tplot['tv'][0][0].dtype.names:
if option_name == 'TRANGE':
tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0]) # depends on [control=['if'], data=['option_name']]
if option_name == 'WSIZE':
tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0]) # depends on [control=['if'], data=['option_name']]
if option_name == 'VAR_LABEL':
tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0]) # depends on [control=['if'], data=['option_name']] # depends on [control=['for'], data=['option_name']]
if 'P' in temp_tplot['tv'][0][1].tolist():
for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
if option_name == 'TITLE':
tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0]) # depends on [control=['if'], data=['option_name']] # depends on [control=['for'], data=['option_name']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
#temp_tplot['tv'][0][1] is all of the "settings" variables
#temp_tplot['tv'][0][1]['D'][0] is "device" options
#temp_tplot['tv'][0][1]['P'][0] is "plot" options
#temp_tplot['tv'][0][1]['X'][0] is x axis options
#temp_tplot['tv'][0][1]['Y'][0] is y axis options
####################################################################
temp = pickle.load(open(filename, 'rb'))
num_data_quants = temp[0]
for i in range(0, num_data_quants):
data_quants[temp[i + 1].name] = temp[i + 1] # depends on [control=['for'], data=['i']]
tplot_opt_glob = temp[num_data_quants + 1]
return |
def get_song_type(self, cache=True):
    """Get the types of a song.

    Args:
        cache (boolean): A boolean indicating whether or not the cached value should be used
            (if available). Defaults to True.

    Returns:
        A list of strings, each representing a song type: 'christmas', for example.
        Returns an empty list when the profile response carries no song types.

    Example:
        >>> s = song.Song('SOQKVPH12A58A7AF4D')
        >>> s.song_type
        [u'christmas']
    """
    if not (cache and ('song_type' in self.cache)):
        response = self.get_attribute('profile', bucket='song_type')
        # dict.has_key() was removed in Python 3 -- use .get() with a default
        # instead. Songs with no type lack the 'song_type' key entirely, so
        # normalize that case to an empty list (same fallback as before).
        self.cache['song_type'] = response['songs'][0].get('song_type', [])
    return self.cache['song_type']
constant[Get the types of a song.
Args:
cache (boolean): A boolean indicating whether or not the cached value should be used
(if available). Defaults to True.
Returns:
A list of strings, each representing a song type: 'christmas', for example.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.song_type
[u'christmas']
>>>
]
if <ast.UnaryOp object at 0x7da1b04a7a00> begin[:]
variable[response] assign[=] call[name[self].get_attribute, parameter[constant[profile]]]
if call[call[call[name[response]][constant[songs]]][constant[0]].has_key, parameter[constant[song_type]]] begin[:]
call[name[self].cache][constant[song_type]] assign[=] call[call[call[name[response]][constant[songs]]][constant[0]]][constant[song_type]]
return[call[name[self].cache][constant[song_type]]] | keyword[def] identifier[get_song_type] ( identifier[self] , identifier[cache] = keyword[True] ):
literal[string]
keyword[if] keyword[not] ( identifier[cache] keyword[and] ( literal[string] keyword[in] identifier[self] . identifier[cache] )):
identifier[response] = identifier[self] . identifier[get_attribute] ( literal[string] , identifier[bucket] = literal[string] )
keyword[if] identifier[response] [ literal[string] ][ literal[int] ]. identifier[has_key] ( literal[string] ):
identifier[self] . identifier[cache] [ literal[string] ]= identifier[response] [ literal[string] ][ literal[int] ][ literal[string] ]
keyword[else] :
identifier[self] . identifier[cache] [ literal[string] ]=[]
keyword[return] identifier[self] . identifier[cache] [ literal[string] ] | def get_song_type(self, cache=True):
"""Get the types of a song.
Args:
cache (boolean): A boolean indicating whether or not the cached value should be used
(if available). Defaults to True.
Returns:
A list of strings, each representing a song type: 'christmas', for example.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.song_type
[u'christmas']
>>>
"""
if not (cache and 'song_type' in self.cache):
response = self.get_attribute('profile', bucket='song_type')
if response['songs'][0].has_key('song_type'):
self.cache['song_type'] = response['songs'][0]['song_type'] # depends on [control=['if'], data=[]]
else:
self.cache['song_type'] = [] # depends on [control=['if'], data=[]]
return self.cache['song_type'] |
def is_closed_chunk(self, chk):
    """Return True if the chunk is closed (i.e. not free), False otherwise."""
    # Bit 0x2 of the status word marks a closed chunk; in Python the bitwise
    # AND binds tighter than the comparison, so a plain truthiness test on
    # the masked value is equivalent to the original `& 0x2 != 0` check.
    return bool(self.get_chunk_status(chk) & 0x2)
constant[Check the chunk is free or not]
variable[cs] assign[=] call[name[self].get_chunk_status, parameter[name[chk]]]
if compare[binary_operation[name[cs] <ast.BitAnd object at 0x7da2590d6b60> constant[2]] not_equal[!=] constant[0]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_closed_chunk] ( identifier[self] , identifier[chk] ):
literal[string]
identifier[cs] = identifier[self] . identifier[get_chunk_status] ( identifier[chk] )
keyword[if] identifier[cs] & literal[int] != literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_closed_chunk(self, chk):
"""Check the chunk is free or not"""
cs = self.get_chunk_status(chk)
if cs & 2 != 0:
return True # depends on [control=['if'], data=[]]
return False |
def fetch_stackexchange(
    dataset,
    test_set_fraction=0.2,
    min_training_interactions=1,
    data_home=None,
    indicator_features=True,
    tag_features=False,
    download_if_missing=True,
):
    """
    Fetch a dataset from the `StackExchange network <http://stackexchange.com/>`_.

    The datasets contain users answering questions: an interaction is defined as a user
    answering a given question.

    The following datasets from the StackExchange network are available:

    - CrossValidated: From stats.stackexchange.com. Approximately 9000 users, 72000 questions,
      and 70000 answers.
    - StackOverflow: From stackoverflow.stackexchange.com. Approximately 1.3M users, 11M questions,
      and 18M answers.

    Parameters
    ----------
    dataset: string, one of ('crossvalidated', 'stackoverflow')
        The part of the StackExchange network for which to fetch the dataset.
    test_set_fraction: float, optional
        The fraction of the dataset used for testing. Splitting into the train and test set is done
        in a time-based fashion: all interactions before a certain time are in the train set and
        all interactions after that time are in the test set.
    min_training_interactions: int, optional
        Only include users with this amount of interactions in the training set.
        Note: the filter below uses a strict '>', so users with exactly this
        many training interactions are excluded.
    data_home: path, optional
        Path to the directory in which the downloaded data should be placed.
        Defaults to ``~/lightfm_data/``.
    indicator_features: bool, optional
        Use an [n_users, n_users] identity matrix for item features. When True with genre_features,
        indicator and genre features are concatenated into a single feature matrix of shape
        [n_users, n_users + n_genres].
    download_if_missing: bool, optional
        Download the data if not present. Raises an IOError if False and data is missing.

    Notes
    -----
    The return value is a dictionary containing the following keys:

    Returns
    -------
    train: sp.coo_matrix of shape [n_users, n_items]
        Contains training set interactions.
    test: sp.coo_matrix of shape [n_users, n_items]
        Contains testing set interactions.
    item_features: sp.csr_matrix of shape [n_items, n_item_features]
        Contains item features.
    item_feature_labels: np.array of strings of shape [n_item_features,]
        Labels of item features.
    """
    # Items need at least one source of features, otherwise they would have
    # no representation at all.
    if not (indicator_features or tag_features):
        raise ValueError(
            "At least one of item_indicator_features " "or tag_features must be True"
        )
    if dataset not in ("crossvalidated", "stackoverflow"):
        raise ValueError("Unknown dataset")
    if not (0.0 < test_set_fraction < 1.0):
        raise ValueError("Test set fraction must be between 0 and 1")
    # Pre-built .npz archives hosted on GitHub releases.
    urls = {
        "crossvalidated": (
            "https://github.com/maciejkula/lightfm_datasets/releases/"
            "download/v0.1.0/stackexchange_crossvalidated.npz"
        ),
        "stackoverflow": (
            "https://github.com/maciejkula/lightfm_datasets/releases/"
            "download/v0.1.0/stackexchange_stackoverflow.npz"
        ),
    }
    # Download the archive (or reuse a cached copy under data_home) and load
    # its raw arrays.
    path = _common.get_data(
        data_home,
        urls[dataset],
        os.path.join("stackexchange", dataset),
        "data.npz",
        download_if_missing,
    )
    data = np.load(path)
    # Rebuild the sparse user-question interaction matrix from its stored COO
    # components. The data values are timestamps, used below for the
    # time-based train/test split.
    interactions = sp.coo_matrix(
        (
            data["interactions_data"],
            (data["interactions_row"], data["interactions_col"]),
        ),
        shape=data["interactions_shape"].flatten(),
    )
    interactions.sum_duplicates()  # merge repeated (row, col) entries in place
    # Question tag features, also stored in COO form.
    tag_features_mat = sp.coo_matrix(
        (data["features_data"], (data["features_row"], data["features_col"])),
        shape=data["features_shape"].flatten(),
    )
    tag_labels = data["labels"]
    # Time-based split: interactions strictly before the cutoff timestamp go
    # to the training set, the remainder to the test set.
    test_cutoff_index = int(len(interactions.data) * (1.0 - test_set_fraction))
    test_cutoff_timestamp = np.sort(interactions.data)[test_cutoff_index]
    in_train = interactions.data < test_cutoff_timestamp
    in_test = np.logical_not(in_train)
    # Binarize the split matrices: entries become 1.0 instead of timestamps.
    train = sp.coo_matrix(
        (
            np.ones(in_train.sum(), dtype=np.float32),
            (interactions.row[in_train], interactions.col[in_train]),
        ),
        shape=interactions.shape,
    )
    test = sp.coo_matrix(
        (
            np.ones(in_test.sum(), dtype=np.float32),
            (interactions.row[in_test], interactions.col[in_test]),
        ),
        shape=interactions.shape,
    )
    if min_training_interactions > 0:
        # Keep only users (rows) with enough training interactions. Strict
        # '>': users with exactly min_training_interactions are dropped.
        # The same row mask is applied to the test set so user indices stay
        # aligned between the two matrices.
        include = np.squeeze(np.array(train.getnnz(axis=1))) > min_training_interactions
        train = train.tocsr()[include].tocoo()
        test = test.tocsr()[include].tocoo()
    # Assemble the item (question) feature matrix: per-question indicator
    # features, tag features, or both concatenated horizontally.
    if indicator_features and not tag_features:
        features = sp.identity(train.shape[1], format="csr", dtype=np.float32)
        labels = np.array(["question_id:{}".format(x) for x in range(train.shape[1])])
    elif not indicator_features and tag_features:
        features = tag_features_mat.tocsr()
        labels = tag_labels
    else:
        id_features = sp.identity(train.shape[1], format="csr", dtype=np.float32)
        features = sp.hstack([id_features, tag_features_mat]).tocsr()
        labels = np.concatenate(
            [
                np.array(["question_id:{}".format(x) for x in range(train.shape[1])]),
                tag_labels,
            ]
        )
    return {
        "train": train,
        "test": test,
        "item_features": features,
        "item_feature_labels": labels,
    }
constant[
Fetch a dataset from the `StackExchange network <http://stackexchange.com/>`_.
The datasets contain users answering questions: an interaction is defined as a user
answering a given question.
The following datasets from the StackExchange network are available:
- CrossValidated: From stats.stackexchange.com. Approximately 9000 users, 72000 questions,
and 70000 answers.
- StackOverflow: From stackoverflow.stackexchange.com. Approximately 1.3M users, 11M questions,
and 18M answers.
Parameters
----------
dataset: string, one of ('crossvalidated', 'stackoverflow')
The part of the StackExchange network for which to fetch the dataset.
test_set_fraction: float, optional
The fraction of the dataset used for testing. Splitting into the train and test set is done
in a time-based fashion: all interactions before a certain time are in the train set and
all interactions after that time are in the test set.
min_training_interactions: int, optional
Only include users with this amount of interactions in the training set.
data_home: path, optional
Path to the directory in which the downloaded data should be placed.
Defaults to ``~/lightfm_data/``.
indicator_features: bool, optional
Use an [n_users, n_users] identity matrix for item features. When True with genre_features,
indicator and genre features are concatenated into a single feature matrix of shape
[n_users, n_users + n_genres].
download_if_missing: bool, optional
Download the data if not present. Raises an IOError if False and data is missing.
Notes
-----
The return value is a dictionary containing the following keys:
Returns
-------
train: sp.coo_matrix of shape [n_users, n_items]
Contains training set interactions.
test: sp.coo_matrix of shape [n_users, n_items]
Contains testing set interactions.
item_features: sp.csr_matrix of shape [n_items, n_item_features]
Contains item features.
item_feature_labels: np.array of strings of shape [n_item_features,]
Labels of item features.
]
if <ast.UnaryOp object at 0x7da1b1894f40> begin[:]
<ast.Raise object at 0x7da1b1896c50>
if compare[name[dataset] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b17379a0>, <ast.Constant object at 0x7da1b1737970>]]] begin[:]
<ast.Raise object at 0x7da1b1735270>
if <ast.UnaryOp object at 0x7da1b1735a50> begin[:]
<ast.Raise object at 0x7da1b17353f0>
variable[urls] assign[=] dictionary[[<ast.Constant object at 0x7da1b1735ff0>, <ast.Constant object at 0x7da1b17342b0>], [<ast.Constant object at 0x7da1b1735fc0>, <ast.Constant object at 0x7da1b17360b0>]]
variable[path] assign[=] call[name[_common].get_data, parameter[name[data_home], call[name[urls]][name[dataset]], call[name[os].path.join, parameter[constant[stackexchange], name[dataset]]], constant[data.npz], name[download_if_missing]]]
variable[data] assign[=] call[name[np].load, parameter[name[path]]]
variable[interactions] assign[=] call[name[sp].coo_matrix, parameter[tuple[[<ast.Subscript object at 0x7da1b1736200>, <ast.Tuple object at 0x7da1b1735900>]]]]
call[name[interactions].sum_duplicates, parameter[]]
variable[tag_features_mat] assign[=] call[name[sp].coo_matrix, parameter[tuple[[<ast.Subscript object at 0x7da1b1735e10>, <ast.Tuple object at 0x7da1b1734430>]]]]
variable[tag_labels] assign[=] call[name[data]][constant[labels]]
variable[test_cutoff_index] assign[=] call[name[int], parameter[binary_operation[call[name[len], parameter[name[interactions].data]] * binary_operation[constant[1.0] - name[test_set_fraction]]]]]
variable[test_cutoff_timestamp] assign[=] call[call[name[np].sort, parameter[name[interactions].data]]][name[test_cutoff_index]]
variable[in_train] assign[=] compare[name[interactions].data less[<] name[test_cutoff_timestamp]]
variable[in_test] assign[=] call[name[np].logical_not, parameter[name[in_train]]]
variable[train] assign[=] call[name[sp].coo_matrix, parameter[tuple[[<ast.Call object at 0x7da1b1737b80>, <ast.Tuple object at 0x7da1b17374c0>]]]]
variable[test] assign[=] call[name[sp].coo_matrix, parameter[tuple[[<ast.Call object at 0x7da1b17376d0>, <ast.Tuple object at 0x7da1b1736650>]]]]
if compare[name[min_training_interactions] greater[>] constant[0]] begin[:]
variable[include] assign[=] compare[call[name[np].squeeze, parameter[call[name[np].array, parameter[call[name[train].getnnz, parameter[]]]]]] greater[>] name[min_training_interactions]]
variable[train] assign[=] call[call[call[name[train].tocsr, parameter[]]][name[include]].tocoo, parameter[]]
variable[test] assign[=] call[call[call[name[test].tocsr, parameter[]]][name[include]].tocoo, parameter[]]
if <ast.BoolOp object at 0x7da1b17367a0> begin[:]
variable[features] assign[=] call[name[sp].identity, parameter[call[name[train].shape][constant[1]]]]
variable[labels] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1736620>]]
return[dictionary[[<ast.Constant object at 0x7da1b17ba2c0>, <ast.Constant object at 0x7da1b17b9bd0>, <ast.Constant object at 0x7da1b17b9c00>, <ast.Constant object at 0x7da1b17b8ac0>], [<ast.Name object at 0x7da1b17b9d20>, <ast.Name object at 0x7da1b17bb3d0>, <ast.Name object at 0x7da1b17b8610>, <ast.Name object at 0x7da1b17b92a0>]]] | keyword[def] identifier[fetch_stackexchange] (
identifier[dataset] ,
identifier[test_set_fraction] = literal[int] ,
identifier[min_training_interactions] = literal[int] ,
identifier[data_home] = keyword[None] ,
identifier[indicator_features] = keyword[True] ,
identifier[tag_features] = keyword[False] ,
identifier[download_if_missing] = keyword[True] ,
):
literal[string]
keyword[if] keyword[not] ( identifier[indicator_features] keyword[or] identifier[tag_features] ):
keyword[raise] identifier[ValueError] (
literal[string] literal[string]
)
keyword[if] identifier[dataset] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] ( literal[int] < identifier[test_set_fraction] < literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[urls] ={
literal[string] :(
literal[string]
literal[string]
),
literal[string] :(
literal[string]
literal[string]
),
}
identifier[path] = identifier[_common] . identifier[get_data] (
identifier[data_home] ,
identifier[urls] [ identifier[dataset] ],
identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[dataset] ),
literal[string] ,
identifier[download_if_missing] ,
)
identifier[data] = identifier[np] . identifier[load] ( identifier[path] )
identifier[interactions] = identifier[sp] . identifier[coo_matrix] (
(
identifier[data] [ literal[string] ],
( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]),
),
identifier[shape] = identifier[data] [ literal[string] ]. identifier[flatten] (),
)
identifier[interactions] . identifier[sum_duplicates] ()
identifier[tag_features_mat] = identifier[sp] . identifier[coo_matrix] (
( identifier[data] [ literal[string] ],( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ])),
identifier[shape] = identifier[data] [ literal[string] ]. identifier[flatten] (),
)
identifier[tag_labels] = identifier[data] [ literal[string] ]
identifier[test_cutoff_index] = identifier[int] ( identifier[len] ( identifier[interactions] . identifier[data] )*( literal[int] - identifier[test_set_fraction] ))
identifier[test_cutoff_timestamp] = identifier[np] . identifier[sort] ( identifier[interactions] . identifier[data] )[ identifier[test_cutoff_index] ]
identifier[in_train] = identifier[interactions] . identifier[data] < identifier[test_cutoff_timestamp]
identifier[in_test] = identifier[np] . identifier[logical_not] ( identifier[in_train] )
identifier[train] = identifier[sp] . identifier[coo_matrix] (
(
identifier[np] . identifier[ones] ( identifier[in_train] . identifier[sum] (), identifier[dtype] = identifier[np] . identifier[float32] ),
( identifier[interactions] . identifier[row] [ identifier[in_train] ], identifier[interactions] . identifier[col] [ identifier[in_train] ]),
),
identifier[shape] = identifier[interactions] . identifier[shape] ,
)
identifier[test] = identifier[sp] . identifier[coo_matrix] (
(
identifier[np] . identifier[ones] ( identifier[in_test] . identifier[sum] (), identifier[dtype] = identifier[np] . identifier[float32] ),
( identifier[interactions] . identifier[row] [ identifier[in_test] ], identifier[interactions] . identifier[col] [ identifier[in_test] ]),
),
identifier[shape] = identifier[interactions] . identifier[shape] ,
)
keyword[if] identifier[min_training_interactions] > literal[int] :
identifier[include] = identifier[np] . identifier[squeeze] ( identifier[np] . identifier[array] ( identifier[train] . identifier[getnnz] ( identifier[axis] = literal[int] )))> identifier[min_training_interactions]
identifier[train] = identifier[train] . identifier[tocsr] ()[ identifier[include] ]. identifier[tocoo] ()
identifier[test] = identifier[test] . identifier[tocsr] ()[ identifier[include] ]. identifier[tocoo] ()
keyword[if] identifier[indicator_features] keyword[and] keyword[not] identifier[tag_features] :
identifier[features] = identifier[sp] . identifier[identity] ( identifier[train] . identifier[shape] [ literal[int] ], identifier[format] = literal[string] , identifier[dtype] = identifier[np] . identifier[float32] )
identifier[labels] = identifier[np] . identifier[array] ([ literal[string] . identifier[format] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[train] . identifier[shape] [ literal[int] ])])
keyword[elif] keyword[not] identifier[indicator_features] keyword[and] identifier[tag_features] :
identifier[features] = identifier[tag_features_mat] . identifier[tocsr] ()
identifier[labels] = identifier[tag_labels]
keyword[else] :
identifier[id_features] = identifier[sp] . identifier[identity] ( identifier[train] . identifier[shape] [ literal[int] ], identifier[format] = literal[string] , identifier[dtype] = identifier[np] . identifier[float32] )
identifier[features] = identifier[sp] . identifier[hstack] ([ identifier[id_features] , identifier[tag_features_mat] ]). identifier[tocsr] ()
identifier[labels] = identifier[np] . identifier[concatenate] (
[
identifier[np] . identifier[array] ([ literal[string] . identifier[format] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[train] . identifier[shape] [ literal[int] ])]),
identifier[tag_labels] ,
]
)
keyword[return] {
literal[string] : identifier[train] ,
literal[string] : identifier[test] ,
literal[string] : identifier[features] ,
literal[string] : identifier[labels] ,
} | def fetch_stackexchange(dataset, test_set_fraction=0.2, min_training_interactions=1, data_home=None, indicator_features=True, tag_features=False, download_if_missing=True):
"""
Fetch a dataset from the `StackExchange network <http://stackexchange.com/>`_.
The datasets contain users answering questions: an interaction is defined as a user
answering a given question.
The following datasets from the StackExchange network are available:
- CrossValidated: From stats.stackexchange.com. Approximately 9000 users, 72000 questions,
and 70000 answers.
- StackOverflow: From stackoverflow.stackexchange.com. Approximately 1.3M users, 11M questions,
and 18M answers.
Parameters
----------
dataset: string, one of ('crossvalidated', 'stackoverflow')
The part of the StackExchange network for which to fetch the dataset.
test_set_fraction: float, optional
The fraction of the dataset used for testing. Splitting into the train and test set is done
in a time-based fashion: all interactions before a certain time are in the train set and
all interactions after that time are in the test set.
min_training_interactions: int, optional
Only include users with this amount of interactions in the training set.
data_home: path, optional
Path to the directory in which the downloaded data should be placed.
Defaults to ``~/lightfm_data/``.
indicator_features: bool, optional
Use an [n_users, n_users] identity matrix for item features. When True with genre_features,
indicator and genre features are concatenated into a single feature matrix of shape
[n_users, n_users + n_genres].
download_if_missing: bool, optional
Download the data if not present. Raises an IOError if False and data is missing.
Notes
-----
The return value is a dictionary containing the following keys:
Returns
-------
train: sp.coo_matrix of shape [n_users, n_items]
Contains training set interactions.
test: sp.coo_matrix of shape [n_users, n_items]
Contains testing set interactions.
item_features: sp.csr_matrix of shape [n_items, n_item_features]
Contains item features.
item_feature_labels: np.array of strings of shape [n_item_features,]
Labels of item features.
"""
if not (indicator_features or tag_features):
raise ValueError('At least one of item_indicator_features or tag_features must be True') # depends on [control=['if'], data=[]]
if dataset not in ('crossvalidated', 'stackoverflow'):
raise ValueError('Unknown dataset') # depends on [control=['if'], data=[]]
if not 0.0 < test_set_fraction < 1.0:
raise ValueError('Test set fraction must be between 0 and 1') # depends on [control=['if'], data=[]]
urls = {'crossvalidated': 'https://github.com/maciejkula/lightfm_datasets/releases/download/v0.1.0/stackexchange_crossvalidated.npz', 'stackoverflow': 'https://github.com/maciejkula/lightfm_datasets/releases/download/v0.1.0/stackexchange_stackoverflow.npz'}
path = _common.get_data(data_home, urls[dataset], os.path.join('stackexchange', dataset), 'data.npz', download_if_missing)
data = np.load(path)
interactions = sp.coo_matrix((data['interactions_data'], (data['interactions_row'], data['interactions_col'])), shape=data['interactions_shape'].flatten())
interactions.sum_duplicates()
tag_features_mat = sp.coo_matrix((data['features_data'], (data['features_row'], data['features_col'])), shape=data['features_shape'].flatten())
tag_labels = data['labels']
test_cutoff_index = int(len(interactions.data) * (1.0 - test_set_fraction))
test_cutoff_timestamp = np.sort(interactions.data)[test_cutoff_index]
in_train = interactions.data < test_cutoff_timestamp
in_test = np.logical_not(in_train)
train = sp.coo_matrix((np.ones(in_train.sum(), dtype=np.float32), (interactions.row[in_train], interactions.col[in_train])), shape=interactions.shape)
test = sp.coo_matrix((np.ones(in_test.sum(), dtype=np.float32), (interactions.row[in_test], interactions.col[in_test])), shape=interactions.shape)
if min_training_interactions > 0:
include = np.squeeze(np.array(train.getnnz(axis=1))) > min_training_interactions
train = train.tocsr()[include].tocoo()
test = test.tocsr()[include].tocoo() # depends on [control=['if'], data=['min_training_interactions']]
if indicator_features and (not tag_features):
features = sp.identity(train.shape[1], format='csr', dtype=np.float32)
labels = np.array(['question_id:{}'.format(x) for x in range(train.shape[1])]) # depends on [control=['if'], data=[]]
elif not indicator_features and tag_features:
features = tag_features_mat.tocsr()
labels = tag_labels # depends on [control=['if'], data=[]]
else:
id_features = sp.identity(train.shape[1], format='csr', dtype=np.float32)
features = sp.hstack([id_features, tag_features_mat]).tocsr()
labels = np.concatenate([np.array(['question_id:{}'.format(x) for x in range(train.shape[1])]), tag_labels])
return {'train': train, 'test': test, 'item_features': features, 'item_feature_labels': labels} |
def _weave_layers_graft(
    *, pdf_base, page_num, text, font, font_key, procset, rotation, strip_old_text, log
):
    """Insert the text layer from text page 0 on to pdf_base at page_num.

    Reads the single-page OCR text PDF at ``text``, wraps its content stream
    in a transformation matrix so it is rotated and scaled to match the
    target page, prepends it to that page's content, and updates the page
    resources (font, procset).

    :param pdf_base: open pikepdf.Pdf that receives the text layer
    :param page_num: page number (as used by ``pdf_base.pages.p``) to graft onto
    :param text: path to the single-page text-layer PDF produced by OCR
    :param font: font object to register on the target page
    :param font_key: resource key under which to register the font
    :param procset: procset entry to register on the target page
    :param rotation: clockwise rotation (degrees) applied to the original page
    :param strip_old_text: when True, remove pre-existing invisible text first
    :param log: logger used for debug output
    """
    log.debug("Grafting")
    if Path(text).stat().st_size == 0:
        # Nothing was OCRed for this page; leave the base page untouched.
        return

    # This is a pointer indicating a specific page in the base file
    pdf_text = pikepdf.open(text)
    try:
        pdf_text_contents = pdf_text.pages[0].Contents.read_bytes()

        if not tesseract.has_textonly_pdf():
            # If we don't have textonly_pdf, edit the stream to delete the
            # instruction to draw the image Tesseract generated, which we do
            # not use.
            stream = bytearray(pdf_text_contents)
            pattern = b'/Im1 Do'
            idx = stream.find(pattern)
            if idx >= 0:
                # Overwrite in place with spaces of equal length so every
                # other offset in the stream stays valid.  Guard against
                # find() returning -1, which would otherwise splice spaces
                # over/into the end of the stream and corrupt it.
                stream[idx : (idx + len(pattern))] = b' ' * len(pattern)
            pdf_text_contents = bytes(stream)

        base_page = pdf_base.pages.p(page_num)

        # The text page always will be oriented up by this stage but the
        # original content may have a rotation applied. Wrap the text stream
        # with a rotation so it will be oriented the same way as the rest of
        # the page content.
        # (Previous versions OCRmyPDF rotated the content layer to match the
        # text.)
        mediabox = [float(pdf_text.pages[0].MediaBox[v]) for v in range(4)]
        wt, ht = mediabox[2] - mediabox[0], mediabox[3] - mediabox[1]

        mediabox = [float(base_page.MediaBox[v]) for v in range(4)]
        wp, hp = mediabox[2] - mediabox[0], mediabox[3] - mediabox[1]

        translate = pikepdf.PdfMatrix().translated(-wt / 2, -ht / 2)
        untranslate = pikepdf.PdfMatrix().translated(wp / 2, hp / 2)
        # -rotation because the input is a clockwise angle and this formula
        # uses CCW
        rotation = -rotation % 360
        rotate = pikepdf.PdfMatrix().rotated(rotation)

        # Because of rounding of DPI, we might get a text layer that is not
        # identically sized to the target page. Scale to adjust. Normally
        # this is within 0.998.
        if rotation in (90, 270):
            wt, ht = ht, wt
        scale_x = wp / wt
        scale_y = hp / ht
        log.debug('%r', (scale_x, scale_y))
        scale = pikepdf.PdfMatrix().scaled(scale_x, scale_y)

        # Translate the text so it is centered at (0, 0), rotate it there,
        # adjust for a size difference between initial and text PDF, then
        # untranslate
        ctm = translate @ rotate @ scale @ untranslate

        pdf_text_contents = b'q %s cm\n' % ctm.encode() + pdf_text_contents + b'\nQ\n'
        new_text_layer = pikepdf.Stream(pdf_base, pdf_text_contents)

        if strip_old_text:
            strip_invisible_text(pdf_base, base_page, log)

        base_page.page_contents_add(new_text_layer, prepend=True)

        _update_page_resources(
            page=base_page, font=font, font_key=font_key, procset=procset
        )
    finally:
        # Close even if grafting fails part-way so the file handle is not
        # leaked (the original closed only on the success path).
        pdf_text.close()
constant[Insert the text layer from text page 0 on to pdf_base at page_num]
call[name[log].debug, parameter[constant[Grafting]]]
if compare[call[call[name[Path], parameter[name[text]]].stat, parameter[]].st_size equal[==] constant[0]] begin[:]
return[None]
variable[pdf_text] assign[=] call[name[pikepdf].open, parameter[name[text]]]
variable[pdf_text_contents] assign[=] call[call[name[pdf_text].pages][constant[0]].Contents.read_bytes, parameter[]]
if <ast.UnaryOp object at 0x7da1b1b19090> begin[:]
variable[stream] assign[=] call[name[bytearray], parameter[name[pdf_text_contents]]]
variable[pattern] assign[=] constant[b'/Im1 Do']
variable[idx] assign[=] call[name[stream].find, parameter[name[pattern]]]
call[name[stream]][<ast.Slice object at 0x7da1b1b19120>] assign[=] binary_operation[constant[b' '] * call[name[len], parameter[name[pattern]]]]
variable[pdf_text_contents] assign[=] call[name[bytes], parameter[name[stream]]]
variable[base_page] assign[=] call[name[pdf_base].pages.p, parameter[name[page_num]]]
variable[mediabox] assign[=] <ast.ListComp object at 0x7da1b1b18430>
<ast.Tuple object at 0x7da1b1b1aaa0> assign[=] tuple[[<ast.BinOp object at 0x7da1b1b180d0>, <ast.BinOp object at 0x7da1b1bebbe0>]]
variable[mediabox] assign[=] <ast.ListComp object at 0x7da1b1be9420>
<ast.Tuple object at 0x7da1b1beacb0> assign[=] tuple[[<ast.BinOp object at 0x7da1b1cd6530>, <ast.BinOp object at 0x7da1b1cd7640>]]
variable[translate] assign[=] call[call[name[pikepdf].PdfMatrix, parameter[]].translated, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1cd5fc0> / constant[2]], binary_operation[<ast.UnaryOp object at 0x7da1b1cd7850> / constant[2]]]]
variable[untranslate] assign[=] call[call[name[pikepdf].PdfMatrix, parameter[]].translated, parameter[binary_operation[name[wp] / constant[2]], binary_operation[name[hp] / constant[2]]]]
variable[rotation] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b1cd6ec0> <ast.Mod object at 0x7da2590d6920> constant[360]]
variable[rotate] assign[=] call[call[name[pikepdf].PdfMatrix, parameter[]].rotated, parameter[name[rotation]]]
if compare[name[rotation] in tuple[[<ast.Constant object at 0x7da1b1cd5060>, <ast.Constant object at 0x7da1b1cd5bd0>]]] begin[:]
<ast.Tuple object at 0x7da1b1cd7220> assign[=] tuple[[<ast.Name object at 0x7da1b1cd7580>, <ast.Name object at 0x7da1b1cd5c00>]]
variable[scale_x] assign[=] binary_operation[name[wp] / name[wt]]
variable[scale_y] assign[=] binary_operation[name[hp] / name[ht]]
call[name[log].debug, parameter[constant[%r], tuple[[<ast.Name object at 0x7da1b1cd4f70>, <ast.Name object at 0x7da1b1cd5cc0>]]]]
variable[scale] assign[=] call[call[name[pikepdf].PdfMatrix, parameter[]].scaled, parameter[name[scale_x], name[scale_y]]]
variable[ctm] assign[=] binary_operation[binary_operation[binary_operation[name[translate] <ast.MatMult object at 0x7da2590d6860> name[rotate]] <ast.MatMult object at 0x7da2590d6860> name[scale]] <ast.MatMult object at 0x7da2590d6860> name[untranslate]]
variable[pdf_text_contents] assign[=] binary_operation[binary_operation[binary_operation[constant[b'q %s cm\n'] <ast.Mod object at 0x7da2590d6920> call[name[ctm].encode, parameter[]]] + name[pdf_text_contents]] + constant[b'\nQ\n']]
variable[new_text_layer] assign[=] call[name[pikepdf].Stream, parameter[name[pdf_base], name[pdf_text_contents]]]
if name[strip_old_text] begin[:]
call[name[strip_invisible_text], parameter[name[pdf_base], name[base_page], name[log]]]
call[name[base_page].page_contents_add, parameter[name[new_text_layer]]]
call[name[_update_page_resources], parameter[]]
call[name[pdf_text].close, parameter[]] | keyword[def] identifier[_weave_layers_graft] (
*, identifier[pdf_base] , identifier[page_num] , identifier[text] , identifier[font] , identifier[font_key] , identifier[procset] , identifier[rotation] , identifier[strip_old_text] , identifier[log]
):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
keyword[if] identifier[Path] ( identifier[text] ). identifier[stat] (). identifier[st_size] == literal[int] :
keyword[return]
identifier[pdf_text] = identifier[pikepdf] . identifier[open] ( identifier[text] )
identifier[pdf_text_contents] = identifier[pdf_text] . identifier[pages] [ literal[int] ]. identifier[Contents] . identifier[read_bytes] ()
keyword[if] keyword[not] identifier[tesseract] . identifier[has_textonly_pdf] ():
identifier[stream] = identifier[bytearray] ( identifier[pdf_text_contents] )
identifier[pattern] = literal[string]
identifier[idx] = identifier[stream] . identifier[find] ( identifier[pattern] )
identifier[stream] [ identifier[idx] :( identifier[idx] + identifier[len] ( identifier[pattern] ))]= literal[string] * identifier[len] ( identifier[pattern] )
identifier[pdf_text_contents] = identifier[bytes] ( identifier[stream] )
identifier[base_page] = identifier[pdf_base] . identifier[pages] . identifier[p] ( identifier[page_num] )
identifier[mediabox] =[ identifier[float] ( identifier[pdf_text] . identifier[pages] [ literal[int] ]. identifier[MediaBox] [ identifier[v] ]) keyword[for] identifier[v] keyword[in] identifier[range] ( literal[int] )]
identifier[wt] , identifier[ht] = identifier[mediabox] [ literal[int] ]- identifier[mediabox] [ literal[int] ], identifier[mediabox] [ literal[int] ]- identifier[mediabox] [ literal[int] ]
identifier[mediabox] =[ identifier[float] ( identifier[base_page] . identifier[MediaBox] [ identifier[v] ]) keyword[for] identifier[v] keyword[in] identifier[range] ( literal[int] )]
identifier[wp] , identifier[hp] = identifier[mediabox] [ literal[int] ]- identifier[mediabox] [ literal[int] ], identifier[mediabox] [ literal[int] ]- identifier[mediabox] [ literal[int] ]
identifier[translate] = identifier[pikepdf] . identifier[PdfMatrix] (). identifier[translated] (- identifier[wt] / literal[int] ,- identifier[ht] / literal[int] )
identifier[untranslate] = identifier[pikepdf] . identifier[PdfMatrix] (). identifier[translated] ( identifier[wp] / literal[int] , identifier[hp] / literal[int] )
identifier[rotation] =- identifier[rotation] % literal[int]
identifier[rotate] = identifier[pikepdf] . identifier[PdfMatrix] (). identifier[rotated] ( identifier[rotation] )
keyword[if] identifier[rotation] keyword[in] ( literal[int] , literal[int] ):
identifier[wt] , identifier[ht] = identifier[ht] , identifier[wt]
identifier[scale_x] = identifier[wp] / identifier[wt]
identifier[scale_y] = identifier[hp] / identifier[ht]
identifier[log] . identifier[debug] ( literal[string] ,( identifier[scale_x] , identifier[scale_y] ))
identifier[scale] = identifier[pikepdf] . identifier[PdfMatrix] (). identifier[scaled] ( identifier[scale_x] , identifier[scale_y] )
identifier[ctm] = identifier[translate] @ identifier[rotate] @ identifier[scale] @ identifier[untranslate]
identifier[pdf_text_contents] = literal[string] % identifier[ctm] . identifier[encode] ()+ identifier[pdf_text_contents] + literal[string]
identifier[new_text_layer] = identifier[pikepdf] . identifier[Stream] ( identifier[pdf_base] , identifier[pdf_text_contents] )
keyword[if] identifier[strip_old_text] :
identifier[strip_invisible_text] ( identifier[pdf_base] , identifier[base_page] , identifier[log] )
identifier[base_page] . identifier[page_contents_add] ( identifier[new_text_layer] , identifier[prepend] = keyword[True] )
identifier[_update_page_resources] (
identifier[page] = identifier[base_page] , identifier[font] = identifier[font] , identifier[font_key] = identifier[font_key] , identifier[procset] = identifier[procset]
)
identifier[pdf_text] . identifier[close] () | def _weave_layers_graft(*, pdf_base, page_num, text, font, font_key, procset, rotation, strip_old_text, log):
"""Insert the text layer from text page 0 on to pdf_base at page_num"""
log.debug('Grafting')
if Path(text).stat().st_size == 0:
return # depends on [control=['if'], data=[]]
# This is a pointer indicating a specific page in the base file
pdf_text = pikepdf.open(text)
pdf_text_contents = pdf_text.pages[0].Contents.read_bytes()
if not tesseract.has_textonly_pdf():
# If we don't have textonly_pdf, edit the stream to delete the
# instruction to draw the image Tesseract generated, which we do not
# use.
stream = bytearray(pdf_text_contents)
pattern = b'/Im1 Do'
idx = stream.find(pattern)
stream[idx:idx + len(pattern)] = b' ' * len(pattern)
pdf_text_contents = bytes(stream) # depends on [control=['if'], data=[]]
base_page = pdf_base.pages.p(page_num)
# The text page always will be oriented up by this stage but the original
# content may have a rotation applied. Wrap the text stream with a rotation
# so it will be oriented the same way as the rest of the page content.
# (Previous versions OCRmyPDF rotated the content layer to match the text.)
mediabox = [float(pdf_text.pages[0].MediaBox[v]) for v in range(4)]
(wt, ht) = (mediabox[2] - mediabox[0], mediabox[3] - mediabox[1])
mediabox = [float(base_page.MediaBox[v]) for v in range(4)]
(wp, hp) = (mediabox[2] - mediabox[0], mediabox[3] - mediabox[1])
translate = pikepdf.PdfMatrix().translated(-wt / 2, -ht / 2)
untranslate = pikepdf.PdfMatrix().translated(wp / 2, hp / 2)
# -rotation because the input is a clockwise angle and this formula
# uses CCW
rotation = -rotation % 360
rotate = pikepdf.PdfMatrix().rotated(rotation)
# Because of rounding of DPI, we might get a text layer that is not
# identically sized to the target page. Scale to adjust. Normally this
# is within 0.998.
if rotation in (90, 270):
(wt, ht) = (ht, wt) # depends on [control=['if'], data=[]]
scale_x = wp / wt
scale_y = hp / ht
log.debug('%r', (scale_x, scale_y))
scale = pikepdf.PdfMatrix().scaled(scale_x, scale_y)
# Translate the text so it is centered at (0, 0), rotate it there, adjust
# for a size different between initial and text PDF, then untranslate
ctm = translate @ rotate @ scale @ untranslate
pdf_text_contents = b'q %s cm\n' % ctm.encode() + pdf_text_contents + b'\nQ\n'
new_text_layer = pikepdf.Stream(pdf_base, pdf_text_contents)
if strip_old_text:
strip_invisible_text(pdf_base, base_page, log) # depends on [control=['if'], data=[]]
base_page.page_contents_add(new_text_layer, prepend=True)
_update_page_resources(page=base_page, font=font, font_key=font_key, procset=procset)
pdf_text.close() |
def scan(data, clamconf):
    """Scan data for viruses.
    @return (infection msgs, errors)
    @rtype ([], [])
    """
    try:
        virus_checker = ClamdScanner(clamconf)
    except socket.error:
        # Daemon unreachable: no infections, one error message.
        return ([], [_("Could not connect to ClamAV daemon.")])
    try:
        virus_checker.scan(data)
    finally:
        virus_checker.close()
    return virus_checker.infected, virus_checker.errors
constant[Scan data for viruses.
@return (infection msgs, errors)
@rtype ([], [])
]
<ast.Try object at 0x7da20e963610>
<ast.Try object at 0x7da18c4cf250>
return[tuple[[<ast.Attribute object at 0x7da18c4ccc10>, <ast.Attribute object at 0x7da18c4ced40>]]] | keyword[def] identifier[scan] ( identifier[data] , identifier[clamconf] ):
literal[string]
keyword[try] :
identifier[scanner] = identifier[ClamdScanner] ( identifier[clamconf] )
keyword[except] identifier[socket] . identifier[error] :
identifier[errmsg] = identifier[_] ( literal[string] )
keyword[return] ([],[ identifier[errmsg] ])
keyword[try] :
identifier[scanner] . identifier[scan] ( identifier[data] )
keyword[finally] :
identifier[scanner] . identifier[close] ()
keyword[return] identifier[scanner] . identifier[infected] , identifier[scanner] . identifier[errors] | def scan(data, clamconf):
"""Scan data for viruses.
@return (infection msgs, errors)
@rtype ([], [])
"""
try:
scanner = ClamdScanner(clamconf) # depends on [control=['try'], data=[]]
except socket.error:
errmsg = _('Could not connect to ClamAV daemon.')
return ([], [errmsg]) # depends on [control=['except'], data=[]]
try:
scanner.scan(data) # depends on [control=['try'], data=[]]
finally:
scanner.close()
return (scanner.infected, scanner.errors) |
def _norm_slices(self, fsls):
    """Convert slices to a normalized tuple of int/slice/farray.

    Each entry is normalized per-dimension: ints via _norm_index,
    slices via _norm_slice; anything else (farray) passes through.
    """
    def _norm_one(dim, item):
        # Exact type check (not isinstance) so e.g. bool is not treated
        # as an index, matching the original dispatch.
        if type(item) is int:
            return _norm_index(dim, item, *self.shape[dim])
        if type(item) is slice:
            return _norm_slice(item, *self.shape[dim])
        return item

    return [_norm_one(dim, item) for dim, item in enumerate(fsls)]
constant[Convert slices to a normalized tuple of int/slice/farray.]
variable[nsls] assign[=] call[name[list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0e17b50>, <ast.Name object at 0x7da1b0e161d0>]]] in starred[call[name[enumerate], parameter[name[fsls]]]] begin[:]
variable[fsl_type] assign[=] call[name[type], parameter[name[fsl]]]
if compare[name[fsl_type] is name[int]] begin[:]
call[name[nsls].append, parameter[call[name[_norm_index], parameter[name[i], name[fsl], <ast.Starred object at 0x7da1b0e16020>]]]]
return[name[nsls]] | keyword[def] identifier[_norm_slices] ( identifier[self] , identifier[fsls] ):
literal[string]
identifier[nsls] = identifier[list] ()
keyword[for] identifier[i] , identifier[fsl] keyword[in] identifier[enumerate] ( identifier[fsls] ):
identifier[fsl_type] = identifier[type] ( identifier[fsl] )
keyword[if] identifier[fsl_type] keyword[is] identifier[int] :
identifier[nsls] . identifier[append] ( identifier[_norm_index] ( identifier[i] , identifier[fsl] ,* identifier[self] . identifier[shape] [ identifier[i] ]))
keyword[elif] identifier[fsl_type] keyword[is] identifier[slice] :
identifier[nsls] . identifier[append] ( identifier[_norm_slice] ( identifier[fsl] ,* identifier[self] . identifier[shape] [ identifier[i] ]))
keyword[else] :
identifier[nsls] . identifier[append] ( identifier[fsl] )
keyword[return] identifier[nsls] | def _norm_slices(self, fsls):
"""Convert slices to a normalized tuple of int/slice/farray."""
# Normalize indices, and fill empty slice entries
nsls = list()
for (i, fsl) in enumerate(fsls):
fsl_type = type(fsl)
if fsl_type is int:
nsls.append(_norm_index(i, fsl, *self.shape[i])) # depends on [control=['if'], data=[]]
elif fsl_type is slice:
nsls.append(_norm_slice(fsl, *self.shape[i])) # depends on [control=['if'], data=[]]
else:
# farray
nsls.append(fsl) # depends on [control=['for'], data=[]]
return nsls |
def new_datetime(self):
    """Return the time the bundle was created as a datetime object.

    Returns None when ``self.state.new`` is not a valid timestamp value
    (e.g. None).
    """
    from datetime import datetime

    timestamp = self.state.new
    if timestamp is None:
        return None
    try:
        return datetime.fromtimestamp(timestamp)
    except TypeError:
        return None
constant[Return the time the bundle was created as a datetime object]
from relative_module[datetime] import module[datetime]
<ast.Try object at 0x7da18eb56080> | keyword[def] identifier[new_datetime] ( identifier[self] ):
literal[string]
keyword[from] identifier[datetime] keyword[import] identifier[datetime]
keyword[try] :
keyword[return] identifier[datetime] . identifier[fromtimestamp] ( identifier[self] . identifier[state] . identifier[new] )
keyword[except] identifier[TypeError] :
keyword[return] keyword[None] | def new_datetime(self):
"""Return the time the bundle was created as a datetime object"""
from datetime import datetime
try:
return datetime.fromtimestamp(self.state.new) # depends on [control=['try'], data=[]]
except TypeError:
return None # depends on [control=['except'], data=[]] |
def __record(self, oid=None):
    """Reads and returns a dbf record row as a list of values.

    Returns None if the record is flagged as deleted.  Field values are
    decoded according to their DBF type code:
      - N/F (numeric/float): int or float; None when blank or unparseable
      - D (date): datetime.date; raw stripped value if unparseable
      - L (logical): True/False, or None for space/unknown bytes
      - anything else: decoded, stripped unicode string

    :param oid: record id attached to the returned _Record (may be None)
    """
    f = self.__getFileObj(self.dbf)
    # Unpack one fixed-size row straight from the file at the current offset.
    recordContents = self.__recStruct.unpack(f.read(self.__recStruct.size))
    if recordContents[0] != b' ':
        # deleted record
        return None
    record = []
    for (name, typ, size, deci), value in zip(self.fields, recordContents):
        if name == 'DeletionFlag':
            # The deletion marker was already consumed by the check above.
            continue
        elif typ in ("N","F"):
            # numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field.
            value = value.split(b'\0')[0]
            value = value.replace(b'*', b'')  # QGIS NULL is all '*' chars
            if value == b'':
                value = None
            elif deci:
                # Field declares decimal places, so parse as float.
                try:
                    value = float(value)
                except ValueError:
                    #not parseable as float, set to None
                    value = None
            else:
                # force to int
                try:
                    # first try to force directly to int.
                    # forcing a large int to float and back to int
                    # will lose information and result in wrong nr.
                    value = int(value)
                except ValueError:
                    # forcing directly to int failed, so was probably a float.
                    try:
                        value = int(float(value))
                    except ValueError:
                        #not parseable as int, set to None
                        value = None
        elif typ == 'D':
            # date: 8 bytes - date stored as a string in the format YYYYMMDD.
            if value.count(b'0') == len(value):  # QGIS NULL is all '0' chars
                value = None
            else:
                try:
                    y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
                    value = date(y, m, d)
                except:
                    # Malformed date bytes: fall back to the raw stripped value.
                    value = value.strip()
        elif typ == 'L':
            # logical: 1 byte - initialized to 0x20 (space) otherwise T or F.
            if value == b" ":
                value = None # space means missing or not yet set
            else:
                if value in b'YyTt1':
                    value = True
                elif value in b'NnFf0':
                    value = False
                else:
                    value = None # unknown value is set to missing
        else:
            # anything else is forced to string/unicode
            value = u(value, self.encoding, self.encodingErrors)
            value = value.strip()
        record.append(value)
    return _Record(self.__fieldposition_lookup, record, oid)
constant[Reads and returns a dbf record row as a list of values.]
variable[f] assign[=] call[name[self].__getFileObj, parameter[name[self].dbf]]
variable[recordContents] assign[=] call[name[self].__recStruct.unpack, parameter[call[name[f].read, parameter[name[self].__recStruct.size]]]]
if compare[call[name[recordContents]][constant[0]] not_equal[!=] constant[b' ']] begin[:]
return[constant[None]]
variable[record] assign[=] list[[]]
for taget[tuple[[<ast.Tuple object at 0x7da1b26ad210>, <ast.Name object at 0x7da1b26ad390>]]] in starred[call[name[zip], parameter[name[self].fields, name[recordContents]]]] begin[:]
if compare[name[name] equal[==] constant[DeletionFlag]] begin[:]
continue
call[name[record].append, parameter[name[value]]]
return[call[name[_Record], parameter[name[self].__fieldposition_lookup, name[record], name[oid]]]] | keyword[def] identifier[__record] ( identifier[self] , identifier[oid] = keyword[None] ):
literal[string]
identifier[f] = identifier[self] . identifier[__getFileObj] ( identifier[self] . identifier[dbf] )
identifier[recordContents] = identifier[self] . identifier[__recStruct] . identifier[unpack] ( identifier[f] . identifier[read] ( identifier[self] . identifier[__recStruct] . identifier[size] ))
keyword[if] identifier[recordContents] [ literal[int] ]!= literal[string] :
keyword[return] keyword[None]
identifier[record] =[]
keyword[for] ( identifier[name] , identifier[typ] , identifier[size] , identifier[deci] ), identifier[value] keyword[in] identifier[zip] ( identifier[self] . identifier[fields] , identifier[recordContents] ):
keyword[if] identifier[name] == literal[string] :
keyword[continue]
keyword[elif] identifier[typ] keyword[in] ( literal[string] , literal[string] ):
identifier[value] = identifier[value] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[value] == literal[string] :
identifier[value] = keyword[None]
keyword[elif] identifier[deci] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
identifier[value] = keyword[None]
keyword[else] :
keyword[try] :
identifier[value] = identifier[int] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[value] = identifier[int] ( identifier[float] ( identifier[value] ))
keyword[except] identifier[ValueError] :
identifier[value] = keyword[None]
keyword[elif] identifier[typ] == literal[string] :
keyword[if] identifier[value] . identifier[count] ( literal[string] )== identifier[len] ( identifier[value] ):
identifier[value] = keyword[None]
keyword[else] :
keyword[try] :
identifier[y] , identifier[m] , identifier[d] = identifier[int] ( identifier[value] [: literal[int] ]), identifier[int] ( identifier[value] [ literal[int] : literal[int] ]), identifier[int] ( identifier[value] [ literal[int] : literal[int] ])
identifier[value] = identifier[date] ( identifier[y] , identifier[m] , identifier[d] )
keyword[except] :
identifier[value] = identifier[value] . identifier[strip] ()
keyword[elif] identifier[typ] == literal[string] :
keyword[if] identifier[value] == literal[string] :
identifier[value] = keyword[None]
keyword[else] :
keyword[if] identifier[value] keyword[in] literal[string] :
identifier[value] = keyword[True]
keyword[elif] identifier[value] keyword[in] literal[string] :
identifier[value] = keyword[False]
keyword[else] :
identifier[value] = keyword[None]
keyword[else] :
identifier[value] = identifier[u] ( identifier[value] , identifier[self] . identifier[encoding] , identifier[self] . identifier[encodingErrors] )
identifier[value] = identifier[value] . identifier[strip] ()
identifier[record] . identifier[append] ( identifier[value] )
keyword[return] identifier[_Record] ( identifier[self] . identifier[__fieldposition_lookup] , identifier[record] , identifier[oid] ) | def __record(self, oid=None):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recordContents = self.__recStruct.unpack(f.read(self.__recStruct.size))
if recordContents[0] != b' ': # deleted record
return None # depends on [control=['if'], data=[]]
record = []
for ((name, typ, size, deci), value) in zip(self.fields, recordContents):
if name == 'DeletionFlag':
continue # depends on [control=['if'], data=[]]
elif typ in ('N', 'F'): # numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field.
value = value.split(b'\x00')[0]
value = value.replace(b'*', b'') # QGIS NULL is all '*' chars
if value == b'':
value = None # depends on [control=['if'], data=['value']]
elif deci:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError: #not parseable as float, set to None
value = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else: # force to int
try: # first try to force directly to int.
# forcing a large int to float and back to int
# will lose information and result in wrong nr.
value = int(value) # depends on [control=['try'], data=[]]
except ValueError: # forcing directly to int failed, so was probably a float.
try:
value = int(float(value)) # depends on [control=['try'], data=[]]
except ValueError: #not parseable as int, set to None
value = None # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif typ == 'D': # date: 8 bytes - date stored as a string in the format YYYYMMDD.
if value.count(b'0') == len(value): # QGIS NULL is all '0' chars
value = None # depends on [control=['if'], data=[]]
else:
try:
(y, m, d) = (int(value[:4]), int(value[4:6]), int(value[6:8]))
value = date(y, m, d) # depends on [control=['try'], data=[]]
except:
value = value.strip() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif typ == 'L': # logical: 1 byte - initialized to 0x20 (space) otherwise T or F.
if value == b' ':
value = None # space means missing or not yet set # depends on [control=['if'], data=['value']]
elif value in b'YyTt1':
value = True # depends on [control=['if'], data=['value']]
elif value in b'NnFf0':
value = False # depends on [control=['if'], data=['value']]
else:
value = None # unknown value is set to missing # depends on [control=['if'], data=[]]
else: # anything else is forced to string/unicode
value = u(value, self.encoding, self.encodingErrors)
value = value.strip()
record.append(value) # depends on [control=['for'], data=[]]
return _Record(self.__fieldposition_lookup, record, oid) |
def loadbin(self, fobj, offset=0):
    """Load bin file into internal buffer. Not needed if source set in
    constructor. This will overwrite addresses without warning
    if object was already initialized.
    @param fobj file name or file-like object
    @param offset starting address offset
    """
    # Accept either an already-open file-like object (anything with a
    # ``read`` method) or a path that we open — and close — ourselves.
    reader = getattr(fobj, "read", None)
    opened = None
    if reader is None:
        opened = open(fobj, "rb")
        reader = opened.read
    try:
        self.frombytes(array('B', asbytes(reader())), offset=offset)
    finally:
        # Only close the handle we created; never touch a caller-supplied one.
        if opened is not None:
            opened.close()
constant[Load bin file into internal buffer. Not needed if source set in
constructor. This will overwrite addresses without warning
if object was already initialized.
@param fobj file name or file-like object
@param offset starting address offset
]
variable[fread] assign[=] call[name[getattr], parameter[name[fobj], constant[read], constant[None]]]
if compare[name[fread] is constant[None]] begin[:]
variable[f] assign[=] call[name[open], parameter[name[fobj], constant[rb]]]
variable[fread] assign[=] name[f].read
variable[fclose] assign[=] name[f].close
<ast.Try object at 0x7da204621720> | keyword[def] identifier[loadbin] ( identifier[self] , identifier[fobj] , identifier[offset] = literal[int] ):
literal[string]
identifier[fread] = identifier[getattr] ( identifier[fobj] , literal[string] , keyword[None] )
keyword[if] identifier[fread] keyword[is] keyword[None] :
identifier[f] = identifier[open] ( identifier[fobj] , literal[string] )
identifier[fread] = identifier[f] . identifier[read]
identifier[fclose] = identifier[f] . identifier[close]
keyword[else] :
identifier[fclose] = keyword[None]
keyword[try] :
identifier[self] . identifier[frombytes] ( identifier[array] ( literal[string] , identifier[asbytes] ( identifier[fread] ())), identifier[offset] = identifier[offset] )
keyword[finally] :
keyword[if] identifier[fclose] :
identifier[fclose] () | def loadbin(self, fobj, offset=0):
"""Load bin file into internal buffer. Not needed if source set in
constructor. This will overwrite addresses without warning
if object was already initialized.
@param fobj file name or file-like object
@param offset starting address offset
"""
fread = getattr(fobj, 'read', None)
if fread is None:
f = open(fobj, 'rb')
fread = f.read
fclose = f.close # depends on [control=['if'], data=['fread']]
else:
fclose = None
try:
self.frombytes(array('B', asbytes(fread())), offset=offset) # depends on [control=['try'], data=[]]
finally:
if fclose:
fclose() # depends on [control=['if'], data=[]] |
def parse(self, filename, code):
    """Read and parse a RiveScript document.

    Returns a data structure that represents all of the useful contents of
    the document, in this format::

        {
          "begin": {        # "begin" data
              "global": {}, # map of !global vars
              "var": {},    # bot !var's
              "sub": {},    # !sub substitutions
              "person": {}, # !person substitutions
              "array": {},  # !array lists
          },
          "topics": {       # main reply data
              "random": {   # (topic name)
                  "includes": {}, # map of included topics (values=1)
                  "inherits": {}, # map of inherited topics
                  "triggers": [   # array of triggers
                      {
                          "trigger": "hello bot",
                          "reply": [],      # array of replies
                          "condition": [],  # array of conditions
                          "redirect": None, # redirect command
                          "previous": None, # 'previous' reply
                      },
                      # ...
                  ]
              }
          }
          "objects": [      # parsed object macros
              {
                  "name": "",     # object name
                  "language": "", # programming language
                  "code": [],     # array of lines of code
              }
          ]
        }

    Args:
        filename (str): The name of the file that the code came from, for
            syntax error reporting purposes.
        code (str[]): The source code to parse.

    Returns:
        dict: The aforementioned data structure.

    Raises:
        Exception: On a syntax error when ``self.strict`` is enabled.
    """
    # Eventual returned structure ("abstract syntax tree" but not really)
    ast = {
        "begin": {
            "global": {},
            "var": {},
            "sub": {},
            "person": {},
            "array": {},
        },
        "topics": {},
        "objects": [],
    }

    # Track temporary variables.
    topic = 'random'  # Default topic=random
    lineno = 0        # Line numbers for syntax tracking
    comment = False   # In a multi-line comment
    inobj = False     # In an object
    objname = ''      # The name of the object we're in
    objlang = ''      # The programming language of the object
    objbuf = []       # Object contents buffer
    curtrig = None    # Pointer to the current trigger in ast.topics
    isThat = None     # Is a %Previous trigger

    # Local (file scoped) parser options.
    local_options = dict(
        concat="none",  # Concat mode for ^Continue command
    )

    # Read each line.
    for lp, line in enumerate(code):
        lineno += 1

        self.say("Line: " + line + " (topic: " + topic + ") incomment: " + str(inobj))
        if len(line.strip()) == 0:  # Skip blank lines
            continue

        # In an object?
        if inobj:
            if re.match(RE.objend, line):
                # End the object.
                if len(objname):
                    ast["objects"].append({
                        "name": objname,
                        "language": objlang,
                        "code": objbuf,
                    })
                objname = ''
                objlang = ''
                objbuf = []
                inobj = False
            else:
                objbuf.append(line)
            continue

        line = line.strip()  # Trim excess space. We do it down here so we
                             # don't mess up python objects!
        line = RE.ws.sub(" ", line)  # Replace the multiple whitespaces by single whitespace

        # Look for comments.
        if line[:2] == '//':  # A single-line comment.
            continue
        elif line[0] == '#':
            self.warn("Using the # symbol for comments is deprecated", filename, lineno)
        elif line[:2] == '/*':  # Start of a multi-line comment.
            if '*/' not in line:  # Cancel if the end is here too.
                comment = True
            continue
        elif '*/' in line:
            comment = False
            continue
        if comment:
            continue

        # Separate the command from the data.
        if len(line) < 2:
            self.warn("Weird single-character line '" + line + "' found.", filename, lineno)
            continue
        cmd = line[0]
        line = line[1:].strip()

        # Ignore inline comments if there's a space before the // symbols.
        if " //" in line:
            line = line.split(" //")[0].strip()

        # Run a syntax check on this line.
        syntax_error = self.check_syntax(cmd, line)
        if syntax_error:
            # There was a syntax error! Are we enforcing strict mode?
            syntax_error = "Syntax error in " + filename + " line " + str(lineno) + ": " \
                + syntax_error + " (near: " + cmd + " " + line + ")"
            if self.strict:
                raise Exception(syntax_error)
            else:
                self.warn(syntax_error)
                return  # Don't try to continue

        # Reset the %Previous state if this is a new +Trigger.
        if cmd == '+':
            isThat = None

        # Do a lookahead for ^Continue and %Previous commands.
        for i in range(lp + 1, len(code)):
            lookahead = code[i].strip()
            if len(lookahead) < 2:
                continue
            lookCmd = lookahead[0]
            lookahead = lookahead[1:].strip()
            lookahead = re.sub(RE.space, ' ', lookahead)  # Replace the `\s` in the message

            # Only continue if the lookahead line has any data.
            if len(lookahead) != 0:
                # The lookahead command has to be either a % or a ^.
                if lookCmd != '^' and lookCmd != '%':
                    break

                # If the current command is a +, see if the following is
                # a %.
                if cmd == '+':
                    if lookCmd == '%':
                        isThat = lookahead
                        break
                    else:
                        isThat = None

                # If the current command is a ! and the next command(s) are
                # ^, we'll tack each extension on as a line break (which is
                # useful information for arrays).
                if cmd == '!':
                    if lookCmd == '^':
                        line += "<crlf>" + lookahead
                    continue

                # If the current command is not a ^ and the line after is
                # not a %, but the line after IS a ^, then tack it on to the
                # end of the current line.
                if cmd != '^' and lookCmd != '%':
                    if lookCmd == '^':
                        line += self.concat_modes.get(
                            local_options["concat"], ""
                        ) + lookahead
                    else:
                        break

        self.say("Command: " + cmd + "; line: " + line)

        # Handle the types of RiveScript commands.
        if cmd == '!':
            # ! DEFINE
            halves = re.split(RE.equals, line, 2)
            left = re.split(RE.ws, halves[0].strip(), 2)
            value, type, var = '', '', ''
            if len(halves) == 2:
                value = halves[1].strip()
            if len(left) >= 1:
                type = left[0].strip()
            if len(left) >= 2:
                var = ' '.join(left[1:]).strip()

            # Remove 'fake' line breaks unless this is an array.
            if type != 'array':
                value = re.sub(RE.crlf, '', value)

            # Handle version numbers.
            if type == 'version':
                # Verify we support it.
                try:
                    if float(value) > rs_version:
                        # BUG FIX: wrap rs_version in str() — if rs_version is
                        # a number, "str + number" raised TypeError here, which
                        # the except below swallowed and misreported as a
                        # version-parse failure instead of warning + returning.
                        self.warn("Unsupported RiveScript version. We only support " + str(rs_version), filename, lineno)
                        return
                except (TypeError, ValueError):
                    self.warn("Error parsing RiveScript version number: not a number", filename, lineno)
                continue

            # All other types of defines require a variable and value name.
            if len(var) == 0:
                self.warn("Undefined variable name", filename, lineno)
                continue
            elif len(value) == 0:
                self.warn("Undefined variable value", filename, lineno)
                continue

            # Handle the rest of the types.
            if type == 'local':
                # Local file-scoped parser options.
                self.say("\tSet parser option " + var + " = " + value)
                local_options[var] = value
            elif type == 'global':
                # 'Global' variables
                self.say("\tSet global " + var + " = " + value)

                if value == '<undef>':
                    try:
                        del(ast["begin"]["global"][var])
                    except KeyError:
                        self.warn("Failed to delete missing global variable", filename, lineno)
                else:
                    ast["begin"]["global"][var] = value

                # Handle flipping debug and depth vars.
                if var == 'debug':
                    if value.lower() == 'true':
                        value = True
                    else:
                        value = False
                elif var == 'depth':
                    try:
                        value = int(value)
                    except ValueError:
                        self.warn("Failed to set 'depth' because the value isn't a number!", filename, lineno)
                elif var == 'strict':
                    if value.lower() == 'true':
                        value = True
                    else:
                        value = False
            elif type == 'var':
                # Bot variables
                self.say("\tSet bot variable " + var + " = " + value)

                if value == '<undef>':
                    try:
                        del(ast["begin"]["var"][var])
                    except KeyError:
                        self.warn("Failed to delete missing bot variable", filename, lineno)
                else:
                    ast["begin"]["var"][var] = value
            elif type == 'array':
                # Arrays
                self.say("\tArray " + var + " = " + value)

                if value == '<undef>':
                    try:
                        del(ast["begin"]["array"][var])
                    except KeyError:
                        self.warn("Failed to delete missing array", filename, lineno)
                    continue

                # Did this have multiple parts?
                parts = value.split("<crlf>")

                # Process each line of array data.
                fields = []
                for val in parts:
                    if '|' in val:
                        fields.extend(val.split('|'))
                    else:
                        fields.extend(re.split(RE.ws, val))

                # Convert any remaining '\s' escape codes into spaces.
                # BUG FIX: the original loop rebound its loop variable
                # (``for f in fields: f = f.replace(...)``), which never
                # modified the list — build a new list instead.
                fields = [field.replace("\\s", " ") for field in fields]

                ast["begin"]["array"][var] = fields
            elif type == 'sub':
                # Substitutions
                self.say("\tSubstitution " + var + " => " + value)

                if value == '<undef>':
                    try:
                        del(ast["begin"]["sub"][var])
                    except KeyError:
                        self.warn("Failed to delete missing substitution", filename, lineno)
                else:
                    ast["begin"]["sub"][var] = value
            elif type == 'person':
                # Person Substitutions
                self.say("\tPerson Substitution " + var + " => " + value)

                if value == '<undef>':
                    try:
                        del(ast["begin"]["person"][var])
                    except KeyError:
                        self.warn("Failed to delete missing person substitution", filename, lineno)
                else:
                    ast["begin"]["person"][var] = value
            else:
                self.warn("Unknown definition type '" + type + "'", filename, lineno)
        elif cmd == '>':
            # > LABEL
            temp = re.split(RE.ws, line)
            type = temp[0]
            name = ''
            fields = []
            if len(temp) >= 2:
                name = temp[1]
            if len(temp) >= 3:
                fields = temp[2:]

            # Handle the label types.
            if type == 'begin':
                # The BEGIN block.
                self.say("\tFound the BEGIN block.")
                type = 'topic'
                name = '__begin__'
            if type == 'topic':
                # Starting a new topic.
                self.say("\tSet topic to " + name)
                curtrig = None
                topic = name

                # Initialize the topic tree.
                self._init_topic(ast["topics"], topic)

                # Does this topic include or inherit another one?
                mode = ''  # or 'inherits' or 'includes'
                if len(fields) >= 2:
                    for field in fields:
                        if field == 'includes':
                            mode = 'includes'
                        elif field == 'inherits':
                            mode = 'inherits'
                        elif mode != '':
                            # This topic is either inherited or included.
                            if mode == 'includes':
                                ast["topics"][name]["includes"][field] = 1
                            else:
                                ast["topics"][name]["inherits"][field] = 1
            elif type == 'object':
                # If a field was provided, it should be the programming
                # language.
                lang = None
                if len(fields) > 0:
                    lang = fields[0].lower()

                # Only try to parse a language we support.
                curtrig = None
                if lang is None:
                    self.warn("Trying to parse unknown programming language", filename, lineno)
                    lang = 'python'  # Assume it's Python.

                # We have a handler, so start loading the code.
                objname = name
                objlang = lang
                objbuf = []
                inobj = True
            else:
                self.warn("Unknown label type '" + type + "'", filename, lineno)
        elif cmd == '<':
            # < LABEL
            type = line
            if type == 'begin' or type == 'topic':
                self.say("\tEnd topic label.")
                topic = 'random'
            elif type == 'object':
                self.say("\tEnd object label.")
                inobj = False
        elif cmd == '+':
            # + TRIGGER
            self.say("\tTrigger pattern: " + line)

            # Initialize the topic tree.
            self._init_topic(ast["topics"], topic)
            curtrig = {
                "trigger": line,
                "reply": [],
                "condition": [],
                "redirect": None,
                "previous": isThat,
            }
            ast["topics"][topic]["triggers"].append(curtrig)
        elif cmd == '-':
            # - REPLY
            if curtrig is None:
                self.warn("Response found before trigger", filename, lineno)
                continue
            self.say("\tResponse: " + line)
            curtrig["reply"].append(line.strip())
        elif cmd == '%':
            # % PREVIOUS
            pass  # This was handled above.
        elif cmd == '^':
            # ^ CONTINUE
            pass  # This was handled above.
        elif cmd == '@':
            # @ REDIRECT
            if curtrig is None:
                self.warn("Redirect found before trigger", filename, lineno)
                continue
            self.say("\tRedirect: " + line)
            curtrig["redirect"] = line.strip()
        elif cmd == '*':
            # * CONDITION
            if curtrig is None:
                self.warn("Condition found before trigger", filename, lineno)
                continue
            self.say("\tAdding condition: " + line)
            curtrig["condition"].append(line.strip())
        else:
            self.warn("Unrecognized command \"" + cmd + "\"", filename, lineno)
            continue

    return ast
constant[Read and parse a RiveScript document.
Returns a data structure that represents all of the useful contents of
the document, in this format::
{
"begin": { # "begin" data
"global": {}, # map of !global vars
"var": {}, # bot !var's
"sub": {}, # !sub substitutions
"person": {}, # !person substitutions
"array": {}, # !array lists
},
"topics": { # main reply data
"random": { # (topic name)
"includes": {}, # map of included topics (values=1)
"inherits": {}, # map of inherited topics
"triggers": [ # array of triggers
{
"trigger": "hello bot",
"reply": [], # array of replies
"condition": [], # array of conditions
"redirect": None, # redirect command
"previous": None, # 'previous' reply
},
# ...
]
}
}
"objects": [ # parsed object macros
{
"name": "", # object name
"language": "", # programming language
"code": [], # array of lines of code
}
]
}
Args:
filename (str): The name of the file that the code came from, for
syntax error reporting purposes.
code (str[]): The source code to parse.
Returns:
dict: The aforementioned data structure.
]
variable[ast] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cc550>, <ast.Constant object at 0x7da18c4ccb80>, <ast.Constant object at 0x7da18c4ce110>], [<ast.Dict object at 0x7da18c4cd210>, <ast.Dict object at 0x7da18c4cf730>, <ast.List object at 0x7da18c4cf850>]]
variable[topic] assign[=] constant[random]
variable[lineno] assign[=] constant[0]
variable[comment] assign[=] constant[False]
variable[inobj] assign[=] constant[False]
variable[objname] assign[=] constant[]
variable[objlang] assign[=] constant[]
variable[objbuf] assign[=] list[[]]
variable[curtrig] assign[=] constant[None]
variable[isThat] assign[=] constant[None]
variable[local_options] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18c4ccb50>, <ast.Name object at 0x7da18c4cc280>]]] in starred[call[name[enumerate], parameter[name[code]]]] begin[:]
<ast.AugAssign object at 0x7da18c4cc2e0>
call[name[self].say, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Line: ] + name[line]] + constant[ (topic: ]] + name[topic]] + constant[) incomment: ]] + call[name[str], parameter[name[inobj]]]]]]
if compare[call[name[len], parameter[call[name[line].strip, parameter[]]]] equal[==] constant[0]] begin[:]
continue
if name[inobj] begin[:]
if call[name[re].match, parameter[name[RE].objend, name[line]]] begin[:]
if call[name[len], parameter[name[objname]]] begin[:]
call[call[name[ast]][constant[objects]].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0546470>, <ast.Constant object at 0x7da1b0545e10>, <ast.Constant object at 0x7da1b0544190>], [<ast.Name object at 0x7da1b0547cd0>, <ast.Name object at 0x7da1b0546230>, <ast.Name object at 0x7da1b05463b0>]]]]
variable[objname] assign[=] constant[]
variable[objlang] assign[=] constant[]
variable[objbuf] assign[=] list[[]]
variable[inobj] assign[=] constant[False]
continue
variable[line] assign[=] call[name[line].strip, parameter[]]
variable[line] assign[=] call[name[RE].ws.sub, parameter[constant[ ], name[line]]]
if compare[call[name[line]][<ast.Slice object at 0x7da18c4cec20>] equal[==] constant[//]] begin[:]
continue
if name[comment] begin[:]
continue
if compare[call[name[len], parameter[name[line]]] less[<] constant[2]] begin[:]
call[name[self].warn, parameter[binary_operation[binary_operation[constant[Weird single-character line '] + name[line]] + constant[' found.]], name[filename], name[lineno]]]
continue
variable[cmd] assign[=] call[name[line]][constant[0]]
variable[line] assign[=] call[call[name[line]][<ast.Slice object at 0x7da18c4cc490>].strip, parameter[]]
if compare[constant[ //] in name[line]] begin[:]
variable[line] assign[=] call[call[call[name[line].split, parameter[constant[ //]]]][constant[0]].strip, parameter[]]
variable[syntax_error] assign[=] call[name[self].check_syntax, parameter[name[cmd], name[line]]]
if name[syntax_error] begin[:]
variable[syntax_error] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Syntax error in ] + name[filename]] + constant[ line ]] + call[name[str], parameter[name[lineno]]]] + constant[: ]] + name[syntax_error]] + constant[ (near: ]] + name[cmd]] + constant[ ]] + name[line]] + constant[)]]
if name[self].strict begin[:]
<ast.Raise object at 0x7da18c4cd720>
if compare[name[cmd] equal[==] constant[+]] begin[:]
variable[isThat] assign[=] constant[None]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[lp] + constant[1]], call[name[len], parameter[name[code]]]]]] begin[:]
variable[lookahead] assign[=] call[call[name[code]][name[i]].strip, parameter[]]
if compare[call[name[len], parameter[name[lookahead]]] less[<] constant[2]] begin[:]
continue
variable[lookCmd] assign[=] call[name[lookahead]][constant[0]]
variable[lookahead] assign[=] call[call[name[lookahead]][<ast.Slice object at 0x7da204623970>].strip, parameter[]]
variable[lookahead] assign[=] call[name[re].sub, parameter[name[RE].space, constant[ ], name[lookahead]]]
if compare[call[name[len], parameter[name[lookahead]]] not_equal[!=] constant[0]] begin[:]
if <ast.BoolOp object at 0x7da2046239a0> begin[:]
break
if compare[name[cmd] equal[==] constant[+]] begin[:]
if compare[name[lookCmd] equal[==] constant[%]] begin[:]
variable[isThat] assign[=] name[lookahead]
break
if compare[name[cmd] equal[==] constant[!]] begin[:]
if compare[name[lookCmd] equal[==] constant[^]] begin[:]
<ast.AugAssign object at 0x7da204623940>
continue
if <ast.BoolOp object at 0x7da204620f40> begin[:]
if compare[name[lookCmd] equal[==] constant[^]] begin[:]
<ast.AugAssign object at 0x7da2046233d0>
call[name[self].say, parameter[binary_operation[binary_operation[binary_operation[constant[Command: ] + name[cmd]] + constant[; line: ]] + name[line]]]]
if compare[name[cmd] equal[==] constant[!]] begin[:]
variable[halves] assign[=] call[name[re].split, parameter[name[RE].equals, name[line], constant[2]]]
variable[left] assign[=] call[name[re].split, parameter[name[RE].ws, call[call[name[halves]][constant[0]].strip, parameter[]], constant[2]]]
<ast.Tuple object at 0x7da204623220> assign[=] tuple[[<ast.Constant object at 0x7da204620a90>, <ast.Constant object at 0x7da2046212a0>, <ast.Constant object at 0x7da204620850>]]
if compare[call[name[len], parameter[name[halves]]] equal[==] constant[2]] begin[:]
variable[value] assign[=] call[call[name[halves]][constant[1]].strip, parameter[]]
if compare[call[name[len], parameter[name[left]]] greater_or_equal[>=] constant[1]] begin[:]
variable[type] assign[=] call[call[name[left]][constant[0]].strip, parameter[]]
if compare[call[name[len], parameter[name[left]]] greater_or_equal[>=] constant[2]] begin[:]
variable[var] assign[=] call[call[constant[ ].join, parameter[call[name[left]][<ast.Slice object at 0x7da204623040>]]].strip, parameter[]]
if compare[name[type] not_equal[!=] constant[array]] begin[:]
variable[value] assign[=] call[name[re].sub, parameter[name[RE].crlf, constant[], name[value]]]
if compare[name[type] equal[==] constant[version]] begin[:]
<ast.Try object at 0x7da2046218a0>
continue
if compare[call[name[len], parameter[name[var]]] equal[==] constant[0]] begin[:]
call[name[self].warn, parameter[constant[Undefined variable name], name[filename], name[lineno]]]
continue
if compare[name[type] equal[==] constant[local]] begin[:]
call[name[self].say, parameter[binary_operation[binary_operation[binary_operation[constant[ Set parser option ] + name[var]] + constant[ = ]] + name[value]]]]
call[name[local_options]][name[var]] assign[=] name[value]
return[name[ast]] | keyword[def] identifier[parse] ( identifier[self] , identifier[filename] , identifier[code] ):
literal[string]
identifier[ast] ={
literal[string] :{
literal[string] :{},
literal[string] :{},
literal[string] :{},
literal[string] :{},
literal[string] :{},
},
literal[string] :{},
literal[string] :[],
}
identifier[topic] = literal[string]
identifier[lineno] = literal[int]
identifier[comment] = keyword[False]
identifier[inobj] = keyword[False]
identifier[objname] = literal[string]
identifier[objlang] = literal[string]
identifier[objbuf] =[]
identifier[curtrig] = keyword[None]
identifier[isThat] = keyword[None]
identifier[local_options] = identifier[dict] (
identifier[concat] = literal[string] ,
)
keyword[for] identifier[lp] , identifier[line] keyword[in] identifier[enumerate] ( identifier[code] ):
identifier[lineno] += literal[int]
identifier[self] . identifier[say] ( literal[string] + identifier[line] + literal[string] + identifier[topic] + literal[string] + identifier[str] ( identifier[inobj] ))
keyword[if] identifier[len] ( identifier[line] . identifier[strip] ())== literal[int] :
keyword[continue]
keyword[if] identifier[inobj] :
keyword[if] identifier[re] . identifier[match] ( identifier[RE] . identifier[objend] , identifier[line] ):
keyword[if] identifier[len] ( identifier[objname] ):
identifier[ast] [ literal[string] ]. identifier[append] ({
literal[string] : identifier[objname] ,
literal[string] : identifier[objlang] ,
literal[string] : identifier[objbuf] ,
})
identifier[objname] = literal[string]
identifier[objlang] = literal[string]
identifier[objbuf] =[]
identifier[inobj] = keyword[False]
keyword[else] :
identifier[objbuf] . identifier[append] ( identifier[line] )
keyword[continue]
identifier[line] = identifier[line] . identifier[strip] ()
identifier[line] = identifier[RE] . identifier[ws] . identifier[sub] ( literal[string] , identifier[line] )
keyword[if] identifier[line] [: literal[int] ]== literal[string] :
keyword[continue]
keyword[elif] identifier[line] [ literal[int] ]== literal[string] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[elif] identifier[line] [: literal[int] ]== literal[string] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[line] :
identifier[comment] = keyword[True]
keyword[continue]
keyword[elif] literal[string] keyword[in] identifier[line] :
identifier[comment] = keyword[False]
keyword[continue]
keyword[if] identifier[comment] :
keyword[continue]
keyword[if] identifier[len] ( identifier[line] )< literal[int] :
identifier[self] . identifier[warn] ( literal[string] + identifier[line] + literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
identifier[cmd] = identifier[line] [ literal[int] ]
identifier[line] = identifier[line] [ literal[int] :]. identifier[strip] ()
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[line] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
identifier[syntax_error] = identifier[self] . identifier[check_syntax] ( identifier[cmd] , identifier[line] )
keyword[if] identifier[syntax_error] :
identifier[syntax_error] = literal[string] + identifier[filename] + literal[string] + identifier[str] ( identifier[lineno] )+ literal[string] + identifier[syntax_error] + literal[string] + identifier[cmd] + literal[string] + identifier[line] + literal[string]
keyword[if] identifier[self] . identifier[strict] :
keyword[raise] identifier[Exception] ( identifier[syntax_error] )
keyword[else] :
identifier[self] . identifier[warn] ( identifier[syntax_error] )
keyword[return]
keyword[if] identifier[cmd] == literal[string] :
identifier[isThat] = keyword[None]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[lp] + literal[int] , identifier[len] ( identifier[code] )):
identifier[lookahead] = identifier[code] [ identifier[i] ]. identifier[strip] ()
keyword[if] identifier[len] ( identifier[lookahead] )< literal[int] :
keyword[continue]
identifier[lookCmd] = identifier[lookahead] [ literal[int] ]
identifier[lookahead] = identifier[lookahead] [ literal[int] :]. identifier[strip] ()
identifier[lookahead] = identifier[re] . identifier[sub] ( identifier[RE] . identifier[space] , literal[string] , identifier[lookahead] )
keyword[if] identifier[len] ( identifier[lookahead] )!= literal[int] :
keyword[if] identifier[lookCmd] != literal[string] keyword[and] identifier[lookCmd] != literal[string] :
keyword[break]
keyword[if] identifier[cmd] == literal[string] :
keyword[if] identifier[lookCmd] == literal[string] :
identifier[isThat] = identifier[lookahead]
keyword[break]
keyword[else] :
identifier[isThat] = keyword[None]
keyword[if] identifier[cmd] == literal[string] :
keyword[if] identifier[lookCmd] == literal[string] :
identifier[line] += literal[string] + identifier[lookahead]
keyword[continue]
keyword[if] identifier[cmd] != literal[string] keyword[and] identifier[lookCmd] != literal[string] :
keyword[if] identifier[lookCmd] == literal[string] :
identifier[line] += identifier[self] . identifier[concat_modes] . identifier[get] (
identifier[local_options] [ literal[string] ], literal[string]
)+ identifier[lookahead]
keyword[else] :
keyword[break]
identifier[self] . identifier[say] ( literal[string] + identifier[cmd] + literal[string] + identifier[line] )
keyword[if] identifier[cmd] == literal[string] :
identifier[halves] = identifier[re] . identifier[split] ( identifier[RE] . identifier[equals] , identifier[line] , literal[int] )
identifier[left] = identifier[re] . identifier[split] ( identifier[RE] . identifier[ws] , identifier[halves] [ literal[int] ]. identifier[strip] (), literal[int] )
identifier[value] , identifier[type] , identifier[var] = literal[string] , literal[string] , literal[string]
keyword[if] identifier[len] ( identifier[halves] )== literal[int] :
identifier[value] = identifier[halves] [ literal[int] ]. identifier[strip] ()
keyword[if] identifier[len] ( identifier[left] )>= literal[int] :
identifier[type] = identifier[left] [ literal[int] ]. identifier[strip] ()
keyword[if] identifier[len] ( identifier[left] )>= literal[int] :
identifier[var] = literal[string] . identifier[join] ( identifier[left] [ literal[int] :]). identifier[strip] ()
keyword[if] identifier[type] != literal[string] :
identifier[value] = identifier[re] . identifier[sub] ( identifier[RE] . identifier[crlf] , literal[string] , identifier[value] )
keyword[if] identifier[type] == literal[string] :
keyword[try] :
keyword[if] identifier[float] ( identifier[value] )> identifier[rs_version] :
identifier[self] . identifier[warn] ( literal[string] + identifier[rs_version] , identifier[filename] , identifier[lineno] )
keyword[return]
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
keyword[if] identifier[len] ( identifier[var] )== literal[int] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
keyword[elif] identifier[len] ( identifier[value] )== literal[int] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
keyword[if] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[var] + literal[string] + identifier[value] )
identifier[local_options] [ identifier[var] ]= identifier[value]
keyword[elif] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[var] + literal[string] + identifier[value] )
keyword[if] identifier[value] == literal[string] :
keyword[try] :
keyword[del] ( identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ])
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[else] :
identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ]= identifier[value]
keyword[if] identifier[var] == literal[string] :
keyword[if] identifier[value] . identifier[lower] ()== literal[string] :
identifier[value] = keyword[True]
keyword[else] :
identifier[value] = keyword[False]
keyword[elif] identifier[var] == literal[string] :
keyword[try] :
identifier[value] = identifier[int] ( identifier[value] )
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[elif] identifier[var] == literal[string] :
keyword[if] identifier[value] . identifier[lower] ()== literal[string] :
identifier[value] = keyword[True]
keyword[else] :
identifier[value] = keyword[False]
keyword[elif] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[var] + literal[string] + identifier[value] )
keyword[if] identifier[value] == literal[string] :
keyword[try] :
keyword[del] ( identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ])
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[else] :
identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ]= identifier[value]
keyword[elif] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[var] + literal[string] + identifier[value] )
keyword[if] identifier[value] == literal[string] :
keyword[try] :
keyword[del] ( identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ])
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
identifier[parts] = identifier[value] . identifier[split] ( literal[string] )
identifier[fields] =[]
keyword[for] identifier[val] keyword[in] identifier[parts] :
keyword[if] literal[string] keyword[in] identifier[val] :
identifier[fields] . identifier[extend] ( identifier[val] . identifier[split] ( literal[string] ))
keyword[else] :
identifier[fields] . identifier[extend] ( identifier[re] . identifier[split] ( identifier[RE] . identifier[ws] , identifier[val] ))
keyword[for] identifier[f] keyword[in] identifier[fields] :
identifier[f] = identifier[f] . identifier[replace] ( literal[string] , literal[string] )
identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ]= identifier[fields]
keyword[elif] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[var] + literal[string] + identifier[value] )
keyword[if] identifier[value] == literal[string] :
keyword[try] :
keyword[del] ( identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ])
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[else] :
identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ]= identifier[value]
keyword[elif] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[var] + literal[string] + identifier[value] )
keyword[if] identifier[value] == literal[string] :
keyword[try] :
keyword[del] ( identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ])
keyword[except] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[else] :
identifier[ast] [ literal[string] ][ literal[string] ][ identifier[var] ]= identifier[value]
keyword[else] :
identifier[self] . identifier[warn] ( literal[string] + identifier[type] + literal[string] , identifier[filename] , identifier[lineno] )
keyword[elif] identifier[cmd] == literal[string] :
identifier[temp] = identifier[re] . identifier[split] ( identifier[RE] . identifier[ws] , identifier[line] )
identifier[type] = identifier[temp] [ literal[int] ]
identifier[name] = literal[string]
identifier[fields] =[]
keyword[if] identifier[len] ( identifier[temp] )>= literal[int] :
identifier[name] = identifier[temp] [ literal[int] ]
keyword[if] identifier[len] ( identifier[temp] )>= literal[int] :
identifier[fields] = identifier[temp] [ literal[int] :]
keyword[if] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] )
identifier[type] = literal[string]
identifier[name] = literal[string]
keyword[if] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[name] )
identifier[curtrig] = keyword[None]
identifier[topic] = identifier[name]
identifier[self] . identifier[_init_topic] ( identifier[ast] [ literal[string] ], identifier[topic] )
identifier[mode] = literal[string]
keyword[if] identifier[len] ( identifier[fields] )>= literal[int] :
keyword[for] identifier[field] keyword[in] identifier[fields] :
keyword[if] identifier[field] == literal[string] :
identifier[mode] = literal[string]
keyword[elif] identifier[field] == literal[string] :
identifier[mode] = literal[string]
keyword[elif] identifier[mode] != literal[string] :
keyword[if] identifier[mode] == literal[string] :
identifier[ast] [ literal[string] ][ identifier[name] ][ literal[string] ][ identifier[field] ]= literal[int]
keyword[else] :
identifier[ast] [ literal[string] ][ identifier[name] ][ literal[string] ][ identifier[field] ]= literal[int]
keyword[elif] identifier[type] == literal[string] :
identifier[lang] = keyword[None]
keyword[if] identifier[len] ( identifier[fields] )> literal[int] :
identifier[lang] = identifier[fields] [ literal[int] ]. identifier[lower] ()
identifier[curtrig] = keyword[None]
keyword[if] identifier[lang] keyword[is] keyword[None] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
identifier[lang] = literal[string]
identifier[objname] = identifier[name]
identifier[objlang] = identifier[lang]
identifier[objbuf] =[]
identifier[inobj] = keyword[True]
keyword[else] :
identifier[self] . identifier[warn] ( literal[string] + identifier[type] + literal[string] , identifier[filename] , identifier[lineno] )
keyword[elif] identifier[cmd] == literal[string] :
identifier[type] = identifier[line]
keyword[if] identifier[type] == literal[string] keyword[or] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] )
identifier[topic] = literal[string]
keyword[elif] identifier[type] == literal[string] :
identifier[self] . identifier[say] ( literal[string] )
identifier[inobj] = keyword[False]
keyword[elif] identifier[cmd] == literal[string] :
identifier[self] . identifier[say] ( literal[string] + identifier[line] )
identifier[self] . identifier[_init_topic] ( identifier[ast] [ literal[string] ], identifier[topic] )
identifier[curtrig] ={
literal[string] : identifier[line] ,
literal[string] :[],
literal[string] :[],
literal[string] : keyword[None] ,
literal[string] : identifier[isThat] ,
}
identifier[ast] [ literal[string] ][ identifier[topic] ][ literal[string] ]. identifier[append] ( identifier[curtrig] )
keyword[elif] identifier[cmd] == literal[string] :
keyword[if] identifier[curtrig] keyword[is] keyword[None] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
identifier[self] . identifier[say] ( literal[string] + identifier[line] )
identifier[curtrig] [ literal[string] ]. identifier[append] ( identifier[line] . identifier[strip] ())
keyword[elif] identifier[cmd] == literal[string] :
keyword[pass]
keyword[elif] identifier[cmd] == literal[string] :
keyword[pass]
keyword[elif] identifier[cmd] == literal[string] :
keyword[if] identifier[curtrig] keyword[is] keyword[None] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
identifier[self] . identifier[say] ( literal[string] + identifier[line] )
identifier[curtrig] [ literal[string] ]= identifier[line] . identifier[strip] ()
keyword[elif] identifier[cmd] == literal[string] :
keyword[if] identifier[curtrig] keyword[is] keyword[None] :
identifier[self] . identifier[warn] ( literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
identifier[self] . identifier[say] ( literal[string] + identifier[line] )
identifier[curtrig] [ literal[string] ]. identifier[append] ( identifier[line] . identifier[strip] ())
keyword[else] :
identifier[self] . identifier[warn] ( literal[string] + identifier[cmd] + literal[string] , identifier[filename] , identifier[lineno] )
keyword[continue]
keyword[return] identifier[ast] | def parse(self, filename, code):
"""Read and parse a RiveScript document.
Returns a data structure that represents all of the useful contents of
the document, in this format::
{
"begin": { # "begin" data
"global": {}, # map of !global vars
"var": {}, # bot !var's
"sub": {}, # !sub substitutions
"person": {}, # !person substitutions
"array": {}, # !array lists
},
"topics": { # main reply data
"random": { # (topic name)
"includes": {}, # map of included topics (values=1)
"inherits": {}, # map of inherited topics
"triggers": [ # array of triggers
{
"trigger": "hello bot",
"reply": [], # array of replies
"condition": [], # array of conditions
"redirect": None, # redirect command
"previous": None, # 'previous' reply
},
# ...
]
}
}
"objects": [ # parsed object macros
{
"name": "", # object name
"language": "", # programming language
"code": [], # array of lines of code
}
]
}
Args:
filename (str): The name of the file that the code came from, for
syntax error reporting purposes.
code (str[]): The source code to parse.
Returns:
dict: The aforementioned data structure.
"""
# Eventual returned structure ("abstract syntax tree" but not really)
ast = {'begin': {'global': {}, 'var': {}, 'sub': {}, 'person': {}, 'array': {}}, 'topics': {}, 'objects': []}
# Track temporary variables.
topic = 'random' # Default topic=random
lineno = 0 # Line numbers for syntax tracking
comment = False # In a multi-line comment
inobj = False # In an object
objname = '' # The name of the object we're in
objlang = '' # The programming language of the object
objbuf = [] # Object contents buffer
curtrig = None # Pointer to the current trigger in ast.topics
isThat = None # Is a %Previous trigger
# Local (file scoped) parser options.
# Concat mode for ^Continue command
local_options = dict(concat='none')
# Read each line.
for (lp, line) in enumerate(code):
lineno += 1
self.say('Line: ' + line + ' (topic: ' + topic + ') incomment: ' + str(inobj))
if len(line.strip()) == 0: # Skip blank lines
continue # depends on [control=['if'], data=[]]
# In an object?
if inobj:
if re.match(RE.objend, line):
# End the object.
if len(objname):
ast['objects'].append({'name': objname, 'language': objlang, 'code': objbuf}) # depends on [control=['if'], data=[]]
objname = ''
objlang = ''
objbuf = []
inobj = False # depends on [control=['if'], data=[]]
else:
objbuf.append(line)
continue # depends on [control=['if'], data=[]]
line = line.strip() # Trim excess space. We do it down here so we
# don't mess up python objects!
line = RE.ws.sub(' ', line) # Replace the multiple whitespaces by single whitespace
# Look for comments.
if line[:2] == '//': # A single-line comment.
continue # depends on [control=['if'], data=[]]
elif line[0] == '#':
self.warn('Using the # symbol for comments is deprecated', filename, lineno) # depends on [control=['if'], data=[]]
elif line[:2] == '/*': # Start of a multi-line comment.
if '*/' not in line: # Cancel if the end is here too.
comment = True # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
elif '*/' in line:
comment = False
continue # depends on [control=['if'], data=[]]
if comment:
continue # depends on [control=['if'], data=[]]
# Separate the command from the data.
if len(line) < 2:
self.warn("Weird single-character line '" + line + "' found.", filename, lineno)
continue # depends on [control=['if'], data=[]]
cmd = line[0]
line = line[1:].strip()
# Ignore inline comments if there's a space before the // symbols.
if ' //' in line:
line = line.split(' //')[0].strip() # depends on [control=['if'], data=['line']]
# Run a syntax check on this line.
syntax_error = self.check_syntax(cmd, line)
if syntax_error:
# There was a syntax error! Are we enforcing strict mode?
syntax_error = 'Syntax error in ' + filename + ' line ' + str(lineno) + ': ' + syntax_error + ' (near: ' + cmd + ' ' + line + ')'
if self.strict:
raise Exception(syntax_error) # depends on [control=['if'], data=[]]
else:
self.warn(syntax_error)
return # Don't try to continue # depends on [control=['if'], data=[]]
# Reset the %Previous state if this is a new +Trigger.
if cmd == '+':
isThat = None # depends on [control=['if'], data=[]]
# Do a lookahead for ^Continue and %Previous commands.
for i in range(lp + 1, len(code)):
lookahead = code[i].strip()
if len(lookahead) < 2:
continue # depends on [control=['if'], data=[]]
lookCmd = lookahead[0]
lookahead = lookahead[1:].strip()
lookahead = re.sub(RE.space, ' ', lookahead) # Replace the `\s` in the message
# Only continue if the lookahead line has any data.
if len(lookahead) != 0:
# The lookahead command has to be either a % or a ^.
if lookCmd != '^' and lookCmd != '%':
break # depends on [control=['if'], data=[]]
# If the current command is a +, see if the following is
# a %.
if cmd == '+':
if lookCmd == '%':
isThat = lookahead
break # depends on [control=['if'], data=[]]
else:
isThat = None # depends on [control=['if'], data=[]]
# If the current command is a ! and the next command(s) are
# ^, we'll tack each extension on as a line break (which is
# useful information for arrays).
if cmd == '!':
if lookCmd == '^':
line += '<crlf>' + lookahead # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
# If the current command is not a ^ and the line after is
# not a %, but the line after IS a ^, then tack it on to the
# end of the current line.
if cmd != '^' and lookCmd != '%':
if lookCmd == '^':
line += self.concat_modes.get(local_options['concat'], '') + lookahead # depends on [control=['if'], data=[]]
else:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
self.say('Command: ' + cmd + '; line: ' + line)
# Handle the types of RiveScript commands.
if cmd == '!':
# ! DEFINE
halves = re.split(RE.equals, line, 2)
left = re.split(RE.ws, halves[0].strip(), 2)
(value, type, var) = ('', '', '')
if len(halves) == 2:
value = halves[1].strip() # depends on [control=['if'], data=[]]
if len(left) >= 1:
type = left[0].strip()
if len(left) >= 2:
var = ' '.join(left[1:]).strip() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Remove 'fake' line breaks unless this is an array.
if type != 'array':
value = re.sub(RE.crlf, '', value) # depends on [control=['if'], data=[]]
# Handle version numbers.
if type == 'version':
# Verify we support it.
try:
if float(value) > rs_version:
self.warn('Unsupported RiveScript version. We only support ' + rs_version, filename, lineno)
return # depends on [control=['if'], data=['rs_version']] # depends on [control=['try'], data=[]]
except:
self.warn('Error parsing RiveScript version number: not a number', filename, lineno) # depends on [control=['except'], data=[]]
continue # depends on [control=['if'], data=[]]
# All other types of defines require a variable and value name.
if len(var) == 0:
self.warn('Undefined variable name', filename, lineno)
continue # depends on [control=['if'], data=[]]
elif len(value) == 0:
self.warn('Undefined variable value', filename, lineno)
continue # depends on [control=['if'], data=[]]
# Handle the rest of the types.
if type == 'local':
# Local file-scoped parser options.
self.say('\tSet parser option ' + var + ' = ' + value)
local_options[var] = value # depends on [control=['if'], data=[]]
elif type == 'global':
# 'Global' variables
self.say('\tSet global ' + var + ' = ' + value)
if value == '<undef>':
try:
del ast['begin']['global'][var] # depends on [control=['try'], data=[]]
except:
self.warn('Failed to delete missing global variable', filename, lineno) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
ast['begin']['global'][var] = value
# Handle flipping debug and depth vars.
if var == 'debug':
if value.lower() == 'true':
value = True # depends on [control=['if'], data=[]]
else:
value = False # depends on [control=['if'], data=[]]
elif var == 'depth':
try:
value = int(value) # depends on [control=['try'], data=[]]
except:
self.warn("Failed to set 'depth' because the value isn't a number!", filename, lineno) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif var == 'strict':
if value.lower() == 'true':
value = True # depends on [control=['if'], data=[]]
else:
value = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type == 'var':
# Bot variables
self.say('\tSet bot variable ' + var + ' = ' + value)
if value == '<undef>':
try:
del ast['begin']['var'][var] # depends on [control=['try'], data=[]]
except:
self.warn('Failed to delete missing bot variable', filename, lineno) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
ast['begin']['var'][var] = value # depends on [control=['if'], data=[]]
elif type == 'array':
# Arrays
self.say('\tArray ' + var + ' = ' + value)
if value == '<undef>':
try:
del ast['begin']['array'][var] # depends on [control=['try'], data=[]]
except:
self.warn('Failed to delete missing array', filename, lineno) # depends on [control=['except'], data=[]]
continue # depends on [control=['if'], data=[]]
# Did this have multiple parts?
parts = value.split('<crlf>')
# Process each line of array data.
fields = []
for val in parts:
if '|' in val:
fields.extend(val.split('|')) # depends on [control=['if'], data=['val']]
else:
fields.extend(re.split(RE.ws, val)) # depends on [control=['for'], data=['val']]
# Convert any remaining '\s' escape codes into spaces.
for f in fields:
f = f.replace('\\s', ' ') # depends on [control=['for'], data=['f']]
ast['begin']['array'][var] = fields # depends on [control=['if'], data=[]]
elif type == 'sub':
# Substitutions
self.say('\tSubstitution ' + var + ' => ' + value)
if value == '<undef>':
try:
del ast['begin']['sub'][var] # depends on [control=['try'], data=[]]
except:
self.warn('Failed to delete missing substitution', filename, lineno) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
ast['begin']['sub'][var] = value # depends on [control=['if'], data=[]]
elif type == 'person':
# Person Substitutions
self.say('\tPerson Substitution ' + var + ' => ' + value)
if value == '<undef>':
try:
del ast['begin']['person'][var] # depends on [control=['try'], data=[]]
except:
self.warn('Failed to delete missing person substitution', filename, lineno) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
ast['begin']['person'][var] = value # depends on [control=['if'], data=[]]
else:
self.warn("Unknown definition type '" + type + "'", filename, lineno) # depends on [control=['if'], data=[]]
elif cmd == '>':
# > LABEL
temp = re.split(RE.ws, line)
type = temp[0]
name = ''
fields = []
if len(temp) >= 2:
name = temp[1] # depends on [control=['if'], data=[]]
if len(temp) >= 3:
fields = temp[2:] # depends on [control=['if'], data=[]]
# Handle the label types.
if type == 'begin':
# The BEGIN block.
self.say('\tFound the BEGIN block.')
type = 'topic'
name = '__begin__' # depends on [control=['if'], data=['type']]
if type == 'topic':
# Starting a new topic.
self.say('\tSet topic to ' + name)
curtrig = None
topic = name
# Initialize the topic tree.
self._init_topic(ast['topics'], topic)
# Does this topic include or inherit another one?
mode = '' # or 'inherits' or 'includes'
if len(fields) >= 2:
for field in fields:
if field == 'includes':
mode = 'includes' # depends on [control=['if'], data=[]]
elif field == 'inherits':
mode = 'inherits' # depends on [control=['if'], data=[]]
elif mode != '':
# This topic is either inherited or included.
if mode == 'includes':
ast['topics'][name]['includes'][field] = 1 # depends on [control=['if'], data=[]]
else:
ast['topics'][name]['inherits'][field] = 1 # depends on [control=['if'], data=['mode']] # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type == 'object':
# If a field was provided, it should be the programming
# language.
lang = None
if len(fields) > 0:
lang = fields[0].lower() # depends on [control=['if'], data=[]]
# Only try to parse a language we support.
curtrig = None
if lang is None:
self.warn('Trying to parse unknown programming language', filename, lineno)
lang = 'python' # Assume it's Python. # depends on [control=['if'], data=['lang']]
# We have a handler, so start loading the code.
objname = name
objlang = lang
objbuf = []
inobj = True # depends on [control=['if'], data=[]]
else:
self.warn("Unknown label type '" + type + "'", filename, lineno) # depends on [control=['if'], data=[]]
elif cmd == '<':
# < LABEL
type = line
if type == 'begin' or type == 'topic':
self.say('\tEnd topic label.')
topic = 'random' # depends on [control=['if'], data=[]]
elif type == 'object':
self.say('\tEnd object label.')
inobj = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif cmd == '+':
# + TRIGGER
self.say('\tTrigger pattern: ' + line)
# Initialize the topic tree.
self._init_topic(ast['topics'], topic)
curtrig = {'trigger': line, 'reply': [], 'condition': [], 'redirect': None, 'previous': isThat}
ast['topics'][topic]['triggers'].append(curtrig) # depends on [control=['if'], data=[]]
elif cmd == '-':
# - REPLY
if curtrig is None:
self.warn('Response found before trigger', filename, lineno)
continue # depends on [control=['if'], data=[]]
self.say('\tResponse: ' + line)
curtrig['reply'].append(line.strip()) # depends on [control=['if'], data=[]]
elif cmd == '%':
# % PREVIOUS
pass # This was handled above. # depends on [control=['if'], data=[]]
elif cmd == '^':
# ^ CONTINUE
pass # This was handled above. # depends on [control=['if'], data=[]]
elif cmd == '@':
# @ REDIRECT
if curtrig is None:
self.warn('Redirect found before trigger', filename, lineno)
continue # depends on [control=['if'], data=[]]
self.say('\tRedirect: ' + line)
curtrig['redirect'] = line.strip() # depends on [control=['if'], data=[]]
elif cmd == '*':
# * CONDITION
if curtrig is None:
self.warn('Condition found before trigger', filename, lineno)
continue # depends on [control=['if'], data=[]]
self.say('\tAdding condition: ' + line)
curtrig['condition'].append(line.strip()) # depends on [control=['if'], data=[]]
else:
self.warn('Unrecognized command "' + cmd + '"', filename, lineno)
continue # depends on [control=['for'], data=[]]
return ast |
def query(self, table_name, hash_key_value, range_key_conditions=None,
attributes_to_get=None, limit=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
object_hook=None):
"""
Perform a query of DynamoDB. This version is currently punting
and expecting you to provide a full and correct JSON body
which is passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to query.
:type hash_key_value: dict
:param key: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specified forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name,
'HashKeyValue': hash_key_value}
if range_key_conditions:
data['RangeKeyCondition'] = range_key_conditions
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if consistent_read:
data['ConsistentRead'] = True
if scan_index_forward:
data['ScanIndexForward'] = True
else:
data['ScanIndexForward'] = False
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Query', json_input,
object_hook=object_hook) | def function[query, parameter[self, table_name, hash_key_value, range_key_conditions, attributes_to_get, limit, consistent_read, scan_index_forward, exclusive_start_key, object_hook]]:
constant[
Perform a query of DynamoDB. This version is currently punting
and expecting you to provide a full and correct JSON body
which is passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to query.
:type hash_key_value: dict
:param key: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specified forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b253cf70>, <ast.Constant object at 0x7da1b253c340>], [<ast.Name object at 0x7da1b253ec80>, <ast.Name object at 0x7da1b253ee30>]]
if name[range_key_conditions] begin[:]
call[name[data]][constant[RangeKeyCondition]] assign[=] name[range_key_conditions]
if name[attributes_to_get] begin[:]
call[name[data]][constant[AttributesToGet]] assign[=] name[attributes_to_get]
if name[limit] begin[:]
call[name[data]][constant[Limit]] assign[=] name[limit]
if name[consistent_read] begin[:]
call[name[data]][constant[ConsistentRead]] assign[=] constant[True]
if name[scan_index_forward] begin[:]
call[name[data]][constant[ScanIndexForward]] assign[=] constant[True]
if name[exclusive_start_key] begin[:]
call[name[data]][constant[ExclusiveStartKey]] assign[=] name[exclusive_start_key]
variable[json_input] assign[=] call[name[json].dumps, parameter[name[data]]]
return[call[name[self].make_request, parameter[constant[Query], name[json_input]]]] | keyword[def] identifier[query] ( identifier[self] , identifier[table_name] , identifier[hash_key_value] , identifier[range_key_conditions] = keyword[None] ,
identifier[attributes_to_get] = keyword[None] , identifier[limit] = keyword[None] , identifier[consistent_read] = keyword[False] ,
identifier[scan_index_forward] = keyword[True] , identifier[exclusive_start_key] = keyword[None] ,
identifier[object_hook] = keyword[None] ):
literal[string]
identifier[data] ={ literal[string] : identifier[table_name] ,
literal[string] : identifier[hash_key_value] }
keyword[if] identifier[range_key_conditions] :
identifier[data] [ literal[string] ]= identifier[range_key_conditions]
keyword[if] identifier[attributes_to_get] :
identifier[data] [ literal[string] ]= identifier[attributes_to_get]
keyword[if] identifier[limit] :
identifier[data] [ literal[string] ]= identifier[limit]
keyword[if] identifier[consistent_read] :
identifier[data] [ literal[string] ]= keyword[True]
keyword[if] identifier[scan_index_forward] :
identifier[data] [ literal[string] ]= keyword[True]
keyword[else] :
identifier[data] [ literal[string] ]= keyword[False]
keyword[if] identifier[exclusive_start_key] :
identifier[data] [ literal[string] ]= identifier[exclusive_start_key]
identifier[json_input] = identifier[json] . identifier[dumps] ( identifier[data] )
keyword[return] identifier[self] . identifier[make_request] ( literal[string] , identifier[json_input] ,
identifier[object_hook] = identifier[object_hook] ) | def query(self, table_name, hash_key_value, range_key_conditions=None, attributes_to_get=None, limit=None, consistent_read=False, scan_index_forward=True, exclusive_start_key=None, object_hook=None):
"""
Perform a query of DynamoDB. This version is currently punting
and expecting you to provide a full and correct JSON body
which is passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to query.
:type hash_key_value: dict
:param key: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specified forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name, 'HashKeyValue': hash_key_value}
if range_key_conditions:
data['RangeKeyCondition'] = range_key_conditions # depends on [control=['if'], data=[]]
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get # depends on [control=['if'], data=[]]
if limit:
data['Limit'] = limit # depends on [control=['if'], data=[]]
if consistent_read:
data['ConsistentRead'] = True # depends on [control=['if'], data=[]]
if scan_index_forward:
data['ScanIndexForward'] = True # depends on [control=['if'], data=[]]
else:
data['ScanIndexForward'] = False
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key # depends on [control=['if'], data=[]]
json_input = json.dumps(data)
return self.make_request('Query', json_input, object_hook=object_hook) |
def tobinarray(self, start=None, end=None, pad=_DEPRECATED, size=None):
''' Convert this object to binary form as array. If start and end
unspecified, they will be inferred from the data.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return array of unsigned char data.
'''
if not isinstance(pad, _DeprecatedParam):
print ("IntelHex.tobinarray: 'pad' parameter is deprecated.")
if pad is not None:
print ("Please, use IntelHex.padding attribute instead.")
else:
print ("Please, don't pass it explicitly.")
print ("Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)")
else:
pad = None
return self._tobinarray_really(start, end, pad, size) | def function[tobinarray, parameter[self, start, end, pad, size]]:
constant[ Convert this object to binary form as array. If start and end
unspecified, they will be inferred from the data.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return array of unsigned char data.
]
if <ast.UnaryOp object at 0x7da20e9b31f0> begin[:]
call[name[print], parameter[constant[IntelHex.tobinarray: 'pad' parameter is deprecated.]]]
if compare[name[pad] is_not constant[None]] begin[:]
call[name[print], parameter[constant[Please, use IntelHex.padding attribute instead.]]]
return[call[name[self]._tobinarray_really, parameter[name[start], name[end], name[pad], name[size]]]] | keyword[def] identifier[tobinarray] ( identifier[self] , identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[pad] = identifier[_DEPRECATED] , identifier[size] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[pad] , identifier[_DeprecatedParam] ):
identifier[print] ( literal[string] )
keyword[if] identifier[pad] keyword[is] keyword[not] keyword[None] :
identifier[print] ( literal[string] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[else] :
identifier[pad] = keyword[None]
keyword[return] identifier[self] . identifier[_tobinarray_really] ( identifier[start] , identifier[end] , identifier[pad] , identifier[size] ) | def tobinarray(self, start=None, end=None, pad=_DEPRECATED, size=None):
""" Convert this object to binary form as array. If start and end
unspecified, they will be inferred from the data.
@param start start address of output bytes.
@param end end address of output bytes (inclusive).
@param pad [DEPRECATED PARAMETER, please use self.padding instead]
fill empty spaces with this value
(if pad is None then this method uses self.padding).
@param size size of the block, used with start or end parameter.
@return array of unsigned char data.
"""
if not isinstance(pad, _DeprecatedParam):
print("IntelHex.tobinarray: 'pad' parameter is deprecated.")
if pad is not None:
print('Please, use IntelHex.padding attribute instead.') # depends on [control=['if'], data=[]]
else:
print("Please, don't pass it explicitly.")
print('Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)') # depends on [control=['if'], data=[]]
else:
pad = None
return self._tobinarray_really(start, end, pad, size) |
def get_account_info(self, fields=None):
""" Get information about a Telegraph account
:param fields: List of account fields to return. Available fields:
short_name, author_name, author_url, auth_url, page_count
Default: [“short_name”,“author_name”,“author_url”]
"""
return self._telegraph.method('getAccountInfo', {
'fields': json.dumps(fields) if fields else None
}) | def function[get_account_info, parameter[self, fields]]:
constant[ Get information about a Telegraph account
:param fields: List of account fields to return. Available fields:
short_name, author_name, author_url, auth_url, page_count
Default: [“short_name”,“author_name”,“author_url”]
]
return[call[name[self]._telegraph.method, parameter[constant[getAccountInfo], dictionary[[<ast.Constant object at 0x7da1b0d8d3f0>], [<ast.IfExp object at 0x7da1b0d8c790>]]]]] | keyword[def] identifier[get_account_info] ( identifier[self] , identifier[fields] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_telegraph] . identifier[method] ( literal[string] ,{
literal[string] : identifier[json] . identifier[dumps] ( identifier[fields] ) keyword[if] identifier[fields] keyword[else] keyword[None]
}) | def get_account_info(self, fields=None):
""" Get information about a Telegraph account
:param fields: List of account fields to return. Available fields:
short_name, author_name, author_url, auth_url, page_count
Default: [“short_name”,“author_name”,“author_url”]
"""
return self._telegraph.method('getAccountInfo', {'fields': json.dumps(fields) if fields else None}) |
def consume(self, limit=None):
    """Generator that waits for and yields one message at a time.

    Each iteration blocks on ``drain_events`` until a message arrives,
    then yields ``True``. The generator stops when ``limit`` messages
    have been consumed (if ``limit`` is truthy) or when the channel is
    closed.

    :param limit: Optional maximum number of messages to consume; a
        falsy value (None/0) means consume indefinitely.
    """
    for total_message_count in count():
        # PEP 479 (Python 3.7+): raising StopIteration inside a generator
        # becomes a RuntimeError, so terminate with a plain return instead.
        if limit and total_message_count >= limit:
            return
        if not self.channel.is_open:
            return
        # Blocks until the connection delivers the next event/message.
        self.channel.conn.drain_events()
        yield True
constant[Returns an iterator that waits for one message at a time.]
for taget[name[total_message_count]] in starred[call[name[count], parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b10c78b0> begin[:]
<ast.Raise object at 0x7da1b10c7160>
if <ast.UnaryOp object at 0x7da1b10c7610> begin[:]
<ast.Raise object at 0x7da1b10c5930>
call[name[self].channel.conn.drain_events, parameter[]]
<ast.Yield object at 0x7da1b10c67d0> | keyword[def] identifier[consume] ( identifier[self] , identifier[limit] = keyword[None] ):
literal[string]
keyword[for] identifier[total_message_count] keyword[in] identifier[count] ():
keyword[if] identifier[limit] keyword[and] identifier[total_message_count] >= identifier[limit] :
keyword[raise] identifier[StopIteration]
keyword[if] keyword[not] identifier[self] . identifier[channel] . identifier[is_open] :
keyword[raise] identifier[StopIteration]
identifier[self] . identifier[channel] . identifier[conn] . identifier[drain_events] ()
keyword[yield] keyword[True] | def consume(self, limit=None):
"""Returns an iterator that waits for one message at a time."""
for total_message_count in count():
if limit and total_message_count >= limit:
raise StopIteration # depends on [control=['if'], data=[]]
if not self.channel.is_open:
raise StopIteration # depends on [control=['if'], data=[]]
self.channel.conn.drain_events()
yield True # depends on [control=['for'], data=['total_message_count']] |
def getEstimatedNodeCounts(self, queuedJobShapes, currentNodeCounts):
    """
    Given the resource requirements of queued jobs and the current size of the cluster, returns
    a dict mapping from nodeShape to the number of nodes we want in the cluster right now.

    :param queuedJobShapes: resource shapes of the jobs currently queued.
    :param currentNodeCounts: mapping from node shape to the number of nodes
        of that shape currently in the cluster (used here for logging and
        metrics; the estimate itself is derived from the queue via bin packing).
    :return: dict mapping each shape in self.nodeShapes to the desired node
        count, smoothed and clamped to the configured min/max for that shape.
    """
    # Bin-pack the queued jobs onto the available node shapes to determine
    # how many nodes of each shape are needed to finish within targetTime.
    nodesToRunQueuedJobs = binPacking(jobShapes=queuedJobShapes,
                                      nodeShapes=self.nodeShapes,
                                      goalTime=self.targetTime)
    estimatedNodeCounts = {}
    for nodeShape in self.nodeShapes:
        nodeType = self.nodeShapeToType[nodeShape]
        logger.debug("Nodes of type %s to run queued jobs = "
                     "%s" % (nodeType, nodesToRunQueuedJobs[nodeShape]))
        # Actual calculation of the estimated number of nodes required:
        # zero stays zero, anything positive is rounded but never below one.
        estimatedNodeCount = 0 if nodesToRunQueuedJobs[nodeShape] == 0 \
            else max(1, self._round(nodesToRunQueuedJobs[nodeShape]))
        logger.debug("Estimating %i nodes of shape %s" % (estimatedNodeCount, nodeShape))
        # Use inertia parameter to smooth out fluctuations according to an exponentially
        # weighted moving average.
        estimatedNodeCount = self.smoothEstimate(nodeShape, estimatedNodeCount)
        # If we're scaling a non-preemptable node type, we need to see if we have a
        # deficit of preemptable nodes of this type that we should compensate for.
        if not nodeShape.preemptable:
            # Fraction in [0, 1]: 0 ignores the deficit, 1 fully replaces it.
            compensation = self.config.preemptableCompensation
            assert 0.0 <= compensation <= 1.0
            # The number of nodes we provision as compensation for missing preemptable
            # nodes is the product of the deficit (the number of preemptable nodes we did
            # _not_ allocate) and configuration preference.
            compensationNodes = self._round(self.preemptableNodeDeficit[nodeType] * compensation)
            if compensationNodes > 0:
                logger.debug('Adding %d non-preemptable nodes of type %s to compensate for a '
                             'deficit of %d preemptable ones.', compensationNodes,
                             nodeType,
                             self.preemptableNodeDeficit[nodeType])
                estimatedNodeCount += compensationNodes
        logger.debug("Currently %i nodes of type %s in cluster" % (currentNodeCounts[nodeShape],
                                                                   nodeType))
        # Report the current/desired sizes to the metrics backend, if enabled.
        if self.leader.toilMetrics:
            self.leader.toilMetrics.logClusterSize(nodeType=nodeType,
                                                   currentSize=currentNodeCounts[nodeShape],
                                                   desiredSize=estimatedNodeCount)
        # Bound number using the max and min node parameters
        if estimatedNodeCount > self.maxNodes[nodeShape]:
            logger.debug('Limiting the estimated number of necessary %s (%s) to the '
                         'configured maximum (%s).', nodeType,
                         estimatedNodeCount,
                         self.maxNodes[nodeShape])
            estimatedNodeCount = self.maxNodes[nodeShape]
        elif estimatedNodeCount < self.minNodes[nodeShape]:
            logger.debug('Raising the estimated number of necessary %s (%s) to the '
                         'configured minimum (%s).', nodeType,
                         estimatedNodeCount,
                         self.minNodes[nodeShape])
            estimatedNodeCount = self.minNodes[nodeShape]
        estimatedNodeCounts[nodeShape] = estimatedNodeCount
    return estimatedNodeCounts
constant[
Given the resource requirements of queued jobs and the current size of the cluster, returns
a dict mapping from nodeShape to the number of nodes we want in the cluster right now.
]
variable[nodesToRunQueuedJobs] assign[=] call[name[binPacking], parameter[]]
variable[estimatedNodeCounts] assign[=] dictionary[[], []]
for taget[name[nodeShape]] in starred[name[self].nodeShapes] begin[:]
variable[nodeType] assign[=] call[name[self].nodeShapeToType][name[nodeShape]]
call[name[logger].debug, parameter[binary_operation[constant[Nodes of type %s to run queued jobs = %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc996f0>, <ast.Subscript object at 0x7da18dc98880>]]]]]
variable[estimatedNodeCount] assign[=] <ast.IfExp object at 0x7da18dc9b190>
call[name[logger].debug, parameter[binary_operation[constant[Estimating %i nodes of shape %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc9bd90>, <ast.Name object at 0x7da18dc9a650>]]]]]
variable[estimatedNodeCount] assign[=] call[name[self].smoothEstimate, parameter[name[nodeShape], name[estimatedNodeCount]]]
if <ast.UnaryOp object at 0x7da18dc98550> begin[:]
variable[compensation] assign[=] name[self].config.preemptableCompensation
assert[compare[constant[0.0] less_or_equal[<=] name[compensation]]]
variable[compensationNodes] assign[=] call[name[self]._round, parameter[binary_operation[call[name[self].preemptableNodeDeficit][name[nodeType]] * name[compensation]]]]
if compare[name[compensationNodes] greater[>] constant[0]] begin[:]
call[name[logger].debug, parameter[constant[Adding %d non-preemptable nodes of type %s to compensate for a deficit of %d preemptable ones.], name[compensationNodes], name[nodeType], call[name[self].preemptableNodeDeficit][name[nodeType]]]]
<ast.AugAssign object at 0x7da18dc99ff0>
call[name[logger].debug, parameter[binary_operation[constant[Currently %i nodes of type %s in cluster] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18dc9ad10>, <ast.Name object at 0x7da18dc99360>]]]]]
if name[self].leader.toilMetrics begin[:]
call[name[self].leader.toilMetrics.logClusterSize, parameter[]]
if compare[name[estimatedNodeCount] greater[>] call[name[self].maxNodes][name[nodeShape]]] begin[:]
call[name[logger].debug, parameter[constant[Limiting the estimated number of necessary %s (%s) to the configured maximum (%s).], name[nodeType], name[estimatedNodeCount], call[name[self].maxNodes][name[nodeShape]]]]
variable[estimatedNodeCount] assign[=] call[name[self].maxNodes][name[nodeShape]]
call[name[estimatedNodeCounts]][name[nodeShape]] assign[=] name[estimatedNodeCount]
return[name[estimatedNodeCounts]] | keyword[def] identifier[getEstimatedNodeCounts] ( identifier[self] , identifier[queuedJobShapes] , identifier[currentNodeCounts] ):
literal[string]
identifier[nodesToRunQueuedJobs] = identifier[binPacking] ( identifier[jobShapes] = identifier[queuedJobShapes] ,
identifier[nodeShapes] = identifier[self] . identifier[nodeShapes] ,
identifier[goalTime] = identifier[self] . identifier[targetTime] )
identifier[estimatedNodeCounts] ={}
keyword[for] identifier[nodeShape] keyword[in] identifier[self] . identifier[nodeShapes] :
identifier[nodeType] = identifier[self] . identifier[nodeShapeToType] [ identifier[nodeShape] ]
identifier[logger] . identifier[debug] ( literal[string]
literal[string] %( identifier[nodeType] , identifier[nodesToRunQueuedJobs] [ identifier[nodeShape] ]))
identifier[estimatedNodeCount] = literal[int] keyword[if] identifier[nodesToRunQueuedJobs] [ identifier[nodeShape] ]== literal[int] keyword[else] identifier[max] ( literal[int] , identifier[self] . identifier[_round] ( identifier[nodesToRunQueuedJobs] [ identifier[nodeShape] ]))
identifier[logger] . identifier[debug] ( literal[string] %( identifier[estimatedNodeCount] , identifier[nodeShape] ))
identifier[estimatedNodeCount] = identifier[self] . identifier[smoothEstimate] ( identifier[nodeShape] , identifier[estimatedNodeCount] )
keyword[if] keyword[not] identifier[nodeShape] . identifier[preemptable] :
identifier[compensation] = identifier[self] . identifier[config] . identifier[preemptableCompensation]
keyword[assert] literal[int] <= identifier[compensation] <= literal[int]
identifier[compensationNodes] = identifier[self] . identifier[_round] ( identifier[self] . identifier[preemptableNodeDeficit] [ identifier[nodeType] ]* identifier[compensation] )
keyword[if] identifier[compensationNodes] > literal[int] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] , identifier[compensationNodes] ,
identifier[nodeType] ,
identifier[self] . identifier[preemptableNodeDeficit] [ identifier[nodeType] ])
identifier[estimatedNodeCount] += identifier[compensationNodes]
identifier[logger] . identifier[debug] ( literal[string] %( identifier[currentNodeCounts] [ identifier[nodeShape] ],
identifier[nodeType] ))
keyword[if] identifier[self] . identifier[leader] . identifier[toilMetrics] :
identifier[self] . identifier[leader] . identifier[toilMetrics] . identifier[logClusterSize] ( identifier[nodeType] = identifier[nodeType] ,
identifier[currentSize] = identifier[currentNodeCounts] [ identifier[nodeShape] ],
identifier[desiredSize] = identifier[estimatedNodeCount] )
keyword[if] identifier[estimatedNodeCount] > identifier[self] . identifier[maxNodes] [ identifier[nodeShape] ]:
identifier[logger] . identifier[debug] ( literal[string]
literal[string] , identifier[nodeType] ,
identifier[estimatedNodeCount] ,
identifier[self] . identifier[maxNodes] [ identifier[nodeShape] ])
identifier[estimatedNodeCount] = identifier[self] . identifier[maxNodes] [ identifier[nodeShape] ]
keyword[elif] identifier[estimatedNodeCount] < identifier[self] . identifier[minNodes] [ identifier[nodeShape] ]:
identifier[logger] . identifier[debug] ( literal[string]
literal[string] , identifier[nodeType] ,
identifier[estimatedNodeCount] ,
identifier[self] . identifier[minNodes] [ identifier[nodeShape] ])
identifier[estimatedNodeCount] = identifier[self] . identifier[minNodes] [ identifier[nodeShape] ]
identifier[estimatedNodeCounts] [ identifier[nodeShape] ]= identifier[estimatedNodeCount]
keyword[return] identifier[estimatedNodeCounts] | def getEstimatedNodeCounts(self, queuedJobShapes, currentNodeCounts):
"""
Given the resource requirements of queued jobs and the current size of the cluster, returns
a dict mapping from nodeShape to the number of nodes we want in the cluster right now.
"""
nodesToRunQueuedJobs = binPacking(jobShapes=queuedJobShapes, nodeShapes=self.nodeShapes, goalTime=self.targetTime)
estimatedNodeCounts = {}
for nodeShape in self.nodeShapes:
nodeType = self.nodeShapeToType[nodeShape]
logger.debug('Nodes of type %s to run queued jobs = %s' % (nodeType, nodesToRunQueuedJobs[nodeShape]))
# Actual calculation of the estimated number of nodes required
estimatedNodeCount = 0 if nodesToRunQueuedJobs[nodeShape] == 0 else max(1, self._round(nodesToRunQueuedJobs[nodeShape]))
logger.debug('Estimating %i nodes of shape %s' % (estimatedNodeCount, nodeShape))
# Use inertia parameter to smooth out fluctuations according to an exponentially
# weighted moving average.
estimatedNodeCount = self.smoothEstimate(nodeShape, estimatedNodeCount) # If we're scaling a non-preemptable node type, we need to see if we have a
# deficit of preemptable nodes of this type that we should compensate for.
if not nodeShape.preemptable:
compensation = self.config.preemptableCompensation
assert 0.0 <= compensation <= 1.0
# The number of nodes we provision as compensation for missing preemptable
# nodes is the product of the deficit (the number of preemptable nodes we did
# _not_ allocate) and configuration preference.
compensationNodes = self._round(self.preemptableNodeDeficit[nodeType] * compensation)
if compensationNodes > 0:
logger.debug('Adding %d non-preemptable nodes of type %s to compensate for a deficit of %d preemptable ones.', compensationNodes, nodeType, self.preemptableNodeDeficit[nodeType]) # depends on [control=['if'], data=['compensationNodes']]
estimatedNodeCount += compensationNodes # depends on [control=['if'], data=[]]
logger.debug('Currently %i nodes of type %s in cluster' % (currentNodeCounts[nodeShape], nodeType))
if self.leader.toilMetrics:
self.leader.toilMetrics.logClusterSize(nodeType=nodeType, currentSize=currentNodeCounts[nodeShape], desiredSize=estimatedNodeCount) # depends on [control=['if'], data=[]]
# Bound number using the max and min node parameters
if estimatedNodeCount > self.maxNodes[nodeShape]:
logger.debug('Limiting the estimated number of necessary %s (%s) to the configured maximum (%s).', nodeType, estimatedNodeCount, self.maxNodes[nodeShape])
estimatedNodeCount = self.maxNodes[nodeShape] # depends on [control=['if'], data=['estimatedNodeCount']]
elif estimatedNodeCount < self.minNodes[nodeShape]:
logger.debug('Raising the estimated number of necessary %s (%s) to the configured minimum (%s).', nodeType, estimatedNodeCount, self.minNodes[nodeShape])
estimatedNodeCount = self.minNodes[nodeShape] # depends on [control=['if'], data=['estimatedNodeCount']]
estimatedNodeCounts[nodeShape] = estimatedNodeCount # depends on [control=['for'], data=['nodeShape']]
return estimatedNodeCounts |
def select_regexp_char(char):
    """Build the regex fragment that matches a single character.

    Values that are neither ``str`` nor ``int`` map to an empty pattern;
    string punctuation (not a letter, not a digit) is backslash-escaped;
    everything else is rendered verbatim.
    """
    if not isinstance(char, (str, int)):
        return ''
    if isinstance(char, str) and not (char.isalpha() or char.isdigit()):
        # Punctuation/special characters must be escaped in a regex.
        return '\\{}'.format(char)
    return '{}'.format(char)
constant[
Select correct regex depending the char
]
variable[regexp] assign[=] call[constant[{}].format, parameter[name[char]]]
if <ast.BoolOp object at 0x7da18f00f5e0> begin[:]
variable[regexp] assign[=] constant[]
if <ast.BoolOp object at 0x7da18f00de10> begin[:]
variable[regexp] assign[=] call[constant[\{}].format, parameter[name[char]]]
return[name[regexp]] | keyword[def] identifier[select_regexp_char] ( identifier[char] ):
literal[string]
identifier[regexp] = literal[string] . identifier[format] ( identifier[char] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[char] , identifier[str] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[char] , identifier[int] ):
identifier[regexp] = literal[string]
keyword[if] identifier[isinstance] ( identifier[char] , identifier[str] ) keyword[and] keyword[not] identifier[char] . identifier[isalpha] () keyword[and] keyword[not] identifier[char] . identifier[isdigit] ():
identifier[regexp] = literal[string] . identifier[format] ( identifier[char] )
keyword[return] identifier[regexp] | def select_regexp_char(char):
"""
Select correct regex depending the char
"""
regexp = '{}'.format(char)
if not isinstance(char, str) and (not isinstance(char, int)):
regexp = '' # depends on [control=['if'], data=[]]
if isinstance(char, str) and (not char.isalpha()) and (not char.isdigit()):
regexp = '\\{}'.format(char) # depends on [control=['if'], data=[]]
return regexp |
def _nodes_slots_to_slots_nodes(self, mapping):
"""
Converts a mapping of
{id: <node>, slots: (slot1, slot2)}
to
{slot1: <node>, slot2: <node>}
Operation is expensive so use with caution
"""
out = {}
for node in mapping:
for slot in node['slots']:
out[str(slot)] = node['id']
return out | def function[_nodes_slots_to_slots_nodes, parameter[self, mapping]]:
constant[
Converts a mapping of
{id: <node>, slots: (slot1, slot2)}
to
{slot1: <node>, slot2: <node>}
Operation is expensive so use with caution
]
variable[out] assign[=] dictionary[[], []]
for taget[name[node]] in starred[name[mapping]] begin[:]
for taget[name[slot]] in starred[call[name[node]][constant[slots]]] begin[:]
call[name[out]][call[name[str], parameter[name[slot]]]] assign[=] call[name[node]][constant[id]]
return[name[out]] | keyword[def] identifier[_nodes_slots_to_slots_nodes] ( identifier[self] , identifier[mapping] ):
literal[string]
identifier[out] ={}
keyword[for] identifier[node] keyword[in] identifier[mapping] :
keyword[for] identifier[slot] keyword[in] identifier[node] [ literal[string] ]:
identifier[out] [ identifier[str] ( identifier[slot] )]= identifier[node] [ literal[string] ]
keyword[return] identifier[out] | def _nodes_slots_to_slots_nodes(self, mapping):
"""
Converts a mapping of
{id: <node>, slots: (slot1, slot2)}
to
{slot1: <node>, slot2: <node>}
Operation is expensive so use with caution
"""
out = {}
for node in mapping:
for slot in node['slots']:
out[str(slot)] = node['id'] # depends on [control=['for'], data=['slot']] # depends on [control=['for'], data=['node']]
return out |
def create_segments(self, segments):
    """Enqueue a CREATE provisioning task for every segment given."""
    for seg in segments:
        # Wrap each segment id in a MechResource describing the operation
        # and hand it to the provisioning worker queue.
        resource = MechResource(seg['id'], a_const.SEGMENT_RESOURCE, a_const.CREATE)
        self.provision_queue.put(resource)
constant[Enqueue segment creates]
for taget[name[segment]] in starred[name[segments]] begin[:]
variable[s_res] assign[=] call[name[MechResource], parameter[call[name[segment]][constant[id]], name[a_const].SEGMENT_RESOURCE, name[a_const].CREATE]]
call[name[self].provision_queue.put, parameter[name[s_res]]] | keyword[def] identifier[create_segments] ( identifier[self] , identifier[segments] ):
literal[string]
keyword[for] identifier[segment] keyword[in] identifier[segments] :
identifier[s_res] = identifier[MechResource] ( identifier[segment] [ literal[string] ], identifier[a_const] . identifier[SEGMENT_RESOURCE] ,
identifier[a_const] . identifier[CREATE] )
identifier[self] . identifier[provision_queue] . identifier[put] ( identifier[s_res] ) | def create_segments(self, segments):
"""Enqueue segment creates"""
for segment in segments:
s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE, a_const.CREATE)
self.provision_queue.put(s_res) # depends on [control=['for'], data=['segment']] |
def convert_datetime_to_utc(dt: PotentialDatetimeType) -> DateTime:
    """
    Convert date/time with timezone to UTC (with UTC timezone).
    """
    # Coerce the input to a Pendulum datetime first, then re-express it
    # in the UTC timezone.
    pendulum_dt = coerce_to_pendulum(dt)
    return pendulum_dt.in_tz(get_tz_utc())
constant[
Convert date/time with timezone to UTC (with UTC timezone).
]
variable[dt] assign[=] call[name[coerce_to_pendulum], parameter[name[dt]]]
variable[tz] assign[=] call[name[get_tz_utc], parameter[]]
return[call[name[dt].in_tz, parameter[name[tz]]]] | keyword[def] identifier[convert_datetime_to_utc] ( identifier[dt] : identifier[PotentialDatetimeType] )-> identifier[DateTime] :
literal[string]
identifier[dt] = identifier[coerce_to_pendulum] ( identifier[dt] )
identifier[tz] = identifier[get_tz_utc] ()
keyword[return] identifier[dt] . identifier[in_tz] ( identifier[tz] ) | def convert_datetime_to_utc(dt: PotentialDatetimeType) -> DateTime:
"""
Convert date/time with timezone to UTC (with UTC timezone).
"""
dt = coerce_to_pendulum(dt)
tz = get_tz_utc()
return dt.in_tz(tz) |
def _peek_table(self):  # type: () -> Tuple[bool, str]
    """
    Peeks ahead non-intrusively by cloning then restoring the
    initial state of the parser.

    Returns the name of the table about to be parsed,
    as well as whether it is part of an AoT (array of tables,
    i.e. a ``[[...]]`` header rather than ``[...]``).

    Must be called with the parser positioned on the opening ``[``.
    """
    # we always want to restore after exiting this scope
    with self._state(save_marker=True, restore=True):
        if self._current != "[":
            raise self.parse_error(
                InternalParserError,
                "_peek_table() entered on non-bracket character",
            )

        # AoT: a second '[' immediately after the first marks an
        # array-of-tables header.
        self.inc()
        is_aot = False
        if self._current == "[":
            self.inc()
            is_aot = True

        # Mark the start of the name, then advance to the closing bracket
        # and extract everything in between as the table name.
        self.mark()
        while self._current != "]" and self.inc():
            table_name = self.extract()
        # NOTE(review): if the header is empty ("[]"), the loop body never
        # runs and table_name would be unbound — presumably upstream
        # tokenizing guarantees a non-empty name; confirm before relying on it.

        return is_aot, table_name
constant[
Peeks ahead non-intrusively by cloning then restoring the
initial state of the parser.
Returns the name of the table about to be parsed,
as well as whether it is part of an AoT.
]
with call[name[self]._state, parameter[]] begin[:]
if compare[name[self]._current not_equal[!=] constant[[]] begin[:]
<ast.Raise object at 0x7da1b2040730>
call[name[self].inc, parameter[]]
variable[is_aot] assign[=] constant[False]
if compare[name[self]._current equal[==] constant[[]] begin[:]
call[name[self].inc, parameter[]]
variable[is_aot] assign[=] constant[True]
call[name[self].mark, parameter[]]
while <ast.BoolOp object at 0x7da1b2041f90> begin[:]
variable[table_name] assign[=] call[name[self].extract, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b2042aa0>, <ast.Name object at 0x7da1b2042980>]]] | keyword[def] identifier[_peek_table] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_state] ( identifier[save_marker] = keyword[True] , identifier[restore] = keyword[True] ):
keyword[if] identifier[self] . identifier[_current] != literal[string] :
keyword[raise] identifier[self] . identifier[parse_error] (
identifier[InternalParserError] ,
literal[string] ,
)
identifier[self] . identifier[inc] ()
identifier[is_aot] = keyword[False]
keyword[if] identifier[self] . identifier[_current] == literal[string] :
identifier[self] . identifier[inc] ()
identifier[is_aot] = keyword[True]
identifier[self] . identifier[mark] ()
keyword[while] identifier[self] . identifier[_current] != literal[string] keyword[and] identifier[self] . identifier[inc] ():
identifier[table_name] = identifier[self] . identifier[extract] ()
keyword[return] identifier[is_aot] , identifier[table_name] | def _peek_table(self): # type: () -> Tuple[bool, str]
'\n Peeks ahead non-intrusively by cloning then restoring the\n initial state of the parser.\n\n Returns the name of the table about to be parsed,\n as well as whether it is part of an AoT.\n '
# we always want to restore after exiting this scope
with self._state(save_marker=True, restore=True):
if self._current != '[':
raise self.parse_error(InternalParserError, '_peek_table() entered on non-bracket character') # depends on [control=['if'], data=[]]
# AoT
self.inc()
is_aot = False
if self._current == '[':
self.inc()
is_aot = True # depends on [control=['if'], data=[]]
self.mark()
while self._current != ']' and self.inc():
table_name = self.extract() # depends on [control=['while'], data=[]]
return (is_aot, table_name) # depends on [control=['with'], data=[]] |
def token(self, adata, load):
    '''
    Determine if token auth is valid and yield the adata

    Generator: yields an empty dict on any failure (bad token, lookup
    error, no matching auth data); yields {'sub_auth': ..., 'token': ...}
    for each matching auth entry. A trailing empty dict is always yielded
    as a terminator.
    '''
    try:
        # Resolve the raw token string into the stored token record.
        token = self.loadauth.get_tok(load['token'])
    except Exception as exc:
        log.error('Exception occurred when generating auth token: %s', exc)
        # NOTE(review): after this yield, execution falls through to the
        # 'if not token' check below with 'token' unbound, which would raise
        # NameError if the generator is advanced again — confirm callers
        # stop after the first empty yield.
        yield {}
    if not token:
        log.warning('Authentication failure of type "token" occurred.')
        yield {}
    for sub_auth in adata:
        # NOTE(review): 'sub_adata' is never used in the inner loop body;
        # this looks like it duplicates each yield len(adata) times —
        # possibly a bug, left as-is pending confirmation.
        for sub_adata in adata:
            if token['eauth'] not in adata:
                continue
            # Bitwise '|' on the two membership booleans acts as a
            # non-short-circuiting logical OR here.
            if not ((token['name'] in adata[token['eauth']]) |
                    ('*' in adata[token['eauth']])):
                continue
            yield {'sub_auth': sub_auth, 'token': token}
    yield {}
constant[
Determine if token auth is valid and yield the adata
]
<ast.Try object at 0x7da18ede50c0>
if <ast.UnaryOp object at 0x7da18ede7a00> begin[:]
call[name[log].warning, parameter[constant[Authentication failure of type "token" occurred.]]]
<ast.Yield object at 0x7da18ede4340>
for taget[name[sub_auth]] in starred[name[adata]] begin[:]
for taget[name[sub_adata]] in starred[name[adata]] begin[:]
if compare[call[name[token]][constant[eauth]] <ast.NotIn object at 0x7da2590d7190> name[adata]] begin[:]
continue
if <ast.UnaryOp object at 0x7da1b1f79c00> begin[:]
continue
<ast.Yield object at 0x7da1b1f7ae60>
<ast.Yield object at 0x7da1b1f79a20> | keyword[def] identifier[token] ( identifier[self] , identifier[adata] , identifier[load] ):
literal[string]
keyword[try] :
identifier[token] = identifier[self] . identifier[loadauth] . identifier[get_tok] ( identifier[load] [ literal[string] ])
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[log] . identifier[error] ( literal[string] , identifier[exc] )
keyword[yield] {}
keyword[if] keyword[not] identifier[token] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[yield] {}
keyword[for] identifier[sub_auth] keyword[in] identifier[adata] :
keyword[for] identifier[sub_adata] keyword[in] identifier[adata] :
keyword[if] identifier[token] [ literal[string] ] keyword[not] keyword[in] identifier[adata] :
keyword[continue]
keyword[if] keyword[not] (( identifier[token] [ literal[string] ] keyword[in] identifier[adata] [ identifier[token] [ literal[string] ]])|
( literal[string] keyword[in] identifier[adata] [ identifier[token] [ literal[string] ]])):
keyword[continue]
keyword[yield] { literal[string] : identifier[sub_auth] , literal[string] : identifier[token] }
keyword[yield] {} | def token(self, adata, load):
"""
Determine if token auth is valid and yield the adata
"""
try:
token = self.loadauth.get_tok(load['token']) # depends on [control=['try'], data=[]]
except Exception as exc:
log.error('Exception occurred when generating auth token: %s', exc)
yield {} # depends on [control=['except'], data=['exc']]
if not token:
log.warning('Authentication failure of type "token" occurred.')
yield {} # depends on [control=['if'], data=[]]
for sub_auth in adata:
for sub_adata in adata:
if token['eauth'] not in adata:
continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not (token['name'] in adata[token['eauth']]) | ('*' in adata[token['eauth']]):
continue # depends on [control=['if'], data=[]]
yield {'sub_auth': sub_auth, 'token': token} # depends on [control=['for'], data=['sub_auth']]
yield {} |
def text_from_affiliation_elements(department, institution, city, country):
    """Format an author affiliation string from its parts.

    Empty/None parts are dropped; the rest are joined with ", ".
    """
    parts = [part for part in (department, institution, city, country) if part]
    return ', '.join(parts)
constant[format an author affiliation from details]
return[call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b14d7190>]]] | keyword[def] identifier[text_from_affiliation_elements] ( identifier[department] , identifier[institution] , identifier[city] , identifier[country] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[element] keyword[for] identifier[element] keyword[in] [ identifier[department] , identifier[institution] , identifier[city] , identifier[country] ] keyword[if] identifier[element] ) | def text_from_affiliation_elements(department, institution, city, country):
"""format an author affiliation from details"""
return ', '.join((element for element in [department, institution, city, country] if element)) |
def get_interface(name):
    """
    Return the serialize function registered under *name*.

    Raises InvalidBufferException when no such buffer interface exists.
    """
    try:
        log.debug('Using %s as buffer interface', name)
        return BUFFER_LOOKUP[name]
    except KeyError:
        # Log with the active KeyError traceback, then surface a
        # domain-specific exception to the caller.
        message = 'Buffer interface {} is not available'.format(name)
        log.error(message, exc_info=True)
        raise InvalidBufferException(message)
constant[
Return the serialize function.
]
<ast.Try object at 0x7da1b13c22f0> | keyword[def] identifier[get_interface] ( identifier[name] ):
literal[string]
keyword[try] :
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] identifier[BUFFER_LOOKUP] [ identifier[name] ]
keyword[except] identifier[KeyError] :
identifier[msg] = literal[string] . identifier[format] ( identifier[name] )
identifier[log] . identifier[error] ( identifier[msg] , identifier[exc_info] = keyword[True] )
keyword[raise] identifier[InvalidBufferException] ( identifier[msg] ) | def get_interface(name):
"""
Return the serialize function.
"""
try:
log.debug('Using %s as buffer interface', name)
return BUFFER_LOOKUP[name] # depends on [control=['try'], data=[]]
except KeyError:
msg = 'Buffer interface {} is not available'.format(name)
log.error(msg, exc_info=True)
raise InvalidBufferException(msg) # depends on [control=['except'], data=[]] |
def save(path, ndarray, min_val=None, max_val=None):
    """
    Save an image, represented as an ndarray, to the filesystem
    :param path: string, filepath
    :param ndarray: The image as an ndarray
    :param min_val: The minimum pixel value in the image format
    :param max_val: The maximum pixel valie in the image format
      If min_val and max_val are not specified, attempts to
      infer whether the image is in any of the common ranges:
        [0, 1], [-1, 1], [0, 255]
      This can be ambiguous, so it is better to specify if known.
    """
    # Convert to a PIL image first, then delegate writing to PIL.
    pil_image = as_pil(ndarray, min_val, max_val)
    pil_image.save(path)
constant[
Save an image, represented as an ndarray, to the filesystem
:param path: string, filepath
:param ndarray: The image as an ndarray
:param min_val: The minimum pixel value in the image format
:param max_val: The maximum pixel valie in the image format
If min_val and max_val are not specified, attempts to
infer whether the image is in any of the common ranges:
[0, 1], [-1, 1], [0, 255]
This can be ambiguous, so it is better to specify if known.
]
call[call[name[as_pil], parameter[name[ndarray], name[min_val], name[max_val]]].save, parameter[name[path]]] | keyword[def] identifier[save] ( identifier[path] , identifier[ndarray] , identifier[min_val] = keyword[None] , identifier[max_val] = keyword[None] ):
literal[string]
identifier[as_pil] ( identifier[ndarray] , identifier[min_val] , identifier[max_val] ). identifier[save] ( identifier[path] ) | def save(path, ndarray, min_val=None, max_val=None):
"""
Save an image, represented as an ndarray, to the filesystem
:param path: string, filepath
:param ndarray: The image as an ndarray
:param min_val: The minimum pixel value in the image format
:param max_val: The maximum pixel valie in the image format
If min_val and max_val are not specified, attempts to
infer whether the image is in any of the common ranges:
[0, 1], [-1, 1], [0, 255]
This can be ambiguous, so it is better to specify if known.
"""
as_pil(ndarray, min_val, max_val).save(path) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.