code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def escape_dictionary(dictionary, datetime_format='%Y-%m-%d %H:%M:%S'):
    """Escape dictionary values in place, with keys as column names and
    values as column values.

    datetime values are formatted with *datetime_format*, strings are
    escaped and double-quoted, booleans become 1/0 and None becomes the
    SQL literal NULL.

    @type dictionary: dict
    @param dictionary: Key-values
    @type datetime_format: str
    @param datetime_format: strftime format applied to datetime values
    """
    # Py2/Py3 compatibility: the original iteritems()/basestring exist only
    # on Python 2 and raise NameError/AttributeError on Python 3.
    try:
        string_types = basestring  # noqa: F821 - Python 2
    except NameError:
        string_types = str  # Python 3
    # Iterate over a snapshot so reassigning values cannot upset iteration.
    for k, v in list(dictionary.items()):
        if isinstance(v, datetime.datetime):
            v = v.strftime(datetime_format)
        if isinstance(v, string_types):
            v = CoyoteDb.db_escape(str(v))
            v = '"{}"'.format(v)
        if v is True:
            v = 1
        if v is False:
            v = 0
        if v is None:
            v = 'NULL'
        dictionary[k] = v
constant[Escape dictionary values with keys as column names and values column values
@type dictionary: dict
@param dictionary: Key-values
]
for taget[tuple[[<ast.Name object at 0x7da20c76d5d0>, <ast.Name object at 0x7da20c76f640>]]] in starred[call[name[dictionary].iteritems, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[datetime].datetime]] begin[:]
variable[v] assign[=] call[name[v].strftime, parameter[name[datetime_format]]]
if call[name[isinstance], parameter[name[v], name[basestring]]] begin[:]
variable[v] assign[=] call[name[CoyoteDb].db_escape, parameter[call[name[str], parameter[name[v]]]]]
variable[v] assign[=] call[constant["{}"].format, parameter[name[v]]]
if compare[name[v] is constant[True]] begin[:]
variable[v] assign[=] constant[1]
if compare[name[v] is constant[False]] begin[:]
variable[v] assign[=] constant[0]
if compare[name[v] is constant[None]] begin[:]
variable[v] assign[=] constant[NULL]
call[name[dictionary]][name[k]] assign[=] name[v] | keyword[def] identifier[escape_dictionary] ( identifier[dictionary] , identifier[datetime_format] = literal[string] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dictionary] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[datetime] . identifier[datetime] ):
identifier[v] = identifier[v] . identifier[strftime] ( identifier[datetime_format] )
keyword[if] identifier[isinstance] ( identifier[v] , identifier[basestring] ):
identifier[v] = identifier[CoyoteDb] . identifier[db_escape] ( identifier[str] ( identifier[v] ))
identifier[v] = literal[string] . identifier[format] ( identifier[v] )
keyword[if] identifier[v] keyword[is] keyword[True] :
identifier[v] = literal[int]
keyword[if] identifier[v] keyword[is] keyword[False] :
identifier[v] = literal[int]
keyword[if] identifier[v] keyword[is] keyword[None] :
identifier[v] = literal[string]
identifier[dictionary] [ identifier[k] ]= identifier[v] | def escape_dictionary(dictionary, datetime_format='%Y-%m-%d %H:%M:%S'):
"""Escape dictionary values with keys as column names and values column values
@type dictionary: dict
@param dictionary: Key-values
"""
for (k, v) in dictionary.iteritems():
if isinstance(v, datetime.datetime):
v = v.strftime(datetime_format) # depends on [control=['if'], data=[]]
if isinstance(v, basestring):
v = CoyoteDb.db_escape(str(v))
v = '"{}"'.format(v) # depends on [control=['if'], data=[]]
if v is True:
v = 1 # depends on [control=['if'], data=['v']]
if v is False:
v = 0 # depends on [control=['if'], data=['v']]
if v is None:
v = 'NULL' # depends on [control=['if'], data=['v']]
dictionary[k] = v # depends on [control=['for'], data=[]] |
def _batch_norm(name, x):
    """Apply (inference-mode) batch normalization to ``x`` in a named scope."""
    bn_kwargs = dict(
        inputs=x,
        decay=.9,
        center=True,
        scale=True,
        activation_fn=None,
        updates_collections=None,
        is_training=False,
    )
    with tf.name_scope(name):
        return tf.contrib.layers.batch_norm(**bn_kwargs)
constant[Batch normalization.]
with call[name[tf].name_scope, parameter[name[name]]] begin[:]
return[call[name[tf].contrib.layers.batch_norm, parameter[]]] | keyword[def] identifier[_batch_norm] ( identifier[name] , identifier[x] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( identifier[name] ):
keyword[return] identifier[tf] . identifier[contrib] . identifier[layers] . identifier[batch_norm] (
identifier[inputs] = identifier[x] ,
identifier[decay] = literal[int] ,
identifier[center] = keyword[True] ,
identifier[scale] = keyword[True] ,
identifier[activation_fn] = keyword[None] ,
identifier[updates_collections] = keyword[None] ,
identifier[is_training] = keyword[False] ) | def _batch_norm(name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=0.9, center=True, scale=True, activation_fn=None, updates_collections=None, is_training=False) # depends on [control=['with'], data=[]] |
def stats(self, antnames):
    """Print a per-antenna summary of the deviation of normvis from unity.

    XXX may be out of date.

    :param antnames: sequence of antenna names, indexed by antenna number
    """
    # np.int / np.complex were deprecated aliases removed in NumPy 1.24;
    # q = |normvis - 1| is real, so a plain float accumulator is correct
    # (the old complex accumulator also broke the '%f' formatting below).
    nbyant = np.zeros(self.nants, dtype=int)
    totals = np.zeros(self.nants)
    totals_sq = np.zeros(self.nants)
    q = np.abs(self.normvis - 1)
    for i in range(self.nsamps):
        i1, i2 = self.blidxs[i]
        # Each baseline sample contributes to both of its antennas.
        nbyant[i1] += 1
        nbyant[i2] += 1
        totals[i1] += q[i]
        totals[i2] += q[i]
        totals_sq[i1] += q[i]**2
        totals_sq[i2] += q[i]**2
    avg = totals / nbyant
    std = np.sqrt(totals_sq / nbyant - avg**2)
    # Normalization factors so the median antenna scores 1.0.
    navg = 1. / np.median(avg)
    nstd = 1. / np.median(std)
    for i in range(self.nants):
        print(' %2d %10s %3d %f %f %f %f' %
              (i, antnames[i], nbyant[i], avg[i], std[i], avg[i] * navg, std[i] * nstd))
constant[XXX may be out of date.]
variable[nbyant] assign[=] call[name[np].zeros, parameter[name[self].nants]]
variable[sum] assign[=] call[name[np].zeros, parameter[name[self].nants]]
variable[sumsq] assign[=] call[name[np].zeros, parameter[name[self].nants]]
variable[q] assign[=] call[name[np].abs, parameter[binary_operation[name[self].normvis - constant[1]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[self].nsamps]]] begin[:]
<ast.Tuple object at 0x7da18bcc9210> assign[=] call[name[self].blidxs][name[i]]
<ast.AugAssign object at 0x7da18bcc9510>
<ast.AugAssign object at 0x7da18bcc9b40>
<ast.AugAssign object at 0x7da18bcca1d0>
<ast.AugAssign object at 0x7da18bcca0e0>
<ast.AugAssign object at 0x7da1b26b6a10>
<ast.AugAssign object at 0x7da1b26b6860>
variable[avg] assign[=] binary_operation[name[sum] / name[nbyant]]
variable[std] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[sumsq] / name[nbyant]] - binary_operation[name[avg] ** constant[2]]]]]
variable[navg] assign[=] binary_operation[constant[1.0] / call[name[np].median, parameter[name[avg]]]]
variable[nstd] assign[=] binary_operation[constant[1.0] / call[name[np].median, parameter[name[std]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[self].nants]]] begin[:]
call[name[print], parameter[binary_operation[constant[ %2d %10s %3d %f %f %f %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26b7010>, <ast.Subscript object at 0x7da1b26b6ef0>, <ast.Subscript object at 0x7da1b26b77f0>, <ast.Subscript object at 0x7da1b26b6830>, <ast.Subscript object at 0x7da1b263a980>, <ast.BinOp object at 0x7da1b2638370>, <ast.BinOp object at 0x7da1b263a290>]]]]] | keyword[def] identifier[stats] ( identifier[self] , identifier[antnames] ):
literal[string]
identifier[nbyant] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[nants] , identifier[dtype] = identifier[np] . identifier[int] )
identifier[sum] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[nants] , identifier[dtype] = identifier[np] . identifier[complex] )
identifier[sumsq] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[nants] )
identifier[q] = identifier[np] . identifier[abs] ( identifier[self] . identifier[normvis] - literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[nsamps] ):
identifier[i1] , identifier[i2] = identifier[self] . identifier[blidxs] [ identifier[i] ]
identifier[nbyant] [ identifier[i1] ]+= literal[int]
identifier[nbyant] [ identifier[i2] ]+= literal[int]
identifier[sum] [ identifier[i1] ]+= identifier[q] [ identifier[i] ]
identifier[sum] [ identifier[i2] ]+= identifier[q] [ identifier[i] ]
identifier[sumsq] [ identifier[i1] ]+= identifier[q] [ identifier[i] ]** literal[int]
identifier[sumsq] [ identifier[i2] ]+= identifier[q] [ identifier[i] ]** literal[int]
identifier[avg] = identifier[sum] / identifier[nbyant]
identifier[std] = identifier[np] . identifier[sqrt] ( identifier[sumsq] / identifier[nbyant] - identifier[avg] ** literal[int] )
identifier[navg] = literal[int] / identifier[np] . identifier[median] ( identifier[avg] )
identifier[nstd] = literal[int] / identifier[np] . identifier[median] ( identifier[std] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[nants] ):
identifier[print] ( literal[string] %
( identifier[i] , identifier[antnames] [ identifier[i] ], identifier[nbyant] [ identifier[i] ], identifier[avg] [ identifier[i] ], identifier[std] [ identifier[i] ], identifier[avg] [ identifier[i] ]* identifier[navg] , identifier[std] [ identifier[i] ]* identifier[nstd] )) | def stats(self, antnames):
"""XXX may be out of date."""
nbyant = np.zeros(self.nants, dtype=np.int)
sum = np.zeros(self.nants, dtype=np.complex)
sumsq = np.zeros(self.nants)
q = np.abs(self.normvis - 1)
for i in range(self.nsamps):
(i1, i2) = self.blidxs[i]
nbyant[i1] += 1
nbyant[i2] += 1
sum[i1] += q[i]
sum[i2] += q[i]
sumsq[i1] += q[i] ** 2
sumsq[i2] += q[i] ** 2 # depends on [control=['for'], data=['i']]
avg = sum / nbyant
std = np.sqrt(sumsq / nbyant - avg ** 2)
navg = 1.0 / np.median(avg)
nstd = 1.0 / np.median(std)
for i in range(self.nants):
print(' %2d %10s %3d %f %f %f %f' % (i, antnames[i], nbyant[i], avg[i], std[i], avg[i] * navg, std[i] * nstd)) # depends on [control=['for'], data=['i']] |
def send(remote_host=None):
    """Push the local nagios check results to a remote nago node.

    Falls back to the configured 'server' setting when no host is given.
    """
    payload = get()
    target = remote_host or nago.extensions.settings.get('server')
    node = nago.core.get_node(target)
    node.send_command('checkresults', 'post', **payload)
    return "checkresults sent to %s" % target
constant[ Send local nagios data to a remote nago instance ]
variable[my_data] assign[=] call[name[get], parameter[]]
if <ast.UnaryOp object at 0x7da1b13092a0> begin[:]
variable[remote_host] assign[=] call[name[nago].extensions.settings.get, parameter[constant[server]]]
variable[remote_node] assign[=] call[name[nago].core.get_node, parameter[name[remote_host]]]
call[name[remote_node].send_command, parameter[constant[checkresults], constant[post]]]
return[binary_operation[constant[checkresults sent to %s] <ast.Mod object at 0x7da2590d6920> name[remote_host]]] | keyword[def] identifier[send] ( identifier[remote_host] = keyword[None] ):
literal[string]
identifier[my_data] = identifier[get] ()
keyword[if] keyword[not] identifier[remote_host] :
identifier[remote_host] = identifier[nago] . identifier[extensions] . identifier[settings] . identifier[get] ( literal[string] )
identifier[remote_node] = identifier[nago] . identifier[core] . identifier[get_node] ( identifier[remote_host] )
identifier[remote_node] . identifier[send_command] ( literal[string] , literal[string] ,** identifier[my_data] )
keyword[return] literal[string] % identifier[remote_host] | def send(remote_host=None):
""" Send local nagios data to a remote nago instance """
my_data = get()
if not remote_host:
remote_host = nago.extensions.settings.get('server') # depends on [control=['if'], data=[]]
remote_node = nago.core.get_node(remote_host)
remote_node.send_command('checkresults', 'post', **my_data)
return 'checkresults sent to %s' % remote_host |
def obfn_f(self, X=None):
    r"""Evaluate the data fidelity term
    :math:`(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2`.

    Uses the cached iterate ``self.X`` when ``X`` is not supplied.
    """
    if X is None:
        X = self.X
    residual = (self.D.dot(X) - self.S).ravel()
    return 0.5 * np.linalg.norm(residual) ** 2
constant[Compute data fidelity term :math:`(1/2) \| D \mathbf{x} -
\mathbf{s} \|_2^2`.
]
if compare[name[X] is constant[None]] begin[:]
variable[X] assign[=] name[self].X
return[binary_operation[constant[0.5] * binary_operation[call[name[np].linalg.norm, parameter[call[binary_operation[call[name[self].D.dot, parameter[name[X]]] - name[self].S].ravel, parameter[]]]] ** constant[2]]]] | keyword[def] identifier[obfn_f] ( identifier[self] , identifier[X] = keyword[None] ):
literal[string]
keyword[if] identifier[X] keyword[is] keyword[None] :
identifier[X] = identifier[self] . identifier[X]
keyword[return] literal[int] * identifier[np] . identifier[linalg] . identifier[norm] (( identifier[self] . identifier[D] . identifier[dot] ( identifier[X] )- identifier[self] . identifier[S] ). identifier[ravel] ())** literal[int] | def obfn_f(self, X=None):
"""Compute data fidelity term :math:`(1/2) \\| D \\mathbf{x} -
\\mathbf{s} \\|_2^2`.
"""
if X is None:
X = self.X # depends on [control=['if'], data=['X']]
return 0.5 * np.linalg.norm((self.D.dot(X) - self.S).ravel()) ** 2 |
def call_operation(self, operation, **kwargs):
    """Invoke an arbitrary operation supported by the Lambda handler.

    Extra keyword arguments are merged into the request payload.
    """
    payload = {'operation': operation, **kwargs}
    return self.invoke(payload)
constant[
A generic method to call any operation supported by the Lambda handler
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b26989d0>], [<ast.Name object at 0x7da1b269ab30>]]
call[name[data].update, parameter[name[kwargs]]]
return[call[name[self].invoke, parameter[name[data]]]] | keyword[def] identifier[call_operation] ( identifier[self] , identifier[operation] ,** identifier[kwargs] ):
literal[string]
identifier[data] ={ literal[string] : identifier[operation] }
identifier[data] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[self] . identifier[invoke] ( identifier[data] ) | def call_operation(self, operation, **kwargs):
"""
A generic method to call any operation supported by the Lambda handler
"""
data = {'operation': operation}
data.update(kwargs)
return self.invoke(data) |
def alloc_data(self, value):
    """
    Reserve a chunk of data to be embedded in the shellcode body.

    Arguments:
        value(...): The payload to embed; either a bytes value or a
            text value.
    Returns:
        ~pwnypack.types.Offset: The offset used to address the data.
    """
    # Text and binary types are disjoint on both Python 2 and 3, so the
    # branch order does not matter.
    if isinstance(value, six.text_type):
        # Text is stored UTF-8 encoded with a terminating NUL byte.
        return self._alloc_data(value.encode('utf-8') + b'\0')
    if isinstance(value, six.binary_type):
        return self._alloc_data(value)
    raise TypeError('No idea how to encode %s' % repr(value))
constant[
Allocate a piece of data that will be included in the shellcode body.
Arguments:
value(...): The value to add to the shellcode. Can be bytes or
string type.
Returns:
~pwnypack.types.Offset: The offset used to address the data.
]
if call[name[isinstance], parameter[name[value], name[six].binary_type]] begin[:]
return[call[name[self]._alloc_data, parameter[name[value]]]] | keyword[def] identifier[alloc_data] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[binary_type] ):
keyword[return] identifier[self] . identifier[_alloc_data] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[text_type] ):
keyword[return] identifier[self] . identifier[_alloc_data] ( identifier[value] . identifier[encode] ( literal[string] )+ literal[string] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[repr] ( identifier[value] )) | def alloc_data(self, value):
"""
Allocate a piece of data that will be included in the shellcode body.
Arguments:
value(...): The value to add to the shellcode. Can be bytes or
string type.
Returns:
~pwnypack.types.Offset: The offset used to address the data.
"""
if isinstance(value, six.binary_type):
return self._alloc_data(value) # depends on [control=['if'], data=[]]
elif isinstance(value, six.text_type):
return self._alloc_data(value.encode('utf-8') + b'\x00') # depends on [control=['if'], data=[]]
else:
raise TypeError('No idea how to encode %s' % repr(value)) |
def disable_logger(logger_name: str, propagate: bool = False):
    """Disable output for the logger of the specified name.

    Detaches every handler from the logger and (by default) stops it
    propagating records to ancestor loggers.

    :param logger_name: name of the logger to silence
    :param propagate: value assigned to the logger's ``propagate`` flag
    """
    log = logging.getLogger(logger_name)
    log.propagate = propagate
    # Iterate over a copy: removeHandler mutates log.handlers, and removing
    # from the list being iterated would skip every other handler.
    for handler in list(log.handlers):
        log.removeHandler(handler)
constant[Disable output for the logger of the specified name.]
variable[log] assign[=] call[name[logging].getLogger, parameter[name[logger_name]]]
name[log].propagate assign[=] name[propagate]
for taget[name[handler]] in starred[name[log].handlers] begin[:]
call[name[log].removeHandler, parameter[name[handler]]] | keyword[def] identifier[disable_logger] ( identifier[logger_name] : identifier[str] , identifier[propagate] : identifier[bool] = keyword[False] ):
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[logger_name] )
identifier[log] . identifier[propagate] = identifier[propagate]
keyword[for] identifier[handler] keyword[in] identifier[log] . identifier[handlers] :
identifier[log] . identifier[removeHandler] ( identifier[handler] ) | def disable_logger(logger_name: str, propagate: bool=False):
"""Disable output for the logger of the specified name."""
log = logging.getLogger(logger_name)
log.propagate = propagate
for handler in log.handlers:
log.removeHandler(handler) # depends on [control=['for'], data=['handler']] |
def auto_invalidate(self):
    """
    Invalidate the cache once its time-to-live has elapsed.
    """
    expiry = self._invalidated + timedelta(seconds=self._timetolive)
    if datetime.now() > expiry:
        self.invalidate()
constant[
Invalidate the cache if the current time is past the time to live.
]
variable[current] assign[=] call[name[datetime].now, parameter[]]
if compare[name[current] greater[>] binary_operation[name[self]._invalidated + call[name[timedelta], parameter[]]]] begin[:]
call[name[self].invalidate, parameter[]] | keyword[def] identifier[auto_invalidate] ( identifier[self] ):
literal[string]
identifier[current] = identifier[datetime] . identifier[now] ()
keyword[if] identifier[current] > identifier[self] . identifier[_invalidated] + identifier[timedelta] ( identifier[seconds] = identifier[self] . identifier[_timetolive] ):
identifier[self] . identifier[invalidate] () | def auto_invalidate(self):
"""
Invalidate the cache if the current time is past the time to live.
"""
current = datetime.now()
if current > self._invalidated + timedelta(seconds=self._timetolive):
self.invalidate() # depends on [control=['if'], data=[]] |
def _process_deriv_args(f, kwargs):
    """Handle common processing of arguments for derivative functions.

    Returns the tuple ``(n, axis, delta)`` where ``n`` is ``f.ndim``,
    ``axis`` is the normalized differentiation axis (default 0), and
    ``delta`` is an array of grid spacings arranged to broadcast against
    ``f`` along that axis.  Exactly one of ``kwargs['delta']`` (spacings)
    or ``kwargs['x']`` (value positions, differenced to obtain spacings)
    must be supplied.
    """
    n = f.ndim
    # Normalize a possibly-negative axis and validate it against f's rank.
    axis = normalize_axis_index(kwargs.get('axis', 0), n)
    # NOTE(review): message typo — "3 point" should read "3 points".
    if f.shape[axis] < 3:
        raise ValueError('f must have at least 3 point along the desired axis.')
    if 'delta' in kwargs:
        # The two spacing specifications are mutually exclusive.
        if 'x' in kwargs:
            raise ValueError('Cannot specify both "x" and "delta".')
        delta = atleast_1d(kwargs['delta'])
        if delta.size == 1:
            # Scalar spacing: broadcast it to the shape of diff(f, axis=axis),
            # i.e. f's shape with one fewer element along `axis`.
            diff_size = list(f.shape)
            diff_size[axis] -= 1
            # Stash and re-apply any units attribute — presumably because
            # np.broadcast_to does not preserve units on quantity arrays
            # even with subok=True (TODO confirm against the units library).
            delta_units = getattr(delta, 'units', None)
            delta = np.broadcast_to(delta, diff_size, subok=True)
            if delta_units is not None:
                delta = delta * delta_units
        else:
            # Per-point spacings: reshape so they broadcast along `axis`.
            delta = _broadcast_to_axis(delta, axis, n)
    elif 'x' in kwargs:
        # Positions supplied instead of spacings; difference them along `axis`.
        x = _broadcast_to_axis(kwargs['x'], axis, n)
        delta = diff(x, axis=axis)
    else:
        raise ValueError('Must specify either "x" or "delta" for value positions.')
    return n, axis, delta | def function[_process_deriv_args, parameter[f, kwargs]]:
constant[Handle common processing of arguments for derivative functions.]
variable[n] assign[=] name[f].ndim
variable[axis] assign[=] call[name[normalize_axis_index], parameter[call[name[kwargs].get, parameter[constant[axis], constant[0]]], name[n]]]
if compare[call[name[f].shape][name[axis]] less[<] constant[3]] begin[:]
<ast.Raise object at 0x7da1b1d14c70>
if compare[constant[delta] in name[kwargs]] begin[:]
if compare[constant[x] in name[kwargs]] begin[:]
<ast.Raise object at 0x7da1b1d16170>
variable[delta] assign[=] call[name[atleast_1d], parameter[call[name[kwargs]][constant[delta]]]]
if compare[name[delta].size equal[==] constant[1]] begin[:]
variable[diff_size] assign[=] call[name[list], parameter[name[f].shape]]
<ast.AugAssign object at 0x7da1b1d14a90>
variable[delta_units] assign[=] call[name[getattr], parameter[name[delta], constant[units], constant[None]]]
variable[delta] assign[=] call[name[np].broadcast_to, parameter[name[delta], name[diff_size]]]
if compare[name[delta_units] is_not constant[None]] begin[:]
variable[delta] assign[=] binary_operation[name[delta] * name[delta_units]]
return[tuple[[<ast.Name object at 0x7da1b22972e0>, <ast.Name object at 0x7da1b2295a80>, <ast.Name object at 0x7da1b2295870>]]] | keyword[def] identifier[_process_deriv_args] ( identifier[f] , identifier[kwargs] ):
literal[string]
identifier[n] = identifier[f] . identifier[ndim]
identifier[axis] = identifier[normalize_axis_index] ( identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ), identifier[n] )
keyword[if] identifier[f] . identifier[shape] [ identifier[axis] ]< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[delta] = identifier[atleast_1d] ( identifier[kwargs] [ literal[string] ])
keyword[if] identifier[delta] . identifier[size] == literal[int] :
identifier[diff_size] = identifier[list] ( identifier[f] . identifier[shape] )
identifier[diff_size] [ identifier[axis] ]-= literal[int]
identifier[delta_units] = identifier[getattr] ( identifier[delta] , literal[string] , keyword[None] )
identifier[delta] = identifier[np] . identifier[broadcast_to] ( identifier[delta] , identifier[diff_size] , identifier[subok] = keyword[True] )
keyword[if] identifier[delta_units] keyword[is] keyword[not] keyword[None] :
identifier[delta] = identifier[delta] * identifier[delta_units]
keyword[else] :
identifier[delta] = identifier[_broadcast_to_axis] ( identifier[delta] , identifier[axis] , identifier[n] )
keyword[elif] literal[string] keyword[in] identifier[kwargs] :
identifier[x] = identifier[_broadcast_to_axis] ( identifier[kwargs] [ literal[string] ], identifier[axis] , identifier[n] )
identifier[delta] = identifier[diff] ( identifier[x] , identifier[axis] = identifier[axis] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[n] , identifier[axis] , identifier[delta] | def _process_deriv_args(f, kwargs):
"""Handle common processing of arguments for derivative functions."""
n = f.ndim
axis = normalize_axis_index(kwargs.get('axis', 0), n)
if f.shape[axis] < 3:
raise ValueError('f must have at least 3 point along the desired axis.') # depends on [control=['if'], data=[]]
if 'delta' in kwargs:
if 'x' in kwargs:
raise ValueError('Cannot specify both "x" and "delta".') # depends on [control=['if'], data=[]]
delta = atleast_1d(kwargs['delta'])
if delta.size == 1:
diff_size = list(f.shape)
diff_size[axis] -= 1
delta_units = getattr(delta, 'units', None)
delta = np.broadcast_to(delta, diff_size, subok=True)
if delta_units is not None:
delta = delta * delta_units # depends on [control=['if'], data=['delta_units']] # depends on [control=['if'], data=[]]
else:
delta = _broadcast_to_axis(delta, axis, n) # depends on [control=['if'], data=['kwargs']]
elif 'x' in kwargs:
x = _broadcast_to_axis(kwargs['x'], axis, n)
delta = diff(x, axis=axis) # depends on [control=['if'], data=['kwargs']]
else:
raise ValueError('Must specify either "x" or "delta" for value positions.')
return (n, axis, delta) |
def gather_and_vote(self, voting_method, validate=False, winners=1,
                    **kwargs):
    """Gather candidates and votes, then run a single vote using them.

    Additional ``**kwargs`` are forwarded to the voting method.

    :param voting_method:
        The voting method to use, see
        :meth:`~creamas.vote.VoteOrganizer.compute_results` for details.
    :param bool validate: Validate gathered candidates before voting.
    :param int winners: The number of vote winners
    :returns: Winner(s) of the vote.
    """
    self.gather_candidates()
    if validate:
        self.validate_candidates()
    self.gather_votes()
    return self.compute_results(voting_method, self.votes,
                                winners=winners, **kwargs)
constant[Convenience function to gathering candidates and votes and
performing voting using them.
Additional ``**kwargs`` are passed down to voting method.
:param voting_method:
The voting method to use, see
:meth:`~creamas.vote.VoteOrganizer.compute_results` for details.
:param bool validate: Validate gathered candidates before voting.
:param int winners: The number of vote winners
:returns: Winner(s) of the vote.
]
call[name[self].gather_candidates, parameter[]]
if name[validate] begin[:]
call[name[self].validate_candidates, parameter[]]
call[name[self].gather_votes, parameter[]]
variable[r] assign[=] call[name[self].compute_results, parameter[name[voting_method], name[self].votes]]
return[name[r]] | keyword[def] identifier[gather_and_vote] ( identifier[self] , identifier[voting_method] , identifier[validate] = keyword[False] , identifier[winners] = literal[int] ,
** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[gather_candidates] ()
keyword[if] identifier[validate] :
identifier[self] . identifier[validate_candidates] ()
identifier[self] . identifier[gather_votes] ()
identifier[r] = identifier[self] . identifier[compute_results] ( identifier[voting_method] , identifier[self] . identifier[votes] , identifier[winners] = identifier[winners] ,
** identifier[kwargs] )
keyword[return] identifier[r] | def gather_and_vote(self, voting_method, validate=False, winners=1, **kwargs):
"""Convenience function to gathering candidates and votes and
performing voting using them.
Additional ``**kwargs`` are passed down to voting method.
:param voting_method:
The voting method to use, see
:meth:`~creamas.vote.VoteOrganizer.compute_results` for details.
:param bool validate: Validate gathered candidates before voting.
:param int winners: The number of vote winners
:returns: Winner(s) of the vote.
"""
self.gather_candidates()
if validate:
self.validate_candidates() # depends on [control=['if'], data=[]]
self.gather_votes()
r = self.compute_results(voting_method, self.votes, winners=winners, **kwargs)
return r |
def findLayer(self, layerName):
    """
    Returns the layer on this node whose name matches the inputed
    layer name, or None when no such layer exists.

    :param layerName | <str>
    :return <XNodeLayer>
    """
    matches = (lyr for lyr in self._layers if lyr.name() == layerName)
    return next(matches, None)
constant[
Looks up the layer for this node based on the inputed layer name.
:param layerName | <str>
:return <XNodeLayer>
]
for taget[name[layer]] in starred[name[self]._layers] begin[:]
if compare[call[name[layer].name, parameter[]] equal[==] name[layerName]] begin[:]
return[name[layer]]
return[constant[None]] | keyword[def] identifier[findLayer] ( identifier[self] , identifier[layerName] ):
literal[string]
keyword[for] identifier[layer] keyword[in] identifier[self] . identifier[_layers] :
keyword[if] ( identifier[layer] . identifier[name] ()== identifier[layerName] ):
keyword[return] identifier[layer]
keyword[return] keyword[None] | def findLayer(self, layerName):
"""
Looks up the layer for this node based on the inputed layer name.
:param layerName | <str>
:return <XNodeLayer>
"""
for layer in self._layers:
if layer.name() == layerName:
return layer # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['layer']]
return None |
def fromarray(values, labels=None, npartitions=None, engine=None):
    """
    Load images from an array.
    First dimension will be used to index images,
    so remaining dimensions after the first should
    be the dimensions of the images,
    e.g. (3, 100, 200) for 3 x (100, 200) images
    Parameters
    ----------
    values : array-like
        The array of images. Can be a numpy array,
        a bolt array, or an array-like.
    labels : array, optional, default = None
        Labels for records. If provided, should be one-dimensional.
    npartitions : int, default = None
        Number of partitions for parallelization (spark only)
    engine : object, default = None
        Computational engine (e.g. a SparkContext for spark)
    """
    # Imported lazily; presumably to avoid a circular import with .images
    # at module load time — TODO confirm.
    from .images import Images
    import bolt
    # Already a distributed bolt array: wrap it directly, skipping validation.
    if isinstance(values, bolt.spark.array.BoltArraySpark):
        return Images(values)
    values = asarray(values)
    if values.ndim < 2:
        raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim)
    # A single 2-d image is promoted to a stack containing one image.
    if values.ndim == 2:
        values = expand_dims(values, 0)
    # Require every image to share the first image's shape and dtype.
    shape = None
    dtype = None
    for im in values:
        if shape is None:
            shape = im.shape
            dtype = im.dtype
        if not im.shape == shape:
            raise ValueError('Arrays must all be of same shape; got both %s and %s' %
                             (str(shape), str(im.shape)))
        if not im.dtype == dtype:
            raise ValueError('Arrays must all be of same data type; got both %s and %s' %
                             (str(dtype), str(im.dtype)))
    if spark and isinstance(engine, spark):
        if not npartitions:
            npartitions = engine.defaultParallelism
        # Distribute along the image-index axis only (axis 0).
        values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
        values._ordered = True
        # NOTE(review): `labels` is silently dropped on this spark path —
        # confirm whether Images(values, labels=labels) was intended here.
        return Images(values)
    return Images(values, labels=labels) | def function[fromarray, parameter[values, labels, npartitions, engine]]:
constant[
Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark)
]
from relative_module[images] import module[Images]
import module[bolt]
if call[name[isinstance], parameter[name[values], name[bolt].spark.array.BoltArraySpark]] begin[:]
return[call[name[Images], parameter[name[values]]]]
variable[values] assign[=] call[name[asarray], parameter[name[values]]]
if compare[name[values].ndim less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da18dc9ad10>
if compare[name[values].ndim equal[==] constant[2]] begin[:]
variable[values] assign[=] call[name[expand_dims], parameter[name[values], constant[0]]]
variable[shape] assign[=] constant[None]
variable[dtype] assign[=] constant[None]
for taget[name[im]] in starred[name[values]] begin[:]
if compare[name[shape] is constant[None]] begin[:]
variable[shape] assign[=] name[im].shape
variable[dtype] assign[=] name[im].dtype
if <ast.UnaryOp object at 0x7da18dc9a950> begin[:]
<ast.Raise object at 0x7da18dc992a0>
if <ast.UnaryOp object at 0x7da18dc99030> begin[:]
<ast.Raise object at 0x7da18dc9b5e0>
if <ast.BoolOp object at 0x7da18dc9bee0> begin[:]
if <ast.UnaryOp object at 0x7da18dc98250> begin[:]
variable[npartitions] assign[=] name[engine].defaultParallelism
variable[values] assign[=] call[name[bolt].array, parameter[name[values]]]
name[values]._ordered assign[=] constant[True]
return[call[name[Images], parameter[name[values]]]]
return[call[name[Images], parameter[name[values]]]] | keyword[def] identifier[fromarray] ( identifier[values] , identifier[labels] = keyword[None] , identifier[npartitions] = keyword[None] , identifier[engine] = keyword[None] ):
literal[string]
keyword[from] . identifier[images] keyword[import] identifier[Images]
keyword[import] identifier[bolt]
keyword[if] identifier[isinstance] ( identifier[values] , identifier[bolt] . identifier[spark] . identifier[array] . identifier[BoltArraySpark] ):
keyword[return] identifier[Images] ( identifier[values] )
identifier[values] = identifier[asarray] ( identifier[values] )
keyword[if] identifier[values] . identifier[ndim] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[values] . identifier[ndim] )
keyword[if] identifier[values] . identifier[ndim] == literal[int] :
identifier[values] = identifier[expand_dims] ( identifier[values] , literal[int] )
identifier[shape] = keyword[None]
identifier[dtype] = keyword[None]
keyword[for] identifier[im] keyword[in] identifier[values] :
keyword[if] identifier[shape] keyword[is] keyword[None] :
identifier[shape] = identifier[im] . identifier[shape]
identifier[dtype] = identifier[im] . identifier[dtype]
keyword[if] keyword[not] identifier[im] . identifier[shape] == identifier[shape] :
keyword[raise] identifier[ValueError] ( literal[string] %
( identifier[str] ( identifier[shape] ), identifier[str] ( identifier[im] . identifier[shape] )))
keyword[if] keyword[not] identifier[im] . identifier[dtype] == identifier[dtype] :
keyword[raise] identifier[ValueError] ( literal[string] %
( identifier[str] ( identifier[dtype] ), identifier[str] ( identifier[im] . identifier[dtype] )))
keyword[if] identifier[spark] keyword[and] identifier[isinstance] ( identifier[engine] , identifier[spark] ):
keyword[if] keyword[not] identifier[npartitions] :
identifier[npartitions] = identifier[engine] . identifier[defaultParallelism]
identifier[values] = identifier[bolt] . identifier[array] ( identifier[values] , identifier[context] = identifier[engine] , identifier[npartitions] = identifier[npartitions] , identifier[axis] =( literal[int] ,))
identifier[values] . identifier[_ordered] = keyword[True]
keyword[return] identifier[Images] ( identifier[values] )
keyword[return] identifier[Images] ( identifier[values] , identifier[labels] = identifier[labels] ) | def fromarray(values, labels=None, npartitions=None, engine=None):
"""
Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark)
"""
from .images import Images
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Images(values) # depends on [control=['if'], data=[]]
values = asarray(values)
if values.ndim < 2:
raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim) # depends on [control=['if'], data=[]]
if values.ndim == 2:
values = expand_dims(values, 0) # depends on [control=['if'], data=[]]
shape = None
dtype = None
for im in values:
if shape is None:
shape = im.shape
dtype = im.dtype # depends on [control=['if'], data=['shape']]
if not im.shape == shape:
raise ValueError('Arrays must all be of same shape; got both %s and %s' % (str(shape), str(im.shape))) # depends on [control=['if'], data=[]]
if not im.dtype == dtype:
raise ValueError('Arrays must all be of same data type; got both %s and %s' % (str(dtype), str(im.dtype))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['im']]
if spark and isinstance(engine, spark):
if not npartitions:
npartitions = engine.defaultParallelism # depends on [control=['if'], data=[]]
values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
values._ordered = True
return Images(values) # depends on [control=['if'], data=[]]
return Images(values, labels=labels) |
def register(model_or_iterable, **options):
    """
    Class decorator that registers the given model(s) for translation.

    The model(s) should be Model classes, not instances.  Fields declared
    for translation on a base class are inherited by subclasses.  If the
    model or one of its subclasses is already registered for translation,
    an exception is raised.

    Usage::

        @register(Author)
        class AuthorTranslation(TranslationOptions):
            pass
    """
    # Imported lazily so merely importing this module does not pull in the
    # translator machinery.
    from modeltranslation.translator import translator, TranslationOptions

    def _decorator(options_class):
        # Only TranslationOptions subclasses are meaningful here; reject
        # anything else early with a clear message.
        if not issubclass(options_class, TranslationOptions):
            raise ValueError('Wrapped class must subclass TranslationOptions.')
        translator.register(model_or_iterable, options_class, **options)
        return options_class

    return _decorator
constant[
Registers the given model(s) with the given translation options.
The model(s) should be Model classes, not instances.
Fields declared for translation on a base class are inherited by
subclasses. If the model or one of its subclasses is already
registered for translation, this will raise an exception.
@register(Author)
class AuthorTranslation(TranslationOptions):
pass
]
from relative_module[modeltranslation.translator] import module[translator], module[TranslationOptions]
def function[wrapper, parameter[opts_class]]:
if <ast.UnaryOp object at 0x7da1b1e00bb0> begin[:]
<ast.Raise object at 0x7da1b1e006a0>
call[name[translator].register, parameter[name[model_or_iterable], name[opts_class]]]
return[name[opts_class]]
return[name[wrapper]] | keyword[def] identifier[register] ( identifier[model_or_iterable] ,** identifier[options] ):
literal[string]
keyword[from] identifier[modeltranslation] . identifier[translator] keyword[import] identifier[translator] , identifier[TranslationOptions]
keyword[def] identifier[wrapper] ( identifier[opts_class] ):
keyword[if] keyword[not] identifier[issubclass] ( identifier[opts_class] , identifier[TranslationOptions] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[translator] . identifier[register] ( identifier[model_or_iterable] , identifier[opts_class] ,** identifier[options] )
keyword[return] identifier[opts_class]
keyword[return] identifier[wrapper] | def register(model_or_iterable, **options):
"""
Registers the given model(s) with the given translation options.
The model(s) should be Model classes, not instances.
Fields declared for translation on a base class are inherited by
subclasses. If the model or one of its subclasses is already
registered for translation, this will raise an exception.
@register(Author)
class AuthorTranslation(TranslationOptions):
pass
"""
from modeltranslation.translator import translator, TranslationOptions
def wrapper(opts_class):
if not issubclass(opts_class, TranslationOptions):
raise ValueError('Wrapped class must subclass TranslationOptions.') # depends on [control=['if'], data=[]]
translator.register(model_or_iterable, opts_class, **options)
return opts_class
return wrapper |
def job_started_message(self, job, queue):
    """
    Return the log message emitted just before the job starts executing.
    """
    # Message layout: [queue-name|job-pk|job-identifier] starting
    parts = (queue._cached_name, job.pk.get(), job._cached_identifier)
    return '[%s|%s|%s] starting' % parts
constant[
Return the message to log just befre the execution of the job
]
return[binary_operation[constant[[%s|%s|%s] starting] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1302380>, <ast.Call object at 0x7da1b1302ad0>, <ast.Attribute object at 0x7da20c76cf10>]]]] | keyword[def] identifier[job_started_message] ( identifier[self] , identifier[job] , identifier[queue] ):
literal[string]
keyword[return] literal[string] %( identifier[queue] . identifier[_cached_name] , identifier[job] . identifier[pk] . identifier[get] (),
identifier[job] . identifier[_cached_identifier] ) | def job_started_message(self, job, queue):
"""
Return the message to log just befre the execution of the job
"""
return '[%s|%s|%s] starting' % (queue._cached_name, job.pk.get(), job._cached_identifier) |
def cut(string, characters=2, trailing="normal"):
    """
    Split a string into a list of N characters each.

    .. code:: python

        reusables.cut("abcdefghi")
        # ['ab', 'cd', 'ef', 'gh', 'i']

    trailing gives you the following options:

    * normal: leaves remaining characters in their own last position
    * remove: return the list without the remainder characters
    * combine: add the remainder characters to the previous set
    * error: raise an IndexError if there are remaining characters

    .. code:: python

        reusables.cut("abcdefghi", 2, "error")
        # Traceback (most recent call last):
        # ...
        # IndexError: String of length 9 not divisible by 2 to cut

        reusables.cut("abcdefghi", 2, "remove")
        # ['ab', 'cd', 'ef', 'gh']

        reusables.cut("abcdefghi", 2, "combine")
        # ['ab', 'cd', 'ef', 'ghi']

    :param string: string to modify
    :param characters: how many characters to split it into
    :param trailing: "normal", "remove", "combine", or "error"
        (case-insensitive)
    :return: list of the cut string
    """
    split_str = [string[i:i + characters]
                 for i in range(0, len(string), characters)]
    # Normalize once; option matching below is case-insensitive anyway.
    trailing = trailing.lower()
    # Only the last chunk can be short, and an empty input has no chunks at
    # all (guarding on split_str avoids an IndexError on "").
    if split_str and trailing != "normal" and len(split_str[-1]) != characters:
        if trailing == "remove":
            return split_str[:-1]
        if trailing == "combine" and len(split_str) >= 2:
            return split_str[:-2] + [split_str[-2] + split_str[-1]]
        if trailing == "error":
            raise IndexError("String of length {0} not divisible by {1} to"
                             " cut".format(len(string), characters))
    return split_str
constant[
Split a string into a list of N characters each.
.. code:: python
reusables.cut("abcdefghi")
# ['ab', 'cd', 'ef', 'gh', 'i']
trailing gives you the following options:
* normal: leaves remaining characters in their own last position
* remove: return the list without the remainder characters
* combine: add the remainder characters to the previous set
* error: raise an IndexError if there are remaining characters
.. code:: python
reusables.cut("abcdefghi", 2, "error")
# Traceback (most recent call last):
# ...
# IndexError: String of length 9 not divisible by 2 to splice
reusables.cut("abcdefghi", 2, "remove")
# ['ab', 'cd', 'ef', 'gh']
reusables.cut("abcdefghi", 2, "combine")
# ['ab', 'cd', 'ef', 'ghi']
:param string: string to modify
:param characters: how many characters to split it into
:param trailing: "normal", "remove", "combine", or "error"
:return: list of the cut string
]
variable[split_str] assign[=] <ast.ListComp object at 0x7da18dc98c10>
if <ast.BoolOp object at 0x7da18dc982e0> begin[:]
if compare[call[name[trailing].lower, parameter[]] equal[==] constant[remove]] begin[:]
return[call[name[split_str]][<ast.Slice object at 0x7da18dc9a1d0>]]
if <ast.BoolOp object at 0x7da18dc99ed0> begin[:]
return[binary_operation[call[name[split_str]][<ast.Slice object at 0x7da1b0fdf520>] + list[[<ast.BinOp object at 0x7da1b0fdd0f0>]]]]
if compare[call[name[trailing].lower, parameter[]] equal[==] constant[error]] begin[:]
<ast.Raise object at 0x7da1b0fdf160>
return[name[split_str]] | keyword[def] identifier[cut] ( identifier[string] , identifier[characters] = literal[int] , identifier[trailing] = literal[string] ):
literal[string]
identifier[split_str] =[ identifier[string] [ identifier[i] : identifier[i] + identifier[characters] ] keyword[for]
identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[string] ), identifier[characters] )]
keyword[if] identifier[trailing] != literal[string] keyword[and] identifier[len] ( identifier[split_str] [- literal[int] ])!= identifier[characters] :
keyword[if] identifier[trailing] . identifier[lower] ()== literal[string] :
keyword[return] identifier[split_str] [:- literal[int] ]
keyword[if] identifier[trailing] . identifier[lower] ()== literal[string] keyword[and] identifier[len] ( identifier[split_str] )>= literal[int] :
keyword[return] identifier[split_str] [:- literal[int] ]+[ identifier[split_str] [- literal[int] ]+ identifier[split_str] [- literal[int] ]]
keyword[if] identifier[trailing] . identifier[lower] ()== literal[string] :
keyword[raise] identifier[IndexError] ( literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[string] ), identifier[characters] ))
keyword[return] identifier[split_str] | def cut(string, characters=2, trailing='normal'):
"""
Split a string into a list of N characters each.
.. code:: python
reusables.cut("abcdefghi")
# ['ab', 'cd', 'ef', 'gh', 'i']
trailing gives you the following options:
* normal: leaves remaining characters in their own last position
* remove: return the list without the remainder characters
* combine: add the remainder characters to the previous set
* error: raise an IndexError if there are remaining characters
.. code:: python
reusables.cut("abcdefghi", 2, "error")
# Traceback (most recent call last):
# ...
# IndexError: String of length 9 not divisible by 2 to splice
reusables.cut("abcdefghi", 2, "remove")
# ['ab', 'cd', 'ef', 'gh']
reusables.cut("abcdefghi", 2, "combine")
# ['ab', 'cd', 'ef', 'ghi']
:param string: string to modify
:param characters: how many characters to split it into
:param trailing: "normal", "remove", "combine", or "error"
:return: list of the cut string
"""
split_str = [string[i:i + characters] for i in range(0, len(string), characters)]
if trailing != 'normal' and len(split_str[-1]) != characters:
if trailing.lower() == 'remove':
return split_str[:-1] # depends on [control=['if'], data=[]]
if trailing.lower() == 'combine' and len(split_str) >= 2:
return split_str[:-2] + [split_str[-2] + split_str[-1]] # depends on [control=['if'], data=[]]
if trailing.lower() == 'error':
raise IndexError('String of length {0} not divisible by {1} to cut'.format(len(string), characters)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return split_str |
def combineIndepDstns(*distributions):
    '''
    Given n lists (or tuples) whose elements represent n independent, discrete
    probability spaces (probabilities and values), construct a joint pmf over
    all combinations of these independent points.  Can take multivariate
    discrete distributions as inputs.

    Parameters
    ----------
    distributions : [np.array]
        Arbitrary number of distributions (pmfs).  Each pmf is a list or
        tuple whose first vector holds probabilities; every subsequent
        vector holds values.  For each pmf, this should be true:
        len(X_pmf[0]) == len(X_pmf[j]) for j in range(1,len(distributions))

    Returns
    -------
    List of arrays, consisting of:

    P_out: np.array
        Probability associated with each point in X_out.
    X_out: np.array (as many as in *distributions)
        Discrete points for the joint discrete probability mass function.

    Written by Nathan Palmer
    Latest update: 5 July August 2017 by Matthew N White
    '''
    # Quick sanity check: probability vector and last value vector must align.
    for pmf in distributions:
        assert len(pmf[0]) == len(pmf[-1]), "len(dist[0]) != len(dist[-1])"

    # Gather per-distribution sizes (number of points and value dimensions).
    n_dists = len(distributions)
    lengths = tuple(len(pmf[0]) for pmf in distributions)
    dims = tuple(len(pmf) - 1 for pmf in distributions)

    X_out = []
    P_temp = []
    # Tile each distribution across every other distribution's axis, then
    # flatten.  np.meshgrid is avoided because it does not easily support
    # non-symmetric grids.
    for idx, pmf in enumerate(distributions):
        # Shape placing this distribution's points on their own axis.
        newshape = (1,) * idx + (lengths[idx],) + (1,) * (n_dists - idx)
        # Repeat counts over all the *other* distributions' axes.
        tiles = lengths[:idx] + (1,) + lengths[idx + 1:]

        # Probabilities first...
        P_temp.append(np.tile(pmf[0].reshape(newshape), tiles).ravel())
        # ...then each value variable of this (possibly multivariate) pmf.
        for n in range(1, dims[idx] + 1):
            X_out.append(np.tile(pmf[n].reshape(newshape), tiles).ravel())

    # Joint probability of each combination is the elementwise product of the
    # flattened per-distribution probability arrays.
    P_out = np.prod(np.array(P_temp), axis=0)
    assert np.isclose(np.sum(P_out), 1), 'Probabilities do not sum to 1!'
    return [P_out] + X_out
constant[
Given n lists (or tuples) whose elements represent n independent, discrete
probability spaces (probabilities and values), construct a joint pmf over
all combinations of these independent points. Can take multivariate discrete
distributions as inputs.
Parameters
----------
distributions : [np.array]
Arbitrary number of distributions (pmfs). Each pmf is a list or tuple.
For each pmf, the first vector is probabilities and all subsequent vectors
are values. For each pmf, this should be true:
len(X_pmf[0]) == len(X_pmf[j]) for j in range(1,len(distributions))
Returns
-------
List of arrays, consisting of:
P_out: np.array
Probability associated with each point in X_out.
X_out: np.array (as many as in *distributions)
Discrete points for the joint discrete probability mass function.
Written by Nathan Palmer
Latest update: 5 July August 2017 by Matthew N White
]
for taget[name[dist]] in starred[name[distributions]] begin[:]
assert[compare[call[name[len], parameter[call[name[dist]][constant[0]]]] equal[==] call[name[len], parameter[call[name[dist]][<ast.UnaryOp object at 0x7da204346260>]]]]]
variable[dist_lengths] assign[=] tuple[[]]
variable[dist_dims] assign[=] tuple[[]]
for taget[name[dist]] in starred[name[distributions]] begin[:]
<ast.AugAssign object at 0x7da204347f10>
<ast.AugAssign object at 0x7da204346bf0>
variable[number_of_distributions] assign[=] call[name[len], parameter[name[distributions]]]
variable[X_out] assign[=] list[[]]
variable[P_temp] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204346d40>, <ast.Name object at 0x7da204344220>]]] in starred[call[name[enumerate], parameter[name[distributions]]]] begin[:]
variable[dist_newshape] assign[=] binary_operation[binary_operation[binary_operation[tuple[[<ast.Constant object at 0x7da204347a00>]] * name[dd]] + tuple[[<ast.Call object at 0x7da2043460b0>]]] + binary_operation[tuple[[<ast.Constant object at 0x7da204345750>]] * binary_operation[name[number_of_distributions] - name[dd]]]]
variable[dist_tiles] assign[=] binary_operation[binary_operation[call[name[dist_lengths]][<ast.Slice object at 0x7da204347220>] + tuple[[<ast.Constant object at 0x7da204346f20>]]] + call[name[dist_lengths]][<ast.Slice object at 0x7da204344100>]]
variable[Pmesh] assign[=] call[name[np].tile, parameter[call[call[name[dist]][constant[0]].reshape, parameter[name[dist_newshape]]], name[dist_tiles]]]
variable[flatP] assign[=] call[name[Pmesh].ravel, parameter[]]
<ast.AugAssign object at 0x7da204347850>
for taget[name[n]] in starred[call[name[range], parameter[constant[1], binary_operation[call[name[dist_dims]][name[dd]] + constant[1]]]]] begin[:]
variable[Xmesh] assign[=] call[name[np].tile, parameter[call[call[name[dist]][name[n]].reshape, parameter[name[dist_newshape]]], name[dist_tiles]]]
variable[flatX] assign[=] call[name[Xmesh].ravel, parameter[]]
<ast.AugAssign object at 0x7da1b2345600>
variable[P_out] assign[=] call[name[np].prod, parameter[call[name[np].array, parameter[name[P_temp]]]]]
assert[call[name[np].isclose, parameter[call[name[np].sum, parameter[name[P_out]]], constant[1]]]]
return[binary_operation[list[[<ast.Name object at 0x7da1b23464d0>]] + name[X_out]]] | keyword[def] identifier[combineIndepDstns] (* identifier[distributions] ):
literal[string]
keyword[for] identifier[dist] keyword[in] identifier[distributions] :
keyword[assert] identifier[len] ( identifier[dist] [ literal[int] ])== identifier[len] ( identifier[dist] [- literal[int] ]), literal[string]
identifier[dist_lengths] =()
identifier[dist_dims] =()
keyword[for] identifier[dist] keyword[in] identifier[distributions] :
identifier[dist_lengths] +=( identifier[len] ( identifier[dist] [ literal[int] ]),)
identifier[dist_dims] +=( identifier[len] ( identifier[dist] )- literal[int] ,)
identifier[number_of_distributions] = identifier[len] ( identifier[distributions] )
identifier[X_out] =[]
identifier[P_temp] =[]
keyword[for] identifier[dd] , identifier[dist] keyword[in] identifier[enumerate] ( identifier[distributions] ):
identifier[dist_newshape] =( literal[int] ,)* identifier[dd] +( identifier[len] ( identifier[dist] [ literal[int] ]),)+( literal[int] ,)*( identifier[number_of_distributions] - identifier[dd] )
identifier[dist_tiles] = identifier[dist_lengths] [: identifier[dd] ]+( literal[int] ,)+ identifier[dist_lengths] [ identifier[dd] + literal[int] :]
identifier[Pmesh] = identifier[np] . identifier[tile] ( identifier[dist] [ literal[int] ]. identifier[reshape] ( identifier[dist_newshape] ), identifier[dist_tiles] )
identifier[flatP] = identifier[Pmesh] . identifier[ravel] ()
identifier[P_temp] +=[ identifier[flatP] ,]
keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[dist_dims] [ identifier[dd] ]+ literal[int] ):
identifier[Xmesh] = identifier[np] . identifier[tile] ( identifier[dist] [ identifier[n] ]. identifier[reshape] ( identifier[dist_newshape] ), identifier[dist_tiles] )
identifier[flatX] = identifier[Xmesh] . identifier[ravel] ()
identifier[X_out] +=[ identifier[flatX] ,]
identifier[P_out] = identifier[np] . identifier[prod] ( identifier[np] . identifier[array] ( identifier[P_temp] ), identifier[axis] = literal[int] )
keyword[assert] identifier[np] . identifier[isclose] ( identifier[np] . identifier[sum] ( identifier[P_out] ), literal[int] ), literal[string]
keyword[return] [ identifier[P_out] ,]+ identifier[X_out] | def combineIndepDstns(*distributions):
"""
Given n lists (or tuples) whose elements represent n independent, discrete
probability spaces (probabilities and values), construct a joint pmf over
all combinations of these independent points. Can take multivariate discrete
distributions as inputs.
Parameters
----------
distributions : [np.array]
Arbitrary number of distributions (pmfs). Each pmf is a list or tuple.
For each pmf, the first vector is probabilities and all subsequent vectors
are values. For each pmf, this should be true:
len(X_pmf[0]) == len(X_pmf[j]) for j in range(1,len(distributions))
Returns
-------
List of arrays, consisting of:
P_out: np.array
Probability associated with each point in X_out.
X_out: np.array (as many as in *distributions)
Discrete points for the joint discrete probability mass function.
Written by Nathan Palmer
Latest update: 5 July August 2017 by Matthew N White
"""
# Very quick and incomplete parameter check:
for dist in distributions:
assert len(dist[0]) == len(dist[-1]), 'len(dist[0]) != len(dist[-1])' # depends on [control=['for'], data=['dist']]
# Get information on the distributions
dist_lengths = ()
dist_dims = ()
for dist in distributions:
dist_lengths += (len(dist[0]),)
dist_dims += (len(dist) - 1,) # depends on [control=['for'], data=['dist']]
number_of_distributions = len(distributions)
# Initialize lists we will use
X_out = []
P_temp = []
# Now loop through the distributions, tiling and flattening as necessary.
for (dd, dist) in enumerate(distributions):
# The shape we want before we tile
dist_newshape = (1,) * dd + (len(dist[0]),) + (1,) * (number_of_distributions - dd)
# The tiling we want to do
dist_tiles = dist_lengths[:dd] + (1,) + dist_lengths[dd + 1:]
# Now we are ready to tile.
# We don't use the np.meshgrid commands, because they do not
# easily support non-symmetric grids.
# First deal with probabilities
Pmesh = np.tile(dist[0].reshape(dist_newshape), dist_tiles) # Tiling
flatP = Pmesh.ravel() # Flatten the tiled arrays
P_temp += [flatP] #Add the flattened arrays to the output lists
# Then loop through each value variable
for n in range(1, dist_dims[dd] + 1):
Xmesh = np.tile(dist[n].reshape(dist_newshape), dist_tiles)
flatX = Xmesh.ravel()
X_out += [flatX] # depends on [control=['for'], data=['n']] # depends on [control=['for'], data=[]]
# We're done getting the flattened X_out arrays we wanted.
# However, we have a bunch of flattened P_temp arrays, and just want one
# probability array. So get the probability array, P_out, here.
P_out = np.prod(np.array(P_temp), axis=0)
assert np.isclose(np.sum(P_out), 1), 'Probabilities do not sum to 1!'
return [P_out] + X_out |
def plot_element_profile(self, element, comp, show_label_index=None,
                         xlim=5):
        """
        Draw the element profile plot for a composition varying different
        chemical potential of an element.

        X value is the negative value of the chemical potential reference to
        elemental chemical potential. For example, if choose Element("Li"),
        X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
        Y values represent for the number of element uptake in this composition
        (unit: per atom). All reactions are printed to help choosing the
        profile steps you want to show label in the plot.

        Args:
            element (Element): An element of which the chemical potential is
                considered. It also must be in the phase diagram.
            comp (Composition): A composition.
            show_label_index (list of integers): The labels for reaction products
                you want to show in the plot. Default to None (not showing any
                annotation for reaction products). For the profile steps you want
                to show the labels, just add it to the show_label_index. The
                profile step counts from zero. For example, you can set
                show_label_index=[0, 2, 5] to label profile step 0,2,5.
            xlim (float): The max x value. x value is from 0 to xlim. Default to
                5 eV.

        Returns:
            Plot of element profile evolution by varying the chemical potential
            of an element.
        """
        plt = pretty_plot(12, 8)
        pd = self._pd
        # Each profile step carries the reaction, the chemical potential at
        # which it occurs, and the cumulative element uptake ("evolution").
        evolution = pd.get_element_profile(element, comp)
        # Normalize uptake by the atom count of the first reactant so the
        # y axis reads "per atom".
        num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
        # Chemical potential of the first step, used as the zero reference.
        element_energy = evolution[0]['chempot']
        for i, d in enumerate(evolution):
            # x coordinate: negative chempot change relative to the reference.
            v = -(d["chempot"] - element_energy)
            # Print every reaction so the user can pick show_label_index values.
            print("index= %s, -Δμ=%.4f(eV)," % (i, v), d["reaction"])
            if i != 0:
                # Vertical connector from the previous plateau down/up to the
                # current uptake level (x2/y1 carry over from last iteration).
                plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
                         'k', linewidth=2.5)
            x1 = v
            y1 = d["evolution"] / num_atoms
            if i != len(evolution) - 1:
                # Plateau extends to the chempot of the next profile step.
                x2 = - (evolution[i + 1]["chempot"] - element_energy)
            else:
                # Last plateau extends to the plot's right edge.  This was a
                # hard-coded 5.0, which ignored a non-default xlim.
                x2 = xlim
            if show_label_index is not None and i in show_label_index:
                # Wrap digit runs in LaTeX subscripts (e.g. Li2O -> Li$_{2}$O)
                # and skip the elemental species itself.
                products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
                            for p in d["reaction"].products
                            if p.reduced_formula != element.symbol]
                plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
                             fontsize=24, color='r')
                plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
            else:
                plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
        plt.xlim((0, xlim))
        plt.xlabel("-$\\Delta{\\mu}$ (eV)")
        plt.ylabel("Uptake per atom")
        return plt
constant[
Draw the element profile plot for a composition varying different
chemical potential of an element.
X value is the negative value of the chemical potential reference to
elemental chemical potential. For example, if choose Element("Li"),
X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
Y values represent for the number of element uptake in this composition
(unit: per atom). All reactions are printed to help choosing the
profile steps you want to show label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The labels for reaction products
you want to show in the plot. Default to None (not showing any
annotation for reaction products). For the profile steps you want
to show the labels, just add it to the show_label_index. The
profile step counts from zero. For example, you can set
show_label_index=[0, 2, 5] to label profile step 0,2,5.
xlim (float): The max x value. x value is from 0 to xlim. Default to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
]
variable[plt] assign[=] call[name[pretty_plot], parameter[constant[12], constant[8]]]
variable[pd] assign[=] name[self]._pd
variable[evolution] assign[=] call[name[pd].get_element_profile, parameter[name[element], name[comp]]]
variable[num_atoms] assign[=] call[call[call[name[evolution]][constant[0]]][constant[reaction]].reactants][constant[0]].num_atoms
variable[element_energy] assign[=] call[call[name[evolution]][constant[0]]][constant[chempot]]
for taget[tuple[[<ast.Name object at 0x7da20c990f10>, <ast.Name object at 0x7da20c990490>]]] in starred[call[name[enumerate], parameter[name[evolution]]]] begin[:]
variable[v] assign[=] <ast.UnaryOp object at 0x7da20c9937c0>
call[name[print], parameter[binary_operation[constant[index= %s, -Δμ=%.4f(eV),] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9921a0>, <ast.Name object at 0x7da20c990be0>]]], call[name[d]][constant[reaction]]]]
if compare[name[i] not_equal[!=] constant[0]] begin[:]
call[name[plt].plot, parameter[list[[<ast.Name object at 0x7da20c990f70>, <ast.Name object at 0x7da20c992c20>]], list[[<ast.Name object at 0x7da20c993100>, <ast.BinOp object at 0x7da20c993d30>]], constant[k]]]
variable[x1] assign[=] name[v]
variable[y1] assign[=] binary_operation[call[name[d]][constant[evolution]] / name[num_atoms]]
if compare[name[i] not_equal[!=] binary_operation[call[name[len], parameter[name[evolution]]] - constant[1]]] begin[:]
variable[x2] assign[=] <ast.UnaryOp object at 0x7da20c993670>
if <ast.BoolOp object at 0x7da20c993850> begin[:]
variable[products] assign[=] <ast.ListComp object at 0x7da20c9904f0>
call[name[plt].annotate, parameter[call[constant[, ].join, parameter[name[products]]]]]
call[name[plt].plot, parameter[list[[<ast.Name object at 0x7da20c9910f0>, <ast.Name object at 0x7da20c992680>]], list[[<ast.Name object at 0x7da20c992c80>, <ast.Name object at 0x7da20c993a60>]], constant[r]]]
call[name[plt].xlim, parameter[tuple[[<ast.Constant object at 0x7da20c991150>, <ast.Name object at 0x7da20c993220>]]]]
call[name[plt].xlabel, parameter[constant[-$\Delta{\mu}$ (eV)]]]
call[name[plt].ylabel, parameter[constant[Uptake per atom]]]
return[name[plt]] | keyword[def] identifier[plot_element_profile] ( identifier[self] , identifier[element] , identifier[comp] , identifier[show_label_index] = keyword[None] ,
identifier[xlim] = literal[int] ):
literal[string]
identifier[plt] = identifier[pretty_plot] ( literal[int] , literal[int] )
identifier[pd] = identifier[self] . identifier[_pd]
identifier[evolution] = identifier[pd] . identifier[get_element_profile] ( identifier[element] , identifier[comp] )
identifier[num_atoms] = identifier[evolution] [ literal[int] ][ literal[string] ]. identifier[reactants] [ literal[int] ]. identifier[num_atoms]
identifier[element_energy] = identifier[evolution] [ literal[int] ][ literal[string] ]
keyword[for] identifier[i] , identifier[d] keyword[in] identifier[enumerate] ( identifier[evolution] ):
identifier[v] =-( identifier[d] [ literal[string] ]- identifier[element_energy] )
identifier[print] ( literal[string] %( identifier[i] , identifier[v] ), identifier[d] [ literal[string] ])
keyword[if] identifier[i] != literal[int] :
identifier[plt] . identifier[plot] ([ identifier[x2] , identifier[x2] ],[ identifier[y1] , identifier[d] [ literal[string] ]/ identifier[num_atoms] ],
literal[string] , identifier[linewidth] = literal[int] )
identifier[x1] = identifier[v]
identifier[y1] = identifier[d] [ literal[string] ]/ identifier[num_atoms]
keyword[if] identifier[i] != identifier[len] ( identifier[evolution] )- literal[int] :
identifier[x2] =-( identifier[evolution] [ identifier[i] + literal[int] ][ literal[string] ]- identifier[element_energy] )
keyword[else] :
identifier[x2] = literal[int]
keyword[if] identifier[show_label_index] keyword[is] keyword[not] keyword[None] keyword[and] identifier[i] keyword[in] identifier[show_label_index] :
identifier[products] =[ identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[p] . identifier[reduced_formula] )
keyword[for] identifier[p] keyword[in] identifier[d] [ literal[string] ]. identifier[products]
keyword[if] identifier[p] . identifier[reduced_formula] != identifier[element] . identifier[symbol] ]
identifier[plt] . identifier[annotate] ( literal[string] . identifier[join] ( identifier[products] ), identifier[xy] =( identifier[v] + literal[int] , identifier[y1] + literal[int] ),
identifier[fontsize] = literal[int] , identifier[color] = literal[string] )
identifier[plt] . identifier[plot] ([ identifier[x1] , identifier[x2] ],[ identifier[y1] , identifier[y1] ], literal[string] , identifier[linewidth] = literal[int] )
keyword[else] :
identifier[plt] . identifier[plot] ([ identifier[x1] , identifier[x2] ],[ identifier[y1] , identifier[y1] ], literal[string] , identifier[linewidth] = literal[int] )
identifier[plt] . identifier[xlim] (( literal[int] , identifier[xlim] ))
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[return] identifier[plt] | def plot_element_profile(self, element, comp, show_label_index=None, xlim=5):
"""
Draw the element profile plot for a composition varying different
chemical potential of an element.
X value is the negative value of the chemical potential reference to
elemental chemical potential. For example, if choose Element("Li"),
X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
Y values represent for the number of element uptake in this composition
(unit: per atom). All reactions are printed to help choosing the
profile steps you want to show label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The labels for reaction products
you want to show in the plot. Default to None (not showing any
annotation for reaction products). For the profile steps you want
to show the labels, just add it to the show_label_index. The
profile step counts from zero. For example, you can set
show_label_index=[0, 2, 5] to label profile step 0,2,5.
xlim (float): The max x value. x value is from 0 to xlim. Default to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
"""
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]['reaction'].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
for (i, d) in enumerate(evolution):
v = -(d['chempot'] - element_energy)
print('index= %s, -Δμ=%.4f(eV),' % (i, v), d['reaction'])
if i != 0:
plt.plot([x2, x2], [y1, d['evolution'] / num_atoms], 'k', linewidth=2.5) # depends on [control=['if'], data=[]]
x1 = v
y1 = d['evolution'] / num_atoms
if i != len(evolution) - 1:
x2 = -(evolution[i + 1]['chempot'] - element_energy) # depends on [control=['if'], data=['i']]
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub('(\\d+)', '$_{\\1}$', p.reduced_formula) for p in d['reaction'].products if p.reduced_formula != element.symbol]
plt.annotate(', '.join(products), xy=(v + 0.05, y1 + 0.05), fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3) # depends on [control=['if'], data=[]]
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5) # depends on [control=['for'], data=[]]
plt.xlim((0, xlim))
plt.xlabel('-$\\Delta{\\mu}$ (eV)')
plt.ylabel('Uptake per atom')
return plt |
def on_sigabrt(self, signal_unused, frame_unused):
"""Stop the HTTP Server and IO Loop, shutting down the process
:param int signal_unused: Unused signal number
:param frame frame_unused: Unused frame the signal was caught in
"""
LOGGER.info('Stopping HTTP Server and IOLoop')
self.http_server.stop()
self.ioloop.stop() | def function[on_sigabrt, parameter[self, signal_unused, frame_unused]]:
constant[Stop the HTTP Server and IO Loop, shutting down the process
:param int signal_unused: Unused signal number
:param frame frame_unused: Unused frame the signal was caught in
]
call[name[LOGGER].info, parameter[constant[Stopping HTTP Server and IOLoop]]]
call[name[self].http_server.stop, parameter[]]
call[name[self].ioloop.stop, parameter[]] | keyword[def] identifier[on_sigabrt] ( identifier[self] , identifier[signal_unused] , identifier[frame_unused] ):
literal[string]
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[self] . identifier[http_server] . identifier[stop] ()
identifier[self] . identifier[ioloop] . identifier[stop] () | def on_sigabrt(self, signal_unused, frame_unused):
"""Stop the HTTP Server and IO Loop, shutting down the process
:param int signal_unused: Unused signal number
:param frame frame_unused: Unused frame the signal was caught in
"""
LOGGER.info('Stopping HTTP Server and IOLoop')
self.http_server.stop()
self.ioloop.stop() |
def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> List[List]:
"""
Convert vectors of probabilities to labels using confident threshold
(if probability to belong with the class is bigger than confident_threshold, sample belongs with the class;
if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability)
Args:
proba: list of samples where each sample is a vector of probabilities to belong with given classes
confident_threshold (float): boundary of probability to belong with a class
classes: array of classes' names
Returns:
list of lists of labels for each sample
"""
y = []
for sample in proba:
to_add = np.where(sample > confident_threshold)[0]
if len(to_add) > 0:
y.append(np.array(classes)[to_add].tolist())
else:
y.append(np.array([np.array(classes)[np.argmax(sample)]]).tolist())
return y | def function[proba2labels, parameter[proba, confident_threshold, classes]]:
constant[
Convert vectors of probabilities to labels using confident threshold
(if probability to belong with the class is bigger than confident_threshold, sample belongs with the class;
if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability)
Args:
proba: list of samples where each sample is a vector of probabilities to belong with given classes
confident_threshold (float): boundary of probability to belong with a class
classes: array of classes' names
Returns:
list of lists of labels for each sample
]
variable[y] assign[=] list[[]]
for taget[name[sample]] in starred[name[proba]] begin[:]
variable[to_add] assign[=] call[call[name[np].where, parameter[compare[name[sample] greater[>] name[confident_threshold]]]]][constant[0]]
if compare[call[name[len], parameter[name[to_add]]] greater[>] constant[0]] begin[:]
call[name[y].append, parameter[call[call[call[name[np].array, parameter[name[classes]]]][name[to_add]].tolist, parameter[]]]]
return[name[y]] | keyword[def] identifier[proba2labels] ( identifier[proba] :[ identifier[list] , identifier[np] . identifier[ndarray] ], identifier[confident_threshold] : identifier[float] , identifier[classes] :[ identifier[list] , identifier[np] . identifier[ndarray] ])-> identifier[List] [ identifier[List] ]:
literal[string]
identifier[y] =[]
keyword[for] identifier[sample] keyword[in] identifier[proba] :
identifier[to_add] = identifier[np] . identifier[where] ( identifier[sample] > identifier[confident_threshold] )[ literal[int] ]
keyword[if] identifier[len] ( identifier[to_add] )> literal[int] :
identifier[y] . identifier[append] ( identifier[np] . identifier[array] ( identifier[classes] )[ identifier[to_add] ]. identifier[tolist] ())
keyword[else] :
identifier[y] . identifier[append] ( identifier[np] . identifier[array] ([ identifier[np] . identifier[array] ( identifier[classes] )[ identifier[np] . identifier[argmax] ( identifier[sample] )]]). identifier[tolist] ())
keyword[return] identifier[y] | def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> List[List]:
"""
Convert vectors of probabilities to labels using confident threshold
(if probability to belong with the class is bigger than confident_threshold, sample belongs with the class;
if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability)
Args:
proba: list of samples where each sample is a vector of probabilities to belong with given classes
confident_threshold (float): boundary of probability to belong with a class
classes: array of classes' names
Returns:
list of lists of labels for each sample
"""
y = []
for sample in proba:
to_add = np.where(sample > confident_threshold)[0]
if len(to_add) > 0:
y.append(np.array(classes)[to_add].tolist()) # depends on [control=['if'], data=[]]
else:
y.append(np.array([np.array(classes)[np.argmax(sample)]]).tolist()) # depends on [control=['for'], data=['sample']]
return y |
def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here'])
for path in pythonpath:
sys.path.append(os.path.expanduser(path))
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug')
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg)
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log',
venusian_categories=[prog + '.debug'])
return context | def function[from_config, parameter[cls, cfg]]:
constant[return an instance configured with the ``cfg`` dict]
variable[cfg] assign[=] call[name[dict], parameter[name[cfg]]]
variable[pythonpath] assign[=] call[name[cfg].get, parameter[constant[pythonpath], list[[]]]]
if compare[constant[here] in name[cfg]] begin[:]
call[name[pythonpath].append, parameter[call[name[cfg]][constant[here]]]]
for taget[name[path]] in starred[name[pythonpath]] begin[:]
call[name[sys].path.append, parameter[call[name[os].path.expanduser, parameter[name[path]]]]]
variable[prog] assign[=] <ast.BoolOp object at 0x7da1b047a9e0>
if call[name[cfg].get, parameter[constant[debug]]] begin[:]
call[name[cls].venusian_categories.append, parameter[binary_operation[name[prog] + constant[.debug]]]]
if call[name[cfg].get, parameter[constant[interactive]]] begin[:]
import module[irc3.testing]
variable[context] assign[=] call[call[name[getattr], parameter[name[irc3].testing, name[cls].__name__]], parameter[]]
if call[name[cfg].get, parameter[constant[raw]]] begin[:]
call[name[context].include, parameter[constant[irc3.plugins.log]]]
return[name[context]] | keyword[def] identifier[from_config] ( identifier[cls] , identifier[cfg] ,** identifier[kwargs] ):
literal[string]
identifier[cfg] = identifier[dict] ( identifier[cfg] ,** identifier[kwargs] )
identifier[pythonpath] = identifier[cfg] . identifier[get] ( literal[string] ,[])
keyword[if] literal[string] keyword[in] identifier[cfg] :
identifier[pythonpath] . identifier[append] ( identifier[cfg] [ literal[string] ])
keyword[for] identifier[path] keyword[in] identifier[pythonpath] :
identifier[sys] . identifier[path] . identifier[append] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] ))
identifier[prog] = identifier[cls] . identifier[server] keyword[and] literal[string] keyword[or] literal[string]
keyword[if] identifier[cfg] . identifier[get] ( literal[string] ):
identifier[cls] . identifier[venusian_categories] . identifier[append] ( identifier[prog] + literal[string] )
keyword[if] identifier[cfg] . identifier[get] ( literal[string] ):
keyword[import] identifier[irc3] . identifier[testing]
identifier[context] = identifier[getattr] ( identifier[irc3] . identifier[testing] , identifier[cls] . identifier[__name__] )(** identifier[cfg] )
keyword[else] :
identifier[context] = identifier[cls] (** identifier[cfg] )
keyword[if] identifier[cfg] . identifier[get] ( literal[string] ):
identifier[context] . identifier[include] ( literal[string] ,
identifier[venusian_categories] =[ identifier[prog] + literal[string] ])
keyword[return] identifier[context] | def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here']) # depends on [control=['if'], data=['cfg']]
for path in pythonpath:
sys.path.append(os.path.expanduser(path)) # depends on [control=['for'], data=['path']]
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug') # depends on [control=['if'], data=[]]
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg) # depends on [control=['if'], data=[]]
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log', venusian_categories=[prog + '.debug']) # depends on [control=['if'], data=[]]
return context |
def analyze_symbol(l, sym, from_ver, to_ver, do_reads=False):
"""
This is a utility function to produce text output with details about the versions of a given symbol.
It is useful for debugging corruption issues and to mark corrupted versions.
Parameters
----------
l : `arctic.store.version_store.VersionStore`
The VersionStore instance against which the analysis will be run.
sym : `str`
The symbol to analyze
from_ver : `int` or `None`
The lower bound for the version number we wish to analyze. If None then start from the earliest version.
to_ver : `int` or `None`
The upper bound for the version number we wish to analyze. If None then stop at the latest version.
do_reads : `bool`
If this flag is set to true, then the corruption check will actually try to read the symbol (slower).
"""
logging.info('Analyzing symbol {}. Versions range is [v{}, v{}]'.format(sym, from_ver, to_ver))
prev_rows = 0
prev_n = 0
prev_v = None
logging.info('\nVersions for {}:'.format(sym))
for v in l._versions.find({'symbol': sym, 'version': {'$gte': from_ver, '$lte': to_ver}},
sort=[('version', pymongo.ASCENDING)]):
n = v.get('version')
is_deleted = v.get('metadata').get('deleted', False) if v.get('metadata') else False
if is_deleted:
matching = 0
else:
spec = {'symbol': sym, 'parent': v.get('base_version_id', v['_id']), 'segment': {'$lt': v.get('up_to', 0)}}
matching = mongo_count(l._collection, filter=spec) if not is_deleted else 0
base_id = v.get('base_version_id')
snaps = ['/'.join((str(x), str(x.generation_time))) for x in v.get('parent')] if v.get('parent') else None
added_rows = v.get('up_to', 0) - prev_rows
meta_match_with_prev = v.get('metadata') == prev_v.get('metadata') if prev_v else False
delta_snap_creation = (min([x.generation_time for x in v.get('parent')]) - v['_id'].generation_time).total_seconds() / 60.0 if v.get('parent') else 0.0
prev_v_diff = 0 if not prev_v else v['version'] - prev_v['version']
corrupted = not is_deleted and (is_corrupted(l, sym, v) if do_reads else fast_is_corrupted(l, sym, v))
logging.info(
"v{: <6} "
"{: <6} "
"{: <5} "
"({: <20}): "
"expected={: <6} "
"found={: <6} "
"last_row={: <10} "
"new_rows={: <10} "
"append count={: <10} "
"append_size={: <10} "
"type={: <14} {: <14} "
"base={: <24}/{: <28} "
"snap={: <30}[{:.1f} mins delayed] "
"{: <20} "
"{: <20}".format(
n,
prev_v_diff,
'DEL' if is_deleted else 'ALIVE',
str(v['_id'].generation_time),
v.get('segment_count', 0),
matching,
v.get('up_to', 0),
added_rows,
v.get('append_count'),
v.get('append_size'),
v.get('type'),
'meta-same' if meta_match_with_prev else 'meta-changed',
str(base_id),
str(base_id.generation_time) if base_id else '',
str(snaps),
delta_snap_creation,
'PREV_MISSING' if prev_n < n - 1 else '',
'CORRUPTED VERSION' if corrupted else '')
)
prev_rows = v.get('up_to', 0)
prev_n = n
prev_v = v
logging.info('\nSegments for {}:'.format(sym))
for seg in l._collection.find({'symbol': sym}, sort=[('_id', pymongo.ASCENDING)]):
logging.info("{: <32} {: <7} {: <10} {: <30}".format(
hashlib.sha1(seg['sha']).hexdigest(),
seg.get('segment'),
'compressed' if seg.get('compressed', False) else 'raw',
str([str(p) for p in seg.get('parent', [])])
)) | def function[analyze_symbol, parameter[l, sym, from_ver, to_ver, do_reads]]:
constant[
This is a utility function to produce text output with details about the versions of a given symbol.
It is useful for debugging corruption issues and to mark corrupted versions.
Parameters
----------
l : `arctic.store.version_store.VersionStore`
The VersionStore instance against which the analysis will be run.
sym : `str`
The symbol to analyze
from_ver : `int` or `None`
The lower bound for the version number we wish to analyze. If None then start from the earliest version.
to_ver : `int` or `None`
The upper bound for the version number we wish to analyze. If None then stop at the latest version.
do_reads : `bool`
If this flag is set to true, then the corruption check will actually try to read the symbol (slower).
]
call[name[logging].info, parameter[call[constant[Analyzing symbol {}. Versions range is [v{}, v{}]].format, parameter[name[sym], name[from_ver], name[to_ver]]]]]
variable[prev_rows] assign[=] constant[0]
variable[prev_n] assign[=] constant[0]
variable[prev_v] assign[=] constant[None]
call[name[logging].info, parameter[call[constant[
Versions for {}:].format, parameter[name[sym]]]]]
for taget[name[v]] in starred[call[name[l]._versions.find, parameter[dictionary[[<ast.Constant object at 0x7da20c796a70>, <ast.Constant object at 0x7da20c795a20>], [<ast.Name object at 0x7da20c7940a0>, <ast.Dict object at 0x7da20c794370>]]]]] begin[:]
variable[n] assign[=] call[name[v].get, parameter[constant[version]]]
variable[is_deleted] assign[=] <ast.IfExp object at 0x7da20c795990>
if name[is_deleted] begin[:]
variable[matching] assign[=] constant[0]
variable[base_id] assign[=] call[name[v].get, parameter[constant[base_version_id]]]
variable[snaps] assign[=] <ast.IfExp object at 0x7da20c796140>
variable[added_rows] assign[=] binary_operation[call[name[v].get, parameter[constant[up_to], constant[0]]] - name[prev_rows]]
variable[meta_match_with_prev] assign[=] <ast.IfExp object at 0x7da20c794c70>
variable[delta_snap_creation] assign[=] <ast.IfExp object at 0x7da20c795840>
variable[prev_v_diff] assign[=] <ast.IfExp object at 0x7da20c6e5870>
variable[corrupted] assign[=] <ast.BoolOp object at 0x7da20c6e57b0>
call[name[logging].info, parameter[call[constant[v{: <6} {: <6} {: <5} ({: <20}): expected={: <6} found={: <6} last_row={: <10} new_rows={: <10} append count={: <10} append_size={: <10} type={: <14} {: <14} base={: <24}/{: <28} snap={: <30}[{:.1f} mins delayed] {: <20} {: <20}].format, parameter[name[n], name[prev_v_diff], <ast.IfExp object at 0x7da20c6e65c0>, call[name[str], parameter[call[name[v]][constant[_id]].generation_time]], call[name[v].get, parameter[constant[segment_count], constant[0]]], name[matching], call[name[v].get, parameter[constant[up_to], constant[0]]], name[added_rows], call[name[v].get, parameter[constant[append_count]]], call[name[v].get, parameter[constant[append_size]]], call[name[v].get, parameter[constant[type]]], <ast.IfExp object at 0x7da20c6e5d20>, call[name[str], parameter[name[base_id]]], <ast.IfExp object at 0x7da20c6e4820>, call[name[str], parameter[name[snaps]]], name[delta_snap_creation], <ast.IfExp object at 0x7da20c6e76d0>, <ast.IfExp object at 0x7da20c6e6560>]]]]
variable[prev_rows] assign[=] call[name[v].get, parameter[constant[up_to], constant[0]]]
variable[prev_n] assign[=] name[n]
variable[prev_v] assign[=] name[v]
call[name[logging].info, parameter[call[constant[
Segments for {}:].format, parameter[name[sym]]]]]
for taget[name[seg]] in starred[call[name[l]._collection.find, parameter[dictionary[[<ast.Constant object at 0x7da20c6e5c60>], [<ast.Name object at 0x7da20c6e46a0>]]]]] begin[:]
call[name[logging].info, parameter[call[constant[{: <32} {: <7} {: <10} {: <30}].format, parameter[call[call[name[hashlib].sha1, parameter[call[name[seg]][constant[sha]]]].hexdigest, parameter[]], call[name[seg].get, parameter[constant[segment]]], <ast.IfExp object at 0x7da20c6e5f30>, call[name[str], parameter[<ast.ListComp object at 0x7da20c6e6b60>]]]]]] | keyword[def] identifier[analyze_symbol] ( identifier[l] , identifier[sym] , identifier[from_ver] , identifier[to_ver] , identifier[do_reads] = keyword[False] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[sym] , identifier[from_ver] , identifier[to_ver] ))
identifier[prev_rows] = literal[int]
identifier[prev_n] = literal[int]
identifier[prev_v] = keyword[None]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[sym] ))
keyword[for] identifier[v] keyword[in] identifier[l] . identifier[_versions] . identifier[find] ({ literal[string] : identifier[sym] , literal[string] :{ literal[string] : identifier[from_ver] , literal[string] : identifier[to_ver] }},
identifier[sort] =[( literal[string] , identifier[pymongo] . identifier[ASCENDING] )]):
identifier[n] = identifier[v] . identifier[get] ( literal[string] )
identifier[is_deleted] = identifier[v] . identifier[get] ( literal[string] ). identifier[get] ( literal[string] , keyword[False] ) keyword[if] identifier[v] . identifier[get] ( literal[string] ) keyword[else] keyword[False]
keyword[if] identifier[is_deleted] :
identifier[matching] = literal[int]
keyword[else] :
identifier[spec] ={ literal[string] : identifier[sym] , literal[string] : identifier[v] . identifier[get] ( literal[string] , identifier[v] [ literal[string] ]), literal[string] :{ literal[string] : identifier[v] . identifier[get] ( literal[string] , literal[int] )}}
identifier[matching] = identifier[mongo_count] ( identifier[l] . identifier[_collection] , identifier[filter] = identifier[spec] ) keyword[if] keyword[not] identifier[is_deleted] keyword[else] literal[int]
identifier[base_id] = identifier[v] . identifier[get] ( literal[string] )
identifier[snaps] =[ literal[string] . identifier[join] (( identifier[str] ( identifier[x] ), identifier[str] ( identifier[x] . identifier[generation_time] ))) keyword[for] identifier[x] keyword[in] identifier[v] . identifier[get] ( literal[string] )] keyword[if] identifier[v] . identifier[get] ( literal[string] ) keyword[else] keyword[None]
identifier[added_rows] = identifier[v] . identifier[get] ( literal[string] , literal[int] )- identifier[prev_rows]
identifier[meta_match_with_prev] = identifier[v] . identifier[get] ( literal[string] )== identifier[prev_v] . identifier[get] ( literal[string] ) keyword[if] identifier[prev_v] keyword[else] keyword[False]
identifier[delta_snap_creation] =( identifier[min] ([ identifier[x] . identifier[generation_time] keyword[for] identifier[x] keyword[in] identifier[v] . identifier[get] ( literal[string] )])- identifier[v] [ literal[string] ]. identifier[generation_time] ). identifier[total_seconds] ()/ literal[int] keyword[if] identifier[v] . identifier[get] ( literal[string] ) keyword[else] literal[int]
identifier[prev_v_diff] = literal[int] keyword[if] keyword[not] identifier[prev_v] keyword[else] identifier[v] [ literal[string] ]- identifier[prev_v] [ literal[string] ]
identifier[corrupted] = keyword[not] identifier[is_deleted] keyword[and] ( identifier[is_corrupted] ( identifier[l] , identifier[sym] , identifier[v] ) keyword[if] identifier[do_reads] keyword[else] identifier[fast_is_corrupted] ( identifier[l] , identifier[sym] , identifier[v] ))
identifier[logging] . identifier[info] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[n] ,
identifier[prev_v_diff] ,
literal[string] keyword[if] identifier[is_deleted] keyword[else] literal[string] ,
identifier[str] ( identifier[v] [ literal[string] ]. identifier[generation_time] ),
identifier[v] . identifier[get] ( literal[string] , literal[int] ),
identifier[matching] ,
identifier[v] . identifier[get] ( literal[string] , literal[int] ),
identifier[added_rows] ,
identifier[v] . identifier[get] ( literal[string] ),
identifier[v] . identifier[get] ( literal[string] ),
identifier[v] . identifier[get] ( literal[string] ),
literal[string] keyword[if] identifier[meta_match_with_prev] keyword[else] literal[string] ,
identifier[str] ( identifier[base_id] ),
identifier[str] ( identifier[base_id] . identifier[generation_time] ) keyword[if] identifier[base_id] keyword[else] literal[string] ,
identifier[str] ( identifier[snaps] ),
identifier[delta_snap_creation] ,
literal[string] keyword[if] identifier[prev_n] < identifier[n] - literal[int] keyword[else] literal[string] ,
literal[string] keyword[if] identifier[corrupted] keyword[else] literal[string] )
)
identifier[prev_rows] = identifier[v] . identifier[get] ( literal[string] , literal[int] )
identifier[prev_n] = identifier[n]
identifier[prev_v] = identifier[v]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[sym] ))
keyword[for] identifier[seg] keyword[in] identifier[l] . identifier[_collection] . identifier[find] ({ literal[string] : identifier[sym] }, identifier[sort] =[( literal[string] , identifier[pymongo] . identifier[ASCENDING] )]):
identifier[logging] . identifier[info] ( literal[string] . identifier[format] (
identifier[hashlib] . identifier[sha1] ( identifier[seg] [ literal[string] ]). identifier[hexdigest] (),
identifier[seg] . identifier[get] ( literal[string] ),
literal[string] keyword[if] identifier[seg] . identifier[get] ( literal[string] , keyword[False] ) keyword[else] literal[string] ,
identifier[str] ([ identifier[str] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[seg] . identifier[get] ( literal[string] ,[])])
)) | def analyze_symbol(l, sym, from_ver, to_ver, do_reads=False):
"""
This is a utility function to produce text output with details about the versions of a given symbol.
It is useful for debugging corruption issues and to mark corrupted versions.
Parameters
----------
l : `arctic.store.version_store.VersionStore`
The VersionStore instance against which the analysis will be run.
sym : `str`
The symbol to analyze
from_ver : `int` or `None`
The lower bound for the version number we wish to analyze. If None then start from the earliest version.
to_ver : `int` or `None`
The upper bound for the version number we wish to analyze. If None then stop at the latest version.
do_reads : `bool`
If this flag is set to true, then the corruption check will actually try to read the symbol (slower).
"""
logging.info('Analyzing symbol {}. Versions range is [v{}, v{}]'.format(sym, from_ver, to_ver))
prev_rows = 0
prev_n = 0
prev_v = None
logging.info('\nVersions for {}:'.format(sym))
for v in l._versions.find({'symbol': sym, 'version': {'$gte': from_ver, '$lte': to_ver}}, sort=[('version', pymongo.ASCENDING)]):
n = v.get('version')
is_deleted = v.get('metadata').get('deleted', False) if v.get('metadata') else False
if is_deleted:
matching = 0 # depends on [control=['if'], data=[]]
else:
spec = {'symbol': sym, 'parent': v.get('base_version_id', v['_id']), 'segment': {'$lt': v.get('up_to', 0)}}
matching = mongo_count(l._collection, filter=spec) if not is_deleted else 0
base_id = v.get('base_version_id')
snaps = ['/'.join((str(x), str(x.generation_time))) for x in v.get('parent')] if v.get('parent') else None
added_rows = v.get('up_to', 0) - prev_rows
meta_match_with_prev = v.get('metadata') == prev_v.get('metadata') if prev_v else False
delta_snap_creation = (min([x.generation_time for x in v.get('parent')]) - v['_id'].generation_time).total_seconds() / 60.0 if v.get('parent') else 0.0
prev_v_diff = 0 if not prev_v else v['version'] - prev_v['version']
corrupted = not is_deleted and (is_corrupted(l, sym, v) if do_reads else fast_is_corrupted(l, sym, v))
logging.info('v{: <6} {: <6} {: <5} ({: <20}): expected={: <6} found={: <6} last_row={: <10} new_rows={: <10} append count={: <10} append_size={: <10} type={: <14} {: <14} base={: <24}/{: <28} snap={: <30}[{:.1f} mins delayed] {: <20} {: <20}'.format(n, prev_v_diff, 'DEL' if is_deleted else 'ALIVE', str(v['_id'].generation_time), v.get('segment_count', 0), matching, v.get('up_to', 0), added_rows, v.get('append_count'), v.get('append_size'), v.get('type'), 'meta-same' if meta_match_with_prev else 'meta-changed', str(base_id), str(base_id.generation_time) if base_id else '', str(snaps), delta_snap_creation, 'PREV_MISSING' if prev_n < n - 1 else '', 'CORRUPTED VERSION' if corrupted else ''))
prev_rows = v.get('up_to', 0)
prev_n = n
prev_v = v # depends on [control=['for'], data=['v']]
logging.info('\nSegments for {}:'.format(sym))
for seg in l._collection.find({'symbol': sym}, sort=[('_id', pymongo.ASCENDING)]):
logging.info('{: <32} {: <7} {: <10} {: <30}'.format(hashlib.sha1(seg['sha']).hexdigest(), seg.get('segment'), 'compressed' if seg.get('compressed', False) else 'raw', str([str(p) for p in seg.get('parent', [])]))) # depends on [control=['for'], data=['seg']] |
def _prepare_general_arguments(RRTMGobject):
    '''Prepare arguments needed for both RRTMG_SW and RRTMG_LW with correct dimensions.

    Broadcasts temperature, pressure, gas volume mixing ratios and cloud
    properties from the climlab state onto the (ncol, nlay) arrays that the
    RRTMG drivers expect, and returns them as one flat tuple.
    '''
    # Broadcasting template with the shape of the air-temperature field.
    ones = np.ones_like(RRTMGobject.Tatm)

    def _to_rrtm(field):
        # Broadcast a (possibly scalar) field onto the model grid and
        # convert it to RRTMG's (ncol, nlay) layout.
        return _climlab_to_rrtm(field * ones)

    tlay = _climlab_to_rrtm(RRTMGobject.Tatm)
    tlev = _climlab_to_rrtm(interface_temperature(**RRTMGobject.state))
    play = _climlab_to_rrtm(RRTMGobject.lev * np.ones_like(tlay))
    plev = _climlab_to_rrtm(RRTMGobject.lev_bounds * np.ones_like(tlev))
    ncol, nlay = tlay.shape
    tsfc = _climlab_to_rrtm_sfc(RRTMGobject.Ts, RRTMGobject.Ts)
    # GASES -- proper dimensions and units (volume mixing ratios).
    h2ovmr = _to_rrtm(mmr_to_vmr(RRTMGobject.specific_humidity, gas='H2O'))
    gas_vmr = {name: _to_rrtm(RRTMGobject.absorber_vmr[name])
               for name in ('O3', 'CO2', 'CH4', 'N2O', 'O2',
                            'CFC11', 'CFC12', 'CFC22', 'CCL4')}
    # Cloud parameters broadcast onto the same grid.
    cldfrac = _to_rrtm(RRTMGobject.cldfrac)
    ciwp = _to_rrtm(RRTMGobject.ciwp)
    clwp = _to_rrtm(RRTMGobject.clwp)
    relq = _to_rrtm(RRTMGobject.r_liq)
    reic = _to_rrtm(RRTMGobject.r_ice)
    # NOTE(review): cfc12vmr appears twice in this tuple, exactly as in the
    # original code.  Preserved as-is because callers unpack by position --
    # confirm against the RRTMG driver argument lists before changing.
    return (ncol, nlay, play, plev, tlay, tlev, tsfc,
            h2ovmr, gas_vmr['O3'], gas_vmr['CO2'], gas_vmr['CH4'],
            gas_vmr['N2O'], gas_vmr['O2'], gas_vmr['CFC11'],
            gas_vmr['CFC12'], gas_vmr['CFC12'], gas_vmr['CFC22'],
            gas_vmr['CCL4'],
            cldfrac, ciwp, clwp, relq, reic)
constant[Prepare arguments needed for both RRTMG_SW and RRTMG_LW with correct dimensions.]
variable[tlay] assign[=] call[name[_climlab_to_rrtm], parameter[name[RRTMGobject].Tatm]]
variable[tlev] assign[=] call[name[_climlab_to_rrtm], parameter[call[name[interface_temperature], parameter[]]]]
variable[play] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].lev * call[name[np].ones_like, parameter[name[tlay]]]]]]
variable[plev] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].lev_bounds * call[name[np].ones_like, parameter[name[tlev]]]]]]
<ast.Tuple object at 0x7da1b13e6890> assign[=] name[tlay].shape
variable[tsfc] assign[=] call[name[_climlab_to_rrtm_sfc], parameter[name[RRTMGobject].Ts, name[RRTMGobject].Ts]]
variable[vapor_mixing_ratio] assign[=] call[name[mmr_to_vmr], parameter[name[RRTMGobject].specific_humidity]]
variable[h2ovmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[vapor_mixing_ratio] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[o3vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[O3]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[co2vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[CO2]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[ch4vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[CH4]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[n2ovmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[N2O]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[o2vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[O2]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[cfc11vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[CFC11]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[cfc12vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[CFC12]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[cfc22vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[CFC22]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[ccl4vmr] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[call[name[RRTMGobject].absorber_vmr][constant[CCL4]] * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[cldfrac] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].cldfrac * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[ciwp] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].ciwp * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[clwp] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].clwp * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[relq] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].r_liq * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
variable[reic] assign[=] call[name[_climlab_to_rrtm], parameter[binary_operation[name[RRTMGobject].r_ice * call[name[np].ones_like, parameter[name[RRTMGobject].Tatm]]]]]
return[tuple[[<ast.Name object at 0x7da2044c3040>, <ast.Name object at 0x7da2044c15d0>, <ast.Name object at 0x7da2044c03a0>, <ast.Name object at 0x7da2044c2980>, <ast.Name object at 0x7da2044c2fe0>, <ast.Name object at 0x7da2044c1360>, <ast.Name object at 0x7da2044c3a00>, <ast.Name object at 0x7da2044c20e0>, <ast.Name object at 0x7da2044c37c0>, <ast.Name object at 0x7da2044c3ac0>, <ast.Name object at 0x7da2044c3fa0>, <ast.Name object at 0x7da2044c0be0>, <ast.Name object at 0x7da2044c1960>, <ast.Name object at 0x7da2044c0a30>, <ast.Name object at 0x7da2044c21d0>, <ast.Name object at 0x7da2044c2320>, <ast.Name object at 0x7da2044c1990>, <ast.Name object at 0x7da2044c3f40>, <ast.Name object at 0x7da2044c1660>, <ast.Name object at 0x7da2044c0640>, <ast.Name object at 0x7da2044c1de0>, <ast.Name object at 0x7da2044c3070>, <ast.Name object at 0x7da2044c3e80>]]] | keyword[def] identifier[_prepare_general_arguments] ( identifier[RRTMGobject] ):
literal[string]
identifier[tlay] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[Tatm] )
identifier[tlev] = identifier[_climlab_to_rrtm] ( identifier[interface_temperature] (** identifier[RRTMGobject] . identifier[state] ))
identifier[play] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[lev] * identifier[np] . identifier[ones_like] ( identifier[tlay] ))
identifier[plev] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[lev_bounds] * identifier[np] . identifier[ones_like] ( identifier[tlev] ))
identifier[ncol] , identifier[nlay] = identifier[tlay] . identifier[shape]
identifier[tsfc] = identifier[_climlab_to_rrtm_sfc] ( identifier[RRTMGobject] . identifier[Ts] , identifier[RRTMGobject] . identifier[Ts] )
identifier[vapor_mixing_ratio] = identifier[mmr_to_vmr] ( identifier[RRTMGobject] . identifier[specific_humidity] , identifier[gas] = literal[string] )
identifier[h2ovmr] = identifier[_climlab_to_rrtm] ( identifier[vapor_mixing_ratio] * identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[o3vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[co2vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[ch4vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[n2ovmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[o2vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[cfc11vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[cfc12vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[cfc22vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[ccl4vmr] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[absorber_vmr] [ literal[string] ]* identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[cldfrac] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[cldfrac] * identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[ciwp] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[ciwp] * identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[clwp] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[clwp] * identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[relq] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[r_liq] * identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
identifier[reic] = identifier[_climlab_to_rrtm] ( identifier[RRTMGobject] . identifier[r_ice] * identifier[np] . identifier[ones_like] ( identifier[RRTMGobject] . identifier[Tatm] ))
keyword[return] ( identifier[ncol] , identifier[nlay] , identifier[play] , identifier[plev] , identifier[tlay] , identifier[tlev] , identifier[tsfc] ,
identifier[h2ovmr] , identifier[o3vmr] , identifier[co2vmr] , identifier[ch4vmr] , identifier[n2ovmr] , identifier[o2vmr] , identifier[cfc11vmr] ,
identifier[cfc12vmr] , identifier[cfc12vmr] , identifier[cfc22vmr] , identifier[ccl4vmr] ,
identifier[cldfrac] , identifier[ciwp] , identifier[clwp] , identifier[relq] , identifier[reic] ) | def _prepare_general_arguments(RRTMGobject):
"""Prepare arguments needed for both RRTMG_SW and RRTMG_LW with correct dimensions."""
tlay = _climlab_to_rrtm(RRTMGobject.Tatm)
tlev = _climlab_to_rrtm(interface_temperature(**RRTMGobject.state))
play = _climlab_to_rrtm(RRTMGobject.lev * np.ones_like(tlay))
plev = _climlab_to_rrtm(RRTMGobject.lev_bounds * np.ones_like(tlev))
(ncol, nlay) = tlay.shape
tsfc = _climlab_to_rrtm_sfc(RRTMGobject.Ts, RRTMGobject.Ts)
# GASES -- put them in proper dimensions and units
vapor_mixing_ratio = mmr_to_vmr(RRTMGobject.specific_humidity, gas='H2O')
h2ovmr = _climlab_to_rrtm(vapor_mixing_ratio * np.ones_like(RRTMGobject.Tatm))
o3vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['O3'] * np.ones_like(RRTMGobject.Tatm))
co2vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['CO2'] * np.ones_like(RRTMGobject.Tatm))
ch4vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['CH4'] * np.ones_like(RRTMGobject.Tatm))
n2ovmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['N2O'] * np.ones_like(RRTMGobject.Tatm))
o2vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['O2'] * np.ones_like(RRTMGobject.Tatm))
cfc11vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['CFC11'] * np.ones_like(RRTMGobject.Tatm))
cfc12vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['CFC12'] * np.ones_like(RRTMGobject.Tatm))
cfc22vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['CFC22'] * np.ones_like(RRTMGobject.Tatm))
ccl4vmr = _climlab_to_rrtm(RRTMGobject.absorber_vmr['CCL4'] * np.ones_like(RRTMGobject.Tatm))
# Cloud parameters
cldfrac = _climlab_to_rrtm(RRTMGobject.cldfrac * np.ones_like(RRTMGobject.Tatm))
ciwp = _climlab_to_rrtm(RRTMGobject.ciwp * np.ones_like(RRTMGobject.Tatm))
clwp = _climlab_to_rrtm(RRTMGobject.clwp * np.ones_like(RRTMGobject.Tatm))
relq = _climlab_to_rrtm(RRTMGobject.r_liq * np.ones_like(RRTMGobject.Tatm))
reic = _climlab_to_rrtm(RRTMGobject.r_ice * np.ones_like(RRTMGobject.Tatm))
return (ncol, nlay, play, plev, tlay, tlev, tsfc, h2ovmr, o3vmr, co2vmr, ch4vmr, n2ovmr, o2vmr, cfc11vmr, cfc12vmr, cfc12vmr, cfc22vmr, ccl4vmr, cldfrac, ciwp, clwp, relq, reic) |
def _jks_keystream(iv, password):
"""Helper keystream generator for _jks_pkey_decrypt"""
cur = iv
while 1:
xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument
cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3
for byte in cur:
yield byte | def function[_jks_keystream, parameter[iv, password]]:
constant[Helper keystream generator for _jks_pkey_decrypt]
variable[cur] assign[=] name[iv]
while constant[1] begin[:]
variable[xhash] assign[=] call[name[hashlib].sha1, parameter[call[name[bytes], parameter[binary_operation[name[password] + name[cur]]]]]]
variable[cur] assign[=] call[name[bytearray], parameter[call[name[xhash].digest, parameter[]]]]
for taget[name[byte]] in starred[name[cur]] begin[:]
<ast.Yield object at 0x7da1b0658ca0> | keyword[def] identifier[_jks_keystream] ( identifier[iv] , identifier[password] ):
literal[string]
identifier[cur] = identifier[iv]
keyword[while] literal[int] :
identifier[xhash] = identifier[hashlib] . identifier[sha1] ( identifier[bytes] ( identifier[password] + identifier[cur] ))
identifier[cur] = identifier[bytearray] ( identifier[xhash] . identifier[digest] ())
keyword[for] identifier[byte] keyword[in] identifier[cur] :
keyword[yield] identifier[byte] | def _jks_keystream(iv, password):
"""Helper keystream generator for _jks_pkey_decrypt"""
cur = iv
while 1:
xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument
cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3
for byte in cur:
yield byte # depends on [control=['for'], data=['byte']] # depends on [control=['while'], data=[]] |
def is_key(cls, result):
    """Return ``True`` if result is a key object."""
    # Imported lazily so boto is only required when this check is used.
    from boto.gs.key import Key
    result_is_key = isinstance(result, Key)
    return result_is_key
constant[Return ``True`` if result is a key object.]
from relative_module[boto.gs.key] import module[Key]
return[call[name[isinstance], parameter[name[result], name[Key]]]] | keyword[def] identifier[is_key] ( identifier[cls] , identifier[result] ):
literal[string]
keyword[from] identifier[boto] . identifier[gs] . identifier[key] keyword[import] identifier[Key]
keyword[return] identifier[isinstance] ( identifier[result] , identifier[Key] ) | def is_key(cls, result):
"""Return ``True`` if result is a key object."""
from boto.gs.key import Key
return isinstance(result, Key) |
def set_rainbow(self, duration):
    """Turn the bulb on and create a rainbow.

    Sweeps the hue wheel (0-358) at full saturation and value, pacing the
    steps so the whole sweep takes roughly ``duration`` seconds.
    """
    step_delay = duration / 359
    for hue in range(359):
        self.set_color_hsv(hue, 100, 100)
        time.sleep(step_delay)
constant[Turn the bulb on and create a rainbow.]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], constant[359]]]] begin[:]
call[name[self].set_color_hsv, parameter[name[i], constant[100], constant[100]]]
call[name[time].sleep, parameter[binary_operation[name[duration] / constant[359]]]] | keyword[def] identifier[set_rainbow] ( identifier[self] , identifier[duration] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[self] . identifier[set_color_hsv] ( identifier[i] , literal[int] , literal[int] )
identifier[time] . identifier[sleep] ( identifier[duration] / literal[int] ) | def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration / 359) # depends on [control=['for'], data=['i']] |
def print_tokens(tokens, style=None, true_color=False, file=None):
    """
    Render a list of ``(Token, text)`` tuples to the output in the given
    style.

    E.g.::

        style = style_from_dict({
            Token.Hello: '#ff0066',
            Token.World: '#884444 italic',
        })
        tokens = [
            (Token.Hello, 'Hello'),
            (Token.World, 'World'),
        ]
        print_tokens(tokens, style=style)

    :param tokens: List of ``(Token, text)`` tuples.
    :param style: :class:`.Style` instance for the color scheme.
    :param true_color: When True, use 24bit colors instead of 256 colors.
    :param file: The output file. This can be `sys.stdout` or `sys.stderr`.
    """
    style = DEFAULT_STYLE if style is None else style
    assert isinstance(style, Style)

    # Build an output wrapper around the requested stream and hand the
    # actual rendering off to the renderer.
    renderer_print_tokens(
        create_output(true_color=true_color, stdout=file), tokens, style)
constant[
Print a list of (Token, text) tuples in the given style to the output.
E.g.::
style = style_from_dict({
Token.Hello: '#ff0066',
Token.World: '#884444 italic',
})
tokens = [
(Token.Hello, 'Hello'),
(Token.World, 'World'),
]
print_tokens(tokens, style=style)
:param tokens: List of ``(Token, text)`` tuples.
:param style: :class:`.Style` instance for the color scheme.
:param true_color: When True, use 24bit colors instead of 256 colors.
:param file: The output file. This can be `sys.stdout` or `sys.stderr`.
]
if compare[name[style] is constant[None]] begin[:]
variable[style] assign[=] name[DEFAULT_STYLE]
assert[call[name[isinstance], parameter[name[style], name[Style]]]]
variable[output] assign[=] call[name[create_output], parameter[]]
call[name[renderer_print_tokens], parameter[name[output], name[tokens], name[style]]] | keyword[def] identifier[print_tokens] ( identifier[tokens] , identifier[style] = keyword[None] , identifier[true_color] = keyword[False] , identifier[file] = keyword[None] ):
literal[string]
keyword[if] identifier[style] keyword[is] keyword[None] :
identifier[style] = identifier[DEFAULT_STYLE]
keyword[assert] identifier[isinstance] ( identifier[style] , identifier[Style] )
identifier[output] = identifier[create_output] ( identifier[true_color] = identifier[true_color] , identifier[stdout] = identifier[file] )
identifier[renderer_print_tokens] ( identifier[output] , identifier[tokens] , identifier[style] ) | def print_tokens(tokens, style=None, true_color=False, file=None):
"""
Print a list of (Token, text) tuples in the given style to the output.
E.g.::
style = style_from_dict({
Token.Hello: '#ff0066',
Token.World: '#884444 italic',
})
tokens = [
(Token.Hello, 'Hello'),
(Token.World, 'World'),
]
print_tokens(tokens, style=style)
:param tokens: List of ``(Token, text)`` tuples.
:param style: :class:`.Style` instance for the color scheme.
:param true_color: When True, use 24bit colors instead of 256 colors.
:param file: The output file. This can be `sys.stdout` or `sys.stderr`.
"""
if style is None:
style = DEFAULT_STYLE # depends on [control=['if'], data=['style']]
assert isinstance(style, Style)
output = create_output(true_color=true_color, stdout=file)
renderer_print_tokens(output, tokens, style) |
def admin_command(sudo, command):
    """
    If sudo is needed, make sure the command is prepended
    correctly, otherwise return the command as it came.

    :param sudo: A boolean representing the intention of having a sudo command
                 (or not)
    :param command: A list of the actual command to execute with Popen.
                    A bare (non-list) command is also accepted and is
                    wrapped in a list first.
    :return: A new list with ``'sudo'`` prepended when ``sudo`` is true;
             otherwise ``command`` unchanged.
    """
    if not sudo:
        return command
    if not isinstance(command, list):
        # Normalize a bare command to Popen's list form.
        command = [command]
    # Concatenate into a fresh list (the original identity comprehension
    # `[cmd for cmd in command]` was a roundabout copy); the caller's list
    # is never mutated.
    return ['sudo'] + command
constant[
If sudo is needed, make sure the command is prepended
correctly, otherwise return the command as it came.
:param sudo: A boolean representing the intention of having a sudo command
(or not)
:param command: A list of the actual command to execute with Popen.
]
if name[sudo] begin[:]
if <ast.UnaryOp object at 0x7da18fe91b70> begin[:]
variable[command] assign[=] list[[<ast.Name object at 0x7da18fe91480>]]
return[binary_operation[list[[<ast.Constant object at 0x7da18fe913f0>]] + <ast.ListComp object at 0x7da18fe93e80>]]
return[name[command]] | keyword[def] identifier[admin_command] ( identifier[sudo] , identifier[command] ):
literal[string]
keyword[if] identifier[sudo] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[command] , identifier[list] ):
identifier[command] =[ identifier[command] ]
keyword[return] [ literal[string] ]+[ identifier[cmd] keyword[for] identifier[cmd] keyword[in] identifier[command] ]
keyword[return] identifier[command] | def admin_command(sudo, command):
"""
If sudo is needed, make sure the command is prepended
correctly, otherwise return the command as it came.
:param sudo: A boolean representing the intention of having a sudo command
(or not)
:param command: A list of the actual command to execute with Popen.
"""
if sudo:
if not isinstance(command, list):
command = [command] # depends on [control=['if'], data=[]]
return ['sudo'] + [cmd for cmd in command] # depends on [control=['if'], data=[]]
return command |
def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
              preserve_paragraphs=False):
    """A helper function that intelligently wraps text. By default, it
    assumes that it operates on a single paragraph of text but if the
    `preserve_paragraphs` parameter is provided it will intelligently
    handle paragraphs (defined by two empty lines).

    If paragraphs are handled, a paragraph can be prefixed with an empty
    line containing the ``\\b`` character (``\\x08``) to indicate that
    no rewrapping should happen in that block.

    :param text: the text that should be rewrapped.
    :param width: the maximum width for the text.
    :param initial_indent: the initial indent that should be placed on the
        first line as a string.
    :param subsequent_indent: the indent string that should be placed on
        each consecutive line.
    :param preserve_paragraphs: if this flag is set then the wrapping will
        intelligently handle paragraphs.
    """
    from ._textwrap import TextWrapper
    text = text.expandtabs()
    # Only the final character of the subsequent indent is handed to the
    # TextWrapper; the remainder is re-applied to every wrapped line
    # afterwards by add_subsequent_indent() (defined elsewhere in this
    # module).
    post_wrap_indent = subsequent_indent[:-1]
    subsequent_indent = subsequent_indent[-1:]
    wrapper = TextWrapper(width, initial_indent=initial_indent,
                          subsequent_indent=subsequent_indent,
                          replace_whitespace=False)
    if not preserve_paragraphs:
        # Simple mode: treat the whole text as one paragraph.
        return add_subsequent_indent(wrapper.fill(text), post_wrap_indent)
    # Paragraph mode: p collects (indent, is_raw, text) records; buf holds
    # the lines of the paragraph currently being accumulated; indent is the
    # leading indentation of that paragraph, measured lazily on its first
    # non-empty line.
    p = []
    buf = []
    indent = None
    def _flush_par():
        # Close the paragraph accumulated in buf and append it to p.  A
        # paragraph whose first line is a lone \b is flagged raw, i.e. its
        # existing line breaks are preserved instead of being rewrapped.
        if not buf:
            return
        if buf[0].strip() == '\b':
            p.append((indent or 0, True, '\n'.join(buf[1:])))
        else:
            p.append((indent or 0, False, ' '.join(buf)))
        del buf[:]
    for line in text.splitlines():
        if not line:
            # An empty line terminates the current paragraph.
            _flush_par()
            indent = None
        else:
            if indent is None:
                # First line of a new paragraph: its leading whitespace
                # (measured via term_len, which ignores terminal escape
                # sequences) fixes the paragraph's indentation.
                orig_len = term_len(line)
                line = line.lstrip()
                indent = orig_len - term_len(line)
            buf.append(line)
    _flush_par()
    rv = []
    for indent, raw, text in p:
        with wrapper.extra_indent(' ' * indent):
            if raw:
                # Raw (\b-prefixed) paragraph: indent only, keep the
                # author's line breaks.
                rv.append(add_subsequent_indent(wrapper.indent_only(text),
                                                post_wrap_indent))
            else:
                rv.append(add_subsequent_indent(wrapper.fill(text),
                                                post_wrap_indent))
    # Paragraphs are re-joined with a blank line between them.
    return '\n\n'.join(rv)
constant[A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\b`` character (``\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
]
from relative_module[_textwrap] import module[TextWrapper]
variable[text] assign[=] call[name[text].expandtabs, parameter[]]
variable[post_wrap_indent] assign[=] call[name[subsequent_indent]][<ast.Slice object at 0x7da1b0fa6590>]
variable[subsequent_indent] assign[=] call[name[subsequent_indent]][<ast.Slice object at 0x7da1b0fa5750>]
variable[wrapper] assign[=] call[name[TextWrapper], parameter[name[width]]]
if <ast.UnaryOp object at 0x7da1b0fa6890> begin[:]
return[call[name[add_subsequent_indent], parameter[call[name[wrapper].fill, parameter[name[text]]], name[post_wrap_indent]]]]
variable[p] assign[=] list[[]]
variable[buf] assign[=] list[[]]
variable[indent] assign[=] constant[None]
def function[_flush_par, parameter[]]:
if <ast.UnaryOp object at 0x7da1b0fa66e0> begin[:]
return[None]
if compare[call[call[name[buf]][constant[0]].strip, parameter[]] equal[==] constant[]] begin[:]
call[name[p].append, parameter[tuple[[<ast.BoolOp object at 0x7da1b0fa6b60>, <ast.Constant object at 0x7da1b11ef880>, <ast.Call object at 0x7da1b11ec580>]]]]
<ast.Delete object at 0x7da1b11ec430>
for taget[name[line]] in starred[call[name[text].splitlines, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b11ef490> begin[:]
call[name[_flush_par], parameter[]]
variable[indent] assign[=] constant[None]
call[name[_flush_par], parameter[]]
variable[rv] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b11ed3f0>, <ast.Name object at 0x7da1b11ef640>, <ast.Name object at 0x7da1b11eead0>]]] in starred[name[p]] begin[:]
with call[name[wrapper].extra_indent, parameter[binary_operation[constant[ ] * name[indent]]]] begin[:]
if name[raw] begin[:]
call[name[rv].append, parameter[call[name[add_subsequent_indent], parameter[call[name[wrapper].indent_only, parameter[name[text]]], name[post_wrap_indent]]]]]
return[call[constant[
].join, parameter[name[rv]]]] | keyword[def] identifier[wrap_text] ( identifier[text] , identifier[width] = literal[int] , identifier[initial_indent] = literal[string] , identifier[subsequent_indent] = literal[string] ,
identifier[preserve_paragraphs] = keyword[False] ):
literal[string]
keyword[from] . identifier[_textwrap] keyword[import] identifier[TextWrapper]
identifier[text] = identifier[text] . identifier[expandtabs] ()
identifier[post_wrap_indent] = identifier[subsequent_indent] [:- literal[int] ]
identifier[subsequent_indent] = identifier[subsequent_indent] [- literal[int] :]
identifier[wrapper] = identifier[TextWrapper] ( identifier[width] , identifier[initial_indent] = identifier[initial_indent] ,
identifier[subsequent_indent] = identifier[subsequent_indent] ,
identifier[replace_whitespace] = keyword[False] )
keyword[if] keyword[not] identifier[preserve_paragraphs] :
keyword[return] identifier[add_subsequent_indent] ( identifier[wrapper] . identifier[fill] ( identifier[text] ), identifier[post_wrap_indent] )
identifier[p] =[]
identifier[buf] =[]
identifier[indent] = keyword[None]
keyword[def] identifier[_flush_par] ():
keyword[if] keyword[not] identifier[buf] :
keyword[return]
keyword[if] identifier[buf] [ literal[int] ]. identifier[strip] ()== literal[string] :
identifier[p] . identifier[append] (( identifier[indent] keyword[or] literal[int] , keyword[True] , literal[string] . identifier[join] ( identifier[buf] [ literal[int] :])))
keyword[else] :
identifier[p] . identifier[append] (( identifier[indent] keyword[or] literal[int] , keyword[False] , literal[string] . identifier[join] ( identifier[buf] )))
keyword[del] identifier[buf] [:]
keyword[for] identifier[line] keyword[in] identifier[text] . identifier[splitlines] ():
keyword[if] keyword[not] identifier[line] :
identifier[_flush_par] ()
identifier[indent] = keyword[None]
keyword[else] :
keyword[if] identifier[indent] keyword[is] keyword[None] :
identifier[orig_len] = identifier[term_len] ( identifier[line] )
identifier[line] = identifier[line] . identifier[lstrip] ()
identifier[indent] = identifier[orig_len] - identifier[term_len] ( identifier[line] )
identifier[buf] . identifier[append] ( identifier[line] )
identifier[_flush_par] ()
identifier[rv] =[]
keyword[for] identifier[indent] , identifier[raw] , identifier[text] keyword[in] identifier[p] :
keyword[with] identifier[wrapper] . identifier[extra_indent] ( literal[string] * identifier[indent] ):
keyword[if] identifier[raw] :
identifier[rv] . identifier[append] ( identifier[add_subsequent_indent] ( identifier[wrapper] . identifier[indent_only] ( identifier[text] ),
identifier[post_wrap_indent] ))
keyword[else] :
identifier[rv] . identifier[append] ( identifier[add_subsequent_indent] ( identifier[wrapper] . identifier[fill] ( identifier[text] ),
identifier[post_wrap_indent] ))
keyword[return] literal[string] . identifier[join] ( identifier[rv] ) | def wrap_text(text, width=78, initial_indent='', subsequent_indent='', preserve_paragraphs=False):
"""A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\\b`` character (``\\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
"""
from ._textwrap import TextWrapper
text = text.expandtabs()
post_wrap_indent = subsequent_indent[:-1]
subsequent_indent = subsequent_indent[-1:]
wrapper = TextWrapper(width, initial_indent=initial_indent, subsequent_indent=subsequent_indent, replace_whitespace=False)
if not preserve_paragraphs:
return add_subsequent_indent(wrapper.fill(text), post_wrap_indent) # depends on [control=['if'], data=[]]
p = []
buf = []
indent = None
def _flush_par():
if not buf:
return # depends on [control=['if'], data=[]]
if buf[0].strip() == '\x08':
p.append((indent or 0, True, '\n'.join(buf[1:]))) # depends on [control=['if'], data=[]]
else:
p.append((indent or 0, False, ' '.join(buf)))
del buf[:]
for line in text.splitlines():
if not line:
_flush_par()
indent = None # depends on [control=['if'], data=[]]
else:
if indent is None:
orig_len = term_len(line)
line = line.lstrip()
indent = orig_len - term_len(line) # depends on [control=['if'], data=['indent']]
buf.append(line) # depends on [control=['for'], data=['line']]
_flush_par()
rv = []
for (indent, raw, text) in p:
with wrapper.extra_indent(' ' * indent):
if raw:
rv.append(add_subsequent_indent(wrapper.indent_only(text), post_wrap_indent)) # depends on [control=['if'], data=[]]
else:
rv.append(add_subsequent_indent(wrapper.fill(text), post_wrap_indent)) # depends on [control=['with'], data=[]] # depends on [control=['for'], data=[]]
return '\n\n'.join(rv) |
def build(self, build_execution_configuration, **kwargs):
    """
    Triggers the build execution for a given configuration.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, supply a ``callback`` function to be
    invoked when the response is received::

        def callback_function(response):
            pprint(response)

        thread = api.build(build_execution_configuration, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str build_execution_configuration: Build Execution Configuration. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
    :param str username_triggered: Username who triggered the build. If empty current user is used.
    :param str callback_url: Optional Callback URL
    :return: None when called synchronously; the request thread when a
        ``callback`` is supplied.
    """
    # Ask the lower-level call to return only the response payload rather
    # than the full (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # With a callback the underlying call returns the request thread
    # immediately; without one it blocks and returns the response data.
    # Either way its return value is exactly what this wrapper should
    # return, so no branching is needed here.
    return self.build_with_http_info(build_execution_configuration, **kwargs)
constant[
Triggers the build execution for a given configuration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build(build_execution_configuration, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str build_execution_configuration: Build Execution Configuration. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:param str username_triggered: Username who triggered the build. If empty current user is used.
:param str callback_url: Optional Callback URL
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[callback]]] begin[:]
return[call[name[self].build_with_http_info, parameter[name[build_execution_configuration]]]] | keyword[def] identifier[build] ( identifier[self] , identifier[build_execution_configuration] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[build_with_http_info] ( identifier[build_execution_configuration] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[build_with_http_info] ( identifier[build_execution_configuration] ,** identifier[kwargs] )
keyword[return] identifier[data] | def build(self, build_execution_configuration, **kwargs):
"""
Triggers the build execution for a given configuration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build(build_execution_configuration, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str build_execution_configuration: Build Execution Configuration. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:param str username_triggered: Username who triggered the build. If empty current user is used.
:param str callback_url: Optional Callback URL
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.build_with_http_info(build_execution_configuration, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.build_with_http_info(build_execution_configuration, **kwargs)
return data |
def add_visualization(self, visualization, size_x=6, size_y=3, col=0, row=0):
    """
    Add the visualization to the dashboard.

    Visualizations are placed on a grid with 12 columns and unlimited
    rows. Leave ``col`` and ``row`` at 0 for automatic placement.

    :param visualization: previously loaded visualization
    :param size_x: width of the panel
    :param size_y: height of the panel
    :param col: 1-based column of the top left corner, 0 for automatic placement
    :param row: 1-based row of the top left corner, 0 for automatic placement
    :return: newly created panel or None
    """
    panel_index = self.get_max_index() + 1
    if col and row:
        # Explicit placement: build the panel dict directly at the
        # requested grid position.
        panel = {
            'col': col,
            'row': row,
            'size_x': size_x,
            'size_y': size_y,
            'panelIndex': panel_index,
            'type': 'visualization',
            'id': visualization.id,
        }
        self.panels.append(panel)
        return panel
    # Automatic placement: let append_panel find a free grid slot; it may
    # return None, in which case nothing is added.
    panel = append_panel(self.panels, size_x, size_y)
    if panel:
        panel['id'] = visualization.id
        panel['panelIndex'] = panel_index
        panel['type'] = 'visualization'
    return panel
constant[
Adds the visualization to the dashboard. Leave col and row = 0 for automatic placement of the visualization.
Visualizations are placed on a grid with 12 columns and unlimited rows.
:param visualization: previously loaded visualization
:param size_x width of the panel
:param size_y height of the panel
:param col 1-based column of the top left corner, leave 0 for automatic placement
:param row 1-based row of the top left corner, leave 0 for automatic placement
:return: newly created panel or None
]
variable[new_panel_index] assign[=] binary_operation[call[name[self].get_max_index, parameter[]] + constant[1]]
if <ast.BoolOp object at 0x7da1b1aa6200> begin[:]
variable[new_panel] assign[=] dictionary[[<ast.Constant object at 0x7da1b1aa7ee0>, <ast.Constant object at 0x7da1b1aa6410>, <ast.Constant object at 0x7da1b1aa5240>, <ast.Constant object at 0x7da1b1aa79d0>, <ast.Constant object at 0x7da1b1aa5630>, <ast.Constant object at 0x7da1b1aa4bb0>, <ast.Constant object at 0x7da1b1aa7eb0>], [<ast.Name object at 0x7da1b1aa6f20>, <ast.Name object at 0x7da1b1aa60b0>, <ast.Name object at 0x7da1b1aa51b0>, <ast.Name object at 0x7da1b1aa6aa0>, <ast.Name object at 0x7da1b1aa47c0>, <ast.Constant object at 0x7da1b1aa6e00>, <ast.Attribute object at 0x7da1b1aa6050>]]
call[name[self].panels.append, parameter[name[new_panel]]]
return[name[new_panel]] | keyword[def] identifier[add_visualization] ( identifier[self] , identifier[visualization] , identifier[size_x] = literal[int] , identifier[size_y] = literal[int] , identifier[col] = literal[int] , identifier[row] = literal[int] ):
literal[string]
identifier[new_panel_index] = identifier[self] . identifier[get_max_index] ()+ literal[int]
keyword[if] identifier[col] keyword[and] identifier[row] :
identifier[new_panel] ={
literal[string] : identifier[col] , literal[string] : identifier[row] ,
literal[string] : identifier[size_x] , literal[string] : identifier[size_y] ,
literal[string] : identifier[new_panel_index] ,
literal[string] : literal[string] ,
literal[string] : identifier[visualization] . identifier[id]
}
identifier[self] . identifier[panels] . identifier[append] ( identifier[new_panel] )
keyword[return] identifier[new_panel]
keyword[else] :
identifier[new_panel] = identifier[append_panel] ( identifier[self] . identifier[panels] , identifier[size_x] , identifier[size_y] )
keyword[if] identifier[new_panel] :
identifier[new_panel] [ literal[string] ]= identifier[visualization] . identifier[id]
identifier[new_panel] [ literal[string] ]= identifier[new_panel_index]
identifier[new_panel] [ literal[string] ]= literal[string]
keyword[return] identifier[new_panel] | def add_visualization(self, visualization, size_x=6, size_y=3, col=0, row=0):
"""
Adds the visualization to the dashboard. Leave col and row = 0 for automatic placement of the visualization.
Visualizations are placed on a grid with 12 columns and unlimited rows.
:param visualization: previously loaded visualization
:param size_x width of the panel
:param size_y height of the panel
:param col 1-based column of the top left corner, leave 0 for automatic placement
:param row 1-based row of the top left corner, leave 0 for automatic placement
:return: newly created panel or None
"""
new_panel_index = self.get_max_index() + 1
if col and row:
new_panel = {'col': col, 'row': row, 'size_x': size_x, 'size_y': size_y, 'panelIndex': new_panel_index, 'type': 'visualization', 'id': visualization.id}
self.panels.append(new_panel)
return new_panel # depends on [control=['if'], data=[]]
else:
new_panel = append_panel(self.panels, size_x, size_y)
if new_panel:
new_panel['id'] = visualization.id
new_panel['panelIndex'] = new_panel_index
new_panel['type'] = 'visualization'
return new_panel # depends on [control=['if'], data=[]] |
def reindex_like(self, other, method=None, copy=True, limit=None,
                 tolerance=None):
    """
    Return an object with matching indices as other object.

    Conform the object to the same index on all axes. Optional
    filling logic, placing NaN in locations having no value
    in the previous index. A new object is produced unless the
    new index is equivalent to the current one and copy=False.

    Parameters
    ----------
    other : Object of the same data type
        Its row and column indices are used to define the new indices
        of this object.
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: propagate last valid observation forward to next
          valid
        * backfill / bfill: use next valid observation to fill gap
        * nearest: use nearest valid observations to fill gap
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    limit : int, default None
        Maximum number of consecutive labels to fill for inexact matches.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations must
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.

        .. versionadded:: 0.21.0 (list-like tolerance)

    Returns
    -------
    Series or DataFrame
        Same type as caller, but with changed indices on each axis.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex : Change to new indices or expand indices.

    Notes
    -----
    Same as calling
    ``.reindex(index=other.index, columns=other.columns,...)``.

    Examples
    --------
    >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
    ...                     [31, 87.8, 'high'],
    ...                     [22, 71.6, 'medium'],
    ...                     [35, 95, 'medium']],
    ...                    columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
    ...                    index=pd.date_range(start='2014-02-12',
    ...                                        end='2014-02-15', freq='D'))
    >>> df1
                temp_celsius temp_fahrenheit windspeed
    2014-02-12 24.3 75.7 high
    2014-02-13 31.0 87.8 high
    2014-02-14 22.0 71.6 medium
    2014-02-15 35.0 95.0 medium
    >>> df2 = pd.DataFrame([[28, 'low'],
    ...                     [30, 'low'],
    ...                     [35.1, 'medium']],
    ...                    columns=['temp_celsius', 'windspeed'],
    ...                    index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
    ...                                            '2014-02-15']))
    >>> df2
                temp_celsius windspeed
    2014-02-12 28.0 low
    2014-02-13 30.0 low
    2014-02-15 35.1 medium
    >>> df2.reindex_like(df1)
                temp_celsius temp_fahrenheit windspeed
    2014-02-12 28.0 NaN low
    2014-02-13 30.0 NaN low
    2014-02-14 NaN NaN NaN
    2014-02-15 35.1 NaN medium
    """
    # Collect one reindex target per axis from `other`, forwarding the
    # fill options, then delegate to the general-purpose reindex().
    reindex_kwargs = other._construct_axes_dict(
        axes=self._AXIS_ORDERS,
        method=method,
        copy=copy,
        limit=limit,
        tolerance=tolerance,
    )
    return self.reindex(**reindex_kwargs)
constant[
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
]
variable[d] assign[=] call[name[other]._construct_axes_dict, parameter[]]
return[call[name[self].reindex, parameter[]]] | keyword[def] identifier[reindex_like] ( identifier[self] , identifier[other] , identifier[method] = keyword[None] , identifier[copy] = keyword[True] , identifier[limit] = keyword[None] ,
identifier[tolerance] = keyword[None] ):
literal[string]
identifier[d] = identifier[other] . identifier[_construct_axes_dict] ( identifier[axes] = identifier[self] . identifier[_AXIS_ORDERS] , identifier[method] = identifier[method] ,
identifier[copy] = identifier[copy] , identifier[limit] = identifier[limit] ,
identifier[tolerance] = identifier[tolerance] )
keyword[return] identifier[self] . identifier[reindex] (** identifier[d] ) | def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance)
return self.reindex(**d) |
def get_code_hash(code: str) -> str:
    """
    :param code: bytecode as a hex string, with or without a ``0x`` prefix
    :return: Returns the ``0x``-prefixed keccak-256 hash of the given
        bytecode, or an empty string if the input is not valid hex
    """
    # Strip an optional "0x" prefix before decoding the hex payload.
    stripped = code[2:] if code.startswith("0x") else code
    try:
        # NOTE: keccak-256 (pre-NIST padding), not hashlib's sha3_256.
        digest = sha3.keccak_256()
        digest.update(bytes.fromhex(stripped))
    except ValueError:
        # bytes.fromhex rejects odd-length or non-hex input.
        log.debug("Unable to change the bytecode to bytes. Bytecode: {}".format(stripped))
        return ""
    return "0x" + digest.hexdigest()
constant[
:param code: bytecode
:return: Returns hash of the given bytecode
]
variable[code] assign[=] <ast.IfExp object at 0x7da1b1d35600>
<ast.Try object at 0x7da1b1d37ac0> | keyword[def] identifier[get_code_hash] ( identifier[code] : identifier[str] )-> identifier[str] :
literal[string]
identifier[code] = identifier[code] [ literal[int] :] keyword[if] identifier[code] [: literal[int] ]== literal[string] keyword[else] identifier[code]
keyword[try] :
identifier[keccak] = identifier[sha3] . identifier[keccak_256] ()
identifier[keccak] . identifier[update] ( identifier[bytes] . identifier[fromhex] ( identifier[code] ))
keyword[return] literal[string] + identifier[keccak] . identifier[hexdigest] ()
keyword[except] identifier[ValueError] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[code] ))
keyword[return] literal[string] | def get_code_hash(code: str) -> str:
"""
:param code: bytecode
:return: Returns hash of the given bytecode
"""
code = code[2:] if code[:2] == '0x' else code
try:
keccak = sha3.keccak_256()
keccak.update(bytes.fromhex(code))
return '0x' + keccak.hexdigest() # depends on [control=['try'], data=[]]
except ValueError:
log.debug('Unable to change the bytecode to bytes. Bytecode: {}'.format(code))
return '' # depends on [control=['except'], data=[]] |
def on_error_print_details(actual, expected):
    """
    Print text details in case of assertion failed errors.

    .. sourcecode:: python

        with on_error_print_details(actual_text, expected_text):
            ...  # Do something
    """
    try:
        yield
    except Exception:
        # Show a line-by-line ndiff of the two texts before re-raising,
        # so the failing assertion comes with readable context.
        delta = difflib.ndiff(expected.splitlines(), actual.splitlines())
        print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(u"\n".join(delta)))
        if DEBUG:
            print(u"expected:\n{0}\n".format(expected))
            print(u"actual:\n{0}".format(actual))
        raise
constant[
Print text details in case of assertation failed errors.
.. sourcecode:: python
with on_error_print_details(actual_text, expected_text):
... # Do something
]
<ast.Try object at 0x7da20e9b2b00> | keyword[def] identifier[on_error_print_details] ( identifier[actual] , identifier[expected] ):
literal[string]
keyword[try] :
keyword[yield]
keyword[except] identifier[Exception] :
identifier[diff] = identifier[difflib] . identifier[ndiff] ( identifier[expected] . identifier[splitlines] (), identifier[actual] . identifier[splitlines] ())
identifier[diff_text] = literal[string] . identifier[join] ( identifier[diff] )
identifier[print] ( literal[string] . identifier[format] ( identifier[diff_text] ))
keyword[if] identifier[DEBUG] :
identifier[print] ( literal[string] . identifier[format] ( identifier[expected] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[actual] ))
keyword[raise] | def on_error_print_details(actual, expected):
"""
Print text details in case of assertation failed errors.
.. sourcecode:: python
with on_error_print_details(actual_text, expected_text):
... # Do something
"""
try:
yield # depends on [control=['try'], data=[]]
except Exception:
diff = difflib.ndiff(expected.splitlines(), actual.splitlines())
diff_text = u'\n'.join(diff)
print(u'DIFF (+ ACTUAL, - EXPECTED):\n{0}\n'.format(diff_text))
if DEBUG:
print(u'expected:\n{0}\n'.format(expected))
print(u'actual:\n{0}'.format(actual)) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=[]] |
def max(self, axis=None, skipna=True, *args, **kwargs):
    """
    Return the maximum value of the Array or maximum along
    an axis.

    Parameters
    ----------
    axis : None
        Validated but not otherwise used by this implementation.
    skipna : bool, default True
        Exclude NA/NaT values. If False and any NA is present,
        ``NaT`` is returned.
    *args, **kwargs
        Accepted for compatibility; rejected by validation if non-empty.

    Returns
    -------
    scalar
        The boxed maximum value, or ``NaT`` when there is nothing to
        reduce (empty input, or all-NA with ``skipna=True``).

    See Also
    --------
    numpy.ndarray.max
    Index.max : Return the maximum value in an Index.
    Series.max : Return the maximum value in a Series.
    """
    # TODO: skipna is broken with max.
    # See https://github.com/pandas-dev/pandas/issues/24265
    nv.validate_max(args, kwargs)
    nv.validate_minmax_axis(axis)
    mask = self.isna()
    if skipna:
        # Drop NA positions and reduce over the underlying int64 view.
        values = self[~mask].asi8
    elif mask.any():
        # skipna=False with any NA present propagates NaT.
        return NaT
    else:
        values = self.asi8
    if not len(values):
        # short-circuit for empty max / min
        return NaT
    result = nanops.nanmax(values, skipna=skipna)
    # Don't have to worry about NA `result`, since no NA went in.
    return self._box_func(result)
constant[
Return the maximum value of the Array or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
]
call[name[nv].validate_max, parameter[name[args], name[kwargs]]]
call[name[nv].validate_minmax_axis, parameter[name[axis]]]
variable[mask] assign[=] call[name[self].isna, parameter[]]
if name[skipna] begin[:]
variable[values] assign[=] call[name[self]][<ast.UnaryOp object at 0x7da18f7222c0>].asi8
if <ast.UnaryOp object at 0x7da18f7215d0> begin[:]
return[name[NaT]]
variable[result] assign[=] call[name[nanops].nanmax, parameter[name[values]]]
return[call[name[self]._box_func, parameter[name[result]]]] | keyword[def] identifier[max] ( identifier[self] , identifier[axis] = keyword[None] , identifier[skipna] = keyword[True] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[nv] . identifier[validate_max] ( identifier[args] , identifier[kwargs] )
identifier[nv] . identifier[validate_minmax_axis] ( identifier[axis] )
identifier[mask] = identifier[self] . identifier[isna] ()
keyword[if] identifier[skipna] :
identifier[values] = identifier[self] [~ identifier[mask] ]. identifier[asi8]
keyword[elif] identifier[mask] . identifier[any] ():
keyword[return] identifier[NaT]
keyword[else] :
identifier[values] = identifier[self] . identifier[asi8]
keyword[if] keyword[not] identifier[len] ( identifier[values] ):
keyword[return] identifier[NaT]
identifier[result] = identifier[nanops] . identifier[nanmax] ( identifier[values] , identifier[skipna] = identifier[skipna] )
keyword[return] identifier[self] . identifier[_box_func] ( identifier[result] ) | def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Array or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
# TODO: skipna is broken with max.
# See https://github.com/pandas-dev/pandas/issues/24265
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
mask = self.isna()
if skipna:
values = self[~mask].asi8 # depends on [control=['if'], data=[]]
elif mask.any():
return NaT # depends on [control=['if'], data=[]]
else:
values = self.asi8
if not len(values):
# short-circut for empty max / min
return NaT # depends on [control=['if'], data=[]]
result = nanops.nanmax(values, skipna=skipna)
# Don't have to worry about NA `result`, since no NA went in.
return self._box_func(result) |
def find_by(self, column=None, value=None, order_by=None, limit=0):
    """
    Find all items whose ``column`` matches ``value``.

    :param column: column to search.
    :param value: value to look for in `column`.
    :param order_by: column on which to order the results. \
        To change the sort, prepend with < or >.
    :param limit: How many rows to fetch; 0 means no limit.
    :return: list of model instances built from the matching rows.
    :raises ValueError: if either ``column`` or ``value`` is missing.
    """
    # Validate arguments BEFORE acquiring a database connection, so bad
    # calls fail fast without opening (and leaking work on) a connection.
    if column is None or value is None:
        raise ValueError("You need to supply both a column and a value")
    with rconnect() as conn:
        try:
            query = self._base()
            if order_by is not None:
                query = self._order_by(query, order_by)
            if limit > 0:
                query = self._limit(query, limit)
            query = query.filter({column: value})
            log.debug(query)
            rv = query.run(conn)
        except Exception as e:
            # Log and re-raise so callers still see the original failure.
            # (logging.warn is a deprecated alias of warning.)
            log.warning(e)
            raise
        else:
            return [self._model(row) for row in rv]
constant[
Find all items that matches your a column/value.
:param column: column to search.
:param value: value to look for in `column`.
:param limit: How many rows to fetch.
:param order_by: column on which to order the results. To change the sort, prepend with < or >.
]
with call[name[rconnect], parameter[]] begin[:]
if <ast.BoolOp object at 0x7da1b1622ef0> begin[:]
<ast.Raise object at 0x7da1b1622b30>
<ast.Try object at 0x7da1b16229e0> | keyword[def] identifier[find_by] ( identifier[self] , identifier[column] = keyword[None] , identifier[value] = keyword[None] , identifier[order_by] = keyword[None] , identifier[limit] = literal[int] ):
literal[string]
keyword[with] identifier[rconnect] () keyword[as] identifier[conn] :
keyword[if] identifier[column] keyword[is] keyword[None] keyword[or] identifier[value] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[try] :
identifier[query] = identifier[self] . identifier[_base] ()
keyword[if] identifier[order_by] keyword[is] keyword[not] keyword[None] :
identifier[query] = identifier[self] . identifier[_order_by] ( identifier[query] , identifier[order_by] )
keyword[if] identifier[limit] > literal[int] :
identifier[query] = identifier[self] . identifier[_limit] ( identifier[query] , identifier[limit] )
identifier[query] = identifier[query] . identifier[filter] ({ identifier[column] : identifier[value] })
identifier[log] . identifier[debug] ( identifier[query] )
identifier[rv] = identifier[query] . identifier[run] ( identifier[conn] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[warn] ( identifier[e] )
keyword[raise]
keyword[else] :
identifier[data] =[ identifier[self] . identifier[_model] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[rv] ]
keyword[return] identifier[data] | def find_by(self, column=None, value=None, order_by=None, limit=0):
"""
Find all items that matches your a column/value.
:param column: column to search.
:param value: value to look for in `column`.
:param limit: How many rows to fetch.
:param order_by: column on which to order the results. To change the sort, prepend with < or >.
"""
with rconnect() as conn:
if column is None or value is None:
raise ValueError('You need to supply both a column and a value') # depends on [control=['if'], data=[]]
try:
query = self._base()
if order_by is not None:
query = self._order_by(query, order_by) # depends on [control=['if'], data=['order_by']]
if limit > 0:
query = self._limit(query, limit) # depends on [control=['if'], data=['limit']]
query = query.filter({column: value})
log.debug(query)
rv = query.run(conn) # depends on [control=['try'], data=[]]
except Exception as e:
log.warn(e)
raise # depends on [control=['except'], data=['e']]
else:
data = [self._model(_) for _ in rv]
return data # depends on [control=['with'], data=['conn']] |
def iterative_overlap_assembly(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """
    Assembles longer sequences from reads centered on a variant by
    merging all pairs of overlapping sequences and collapsing
    shorter sequences onto every longer sequence which contains them.

    Returns a list of variant sequences, sorted by decreasing read support.
    """
    # With fewer than two sequences there is nothing to collapse or merge.
    if len(variant_sequences) <= 1:
        return variant_sequences
    # Fold substrings into their containing sequences first, shrinking the
    # input set for the pairwise merge below.
    initial_count = len(variant_sequences)
    variant_sequences = collapse_substrings(variant_sequences)
    logger.info(
        "Collapsed %d -> %d sequences",
        initial_count,
        len(variant_sequences))
    merged = greedy_merge(variant_sequences, min_overlap_size)
    # Most strongly supported sequences (largest read count) first.
    return sorted(merged, key=lambda seq: -len(seq.reads))
constant[
Assembles longer sequences from reads centered on a variant by
between merging all pairs of overlapping sequences and collapsing
shorter sequences onto every longer sequence which contains them.
Returns a list of variant sequences, sorted by decreasing read support.
]
if compare[call[name[len], parameter[name[variant_sequences]]] less_or_equal[<=] constant[1]] begin[:]
return[name[variant_sequences]]
variable[n_before_collapse] assign[=] call[name[len], parameter[name[variant_sequences]]]
variable[variant_sequences] assign[=] call[name[collapse_substrings], parameter[name[variant_sequences]]]
variable[n_after_collapse] assign[=] call[name[len], parameter[name[variant_sequences]]]
call[name[logger].info, parameter[constant[Collapsed %d -> %d sequences], name[n_before_collapse], name[n_after_collapse]]]
variable[merged_variant_sequences] assign[=] call[name[greedy_merge], parameter[name[variant_sequences], name[min_overlap_size]]]
return[call[name[list], parameter[call[name[sorted], parameter[name[merged_variant_sequences]]]]]] | keyword[def] identifier[iterative_overlap_assembly] (
identifier[variant_sequences] ,
identifier[min_overlap_size] = identifier[MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE] ):
literal[string]
keyword[if] identifier[len] ( identifier[variant_sequences] )<= literal[int] :
keyword[return] identifier[variant_sequences]
identifier[n_before_collapse] = identifier[len] ( identifier[variant_sequences] )
identifier[variant_sequences] = identifier[collapse_substrings] ( identifier[variant_sequences] )
identifier[n_after_collapse] = identifier[len] ( identifier[variant_sequences] )
identifier[logger] . identifier[info] (
literal[string] ,
identifier[n_before_collapse] ,
identifier[n_after_collapse] )
identifier[merged_variant_sequences] = identifier[greedy_merge] ( identifier[variant_sequences] , identifier[min_overlap_size] )
keyword[return] identifier[list] ( identifier[sorted] (
identifier[merged_variant_sequences] ,
identifier[key] = keyword[lambda] identifier[seq] :- identifier[len] ( identifier[seq] . identifier[reads] ))) | def iterative_overlap_assembly(variant_sequences, min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
"""
Assembles longer sequences from reads centered on a variant by
between merging all pairs of overlapping sequences and collapsing
shorter sequences onto every longer sequence which contains them.
Returns a list of variant sequences, sorted by decreasing read support.
"""
if len(variant_sequences) <= 1:
# if we don't have at least two sequences to start with then
# skip the whole mess below
return variant_sequences # depends on [control=['if'], data=[]]
# reduce the number of inputs to the merge algorithm by first collapsing
# shorter sequences onto the longer sequences which contain them
n_before_collapse = len(variant_sequences)
variant_sequences = collapse_substrings(variant_sequences)
n_after_collapse = len(variant_sequences)
logger.info('Collapsed %d -> %d sequences', n_before_collapse, n_after_collapse)
merged_variant_sequences = greedy_merge(variant_sequences, min_overlap_size)
return list(sorted(merged_variant_sequences, key=lambda seq: -len(seq.reads))) |
def arg_split(s, posix=False):
"""Split a command line's arguments in a shell-like manner returned
as a list of lists. Use ';;' with white space to indicate separate
commands.
This is a modified version of the standard library's shlex.split()
function, but with a default of posix=False for splitting, so that quotes
in inputs are respected.
"""
args_list = [[]]
if isinstance(s, bytes):
s = s.decode("utf-8")
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
args = list(lex)
for arg in args:
if ';;' == arg:
args_list.append([])
else:
args_list[-1].append(arg)
pass
pass
return args_list | def function[arg_split, parameter[s, posix]]:
constant[Split a command line's arguments in a shell-like manner returned
as a list of lists. Use ';;' with white space to indicate separate
commands.
This is a modified version of the standard library's shlex.split()
function, but with a default of posix=False for splitting, so that quotes
in inputs are respected.
]
variable[args_list] assign[=] list[[<ast.List object at 0x7da1b05c58d0>]]
if call[name[isinstance], parameter[name[s], name[bytes]]] begin[:]
variable[s] assign[=] call[name[s].decode, parameter[constant[utf-8]]]
variable[lex] assign[=] call[name[shlex].shlex, parameter[name[s]]]
name[lex].whitespace_split assign[=] constant[True]
variable[args] assign[=] call[name[list], parameter[name[lex]]]
for taget[name[arg]] in starred[name[args]] begin[:]
if compare[constant[;;] equal[==] name[arg]] begin[:]
call[name[args_list].append, parameter[list[[]]]]
pass
return[name[args_list]] | keyword[def] identifier[arg_split] ( identifier[s] , identifier[posix] = keyword[False] ):
literal[string]
identifier[args_list] =[[]]
keyword[if] identifier[isinstance] ( identifier[s] , identifier[bytes] ):
identifier[s] = identifier[s] . identifier[decode] ( literal[string] )
identifier[lex] = identifier[shlex] . identifier[shlex] ( identifier[s] , identifier[posix] = identifier[posix] )
identifier[lex] . identifier[whitespace_split] = keyword[True]
identifier[args] = identifier[list] ( identifier[lex] )
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] literal[string] == identifier[arg] :
identifier[args_list] . identifier[append] ([])
keyword[else] :
identifier[args_list] [- literal[int] ]. identifier[append] ( identifier[arg] )
keyword[pass]
keyword[pass]
keyword[return] identifier[args_list] | def arg_split(s, posix=False):
"""Split a command line's arguments in a shell-like manner returned
as a list of lists. Use ';;' with white space to indicate separate
commands.
This is a modified version of the standard library's shlex.split()
function, but with a default of posix=False for splitting, so that quotes
in inputs are respected.
"""
args_list = [[]]
if isinstance(s, bytes):
s = s.decode('utf-8') # depends on [control=['if'], data=[]]
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
args = list(lex)
for arg in args:
if ';;' == arg:
args_list.append([]) # depends on [control=['if'], data=[]]
else:
args_list[-1].append(arg)
pass
pass # depends on [control=['for'], data=['arg']]
return args_list |
def markDownloaded(self, media):
""" Mark the file as downloaded (by the nature of Plex it will be marked as downloaded within
any SyncItem where it presented).
Parameters:
media (base.Playable): the media to be marked as downloaded.
"""
url = '/sync/%s/item/%s/downloaded' % (self.clientIdentifier, media.ratingKey)
media._server.query(url, method=requests.put) | def function[markDownloaded, parameter[self, media]]:
constant[ Mark the file as downloaded (by the nature of Plex it will be marked as downloaded within
any SyncItem where it presented).
Parameters:
media (base.Playable): the media to be marked as downloaded.
]
variable[url] assign[=] binary_operation[constant[/sync/%s/item/%s/downloaded] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b060a5f0>, <ast.Attribute object at 0x7da1b060a290>]]]
call[name[media]._server.query, parameter[name[url]]] | keyword[def] identifier[markDownloaded] ( identifier[self] , identifier[media] ):
literal[string]
identifier[url] = literal[string] %( identifier[self] . identifier[clientIdentifier] , identifier[media] . identifier[ratingKey] )
identifier[media] . identifier[_server] . identifier[query] ( identifier[url] , identifier[method] = identifier[requests] . identifier[put] ) | def markDownloaded(self, media):
""" Mark the file as downloaded (by the nature of Plex it will be marked as downloaded within
any SyncItem where it presented).
Parameters:
media (base.Playable): the media to be marked as downloaded.
"""
url = '/sync/%s/item/%s/downloaded' % (self.clientIdentifier, media.ratingKey)
media._server.query(url, method=requests.put) |
def pose(T_frame_world, alpha=0.1, tube_radius=0.005, center_scale=0.01):
"""Plot a 3D pose as a set of axes (x red, y green, z blue).
Parameters
----------
T_frame_world : autolab_core.RigidTransform
The pose relative to world coordinates.
alpha : float
Length of plotted x,y,z axes.
tube_radius : float
Radius of plotted x,y,z axes.
center_scale : float
Radius of the pose's origin ball.
"""
R = T_frame_world.rotation
t = T_frame_world.translation
x_axis_tf = np.array([t, t + alpha * R[:,0]])
y_axis_tf = np.array([t, t + alpha * R[:,1]])
z_axis_tf = np.array([t, t + alpha * R[:,2]])
Visualizer3D.points(t, color=(1,1,1), scale=center_scale)
Visualizer3D.plot3d(x_axis_tf, color=(1,0,0), tube_radius=tube_radius)
Visualizer3D.plot3d(y_axis_tf, color=(0,1,0), tube_radius=tube_radius)
Visualizer3D.plot3d(z_axis_tf, color=(0,0,1), tube_radius=tube_radius) | def function[pose, parameter[T_frame_world, alpha, tube_radius, center_scale]]:
constant[Plot a 3D pose as a set of axes (x red, y green, z blue).
Parameters
----------
T_frame_world : autolab_core.RigidTransform
The pose relative to world coordinates.
alpha : float
Length of plotted x,y,z axes.
tube_radius : float
Radius of plotted x,y,z axes.
center_scale : float
Radius of the pose's origin ball.
]
variable[R] assign[=] name[T_frame_world].rotation
variable[t] assign[=] name[T_frame_world].translation
variable[x_axis_tf] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b0498bb0>, <ast.BinOp object at 0x7da1b04995d0>]]]]
variable[y_axis_tf] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b049a830>, <ast.BinOp object at 0x7da1b04993f0>]]]]
variable[z_axis_tf] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b0499900>, <ast.BinOp object at 0x7da1b049b190>]]]]
call[name[Visualizer3D].points, parameter[name[t]]]
call[name[Visualizer3D].plot3d, parameter[name[x_axis_tf]]]
call[name[Visualizer3D].plot3d, parameter[name[y_axis_tf]]]
call[name[Visualizer3D].plot3d, parameter[name[z_axis_tf]]] | keyword[def] identifier[pose] ( identifier[T_frame_world] , identifier[alpha] = literal[int] , identifier[tube_radius] = literal[int] , identifier[center_scale] = literal[int] ):
literal[string]
identifier[R] = identifier[T_frame_world] . identifier[rotation]
identifier[t] = identifier[T_frame_world] . identifier[translation]
identifier[x_axis_tf] = identifier[np] . identifier[array] ([ identifier[t] , identifier[t] + identifier[alpha] * identifier[R] [:, literal[int] ]])
identifier[y_axis_tf] = identifier[np] . identifier[array] ([ identifier[t] , identifier[t] + identifier[alpha] * identifier[R] [:, literal[int] ]])
identifier[z_axis_tf] = identifier[np] . identifier[array] ([ identifier[t] , identifier[t] + identifier[alpha] * identifier[R] [:, literal[int] ]])
identifier[Visualizer3D] . identifier[points] ( identifier[t] , identifier[color] =( literal[int] , literal[int] , literal[int] ), identifier[scale] = identifier[center_scale] )
identifier[Visualizer3D] . identifier[plot3d] ( identifier[x_axis_tf] , identifier[color] =( literal[int] , literal[int] , literal[int] ), identifier[tube_radius] = identifier[tube_radius] )
identifier[Visualizer3D] . identifier[plot3d] ( identifier[y_axis_tf] , identifier[color] =( literal[int] , literal[int] , literal[int] ), identifier[tube_radius] = identifier[tube_radius] )
identifier[Visualizer3D] . identifier[plot3d] ( identifier[z_axis_tf] , identifier[color] =( literal[int] , literal[int] , literal[int] ), identifier[tube_radius] = identifier[tube_radius] ) | def pose(T_frame_world, alpha=0.1, tube_radius=0.005, center_scale=0.01):
"""Plot a 3D pose as a set of axes (x red, y green, z blue).
Parameters
----------
T_frame_world : autolab_core.RigidTransform
The pose relative to world coordinates.
alpha : float
Length of plotted x,y,z axes.
tube_radius : float
Radius of plotted x,y,z axes.
center_scale : float
Radius of the pose's origin ball.
"""
R = T_frame_world.rotation
t = T_frame_world.translation
x_axis_tf = np.array([t, t + alpha * R[:, 0]])
y_axis_tf = np.array([t, t + alpha * R[:, 1]])
z_axis_tf = np.array([t, t + alpha * R[:, 2]])
Visualizer3D.points(t, color=(1, 1, 1), scale=center_scale)
Visualizer3D.plot3d(x_axis_tf, color=(1, 0, 0), tube_radius=tube_radius)
Visualizer3D.plot3d(y_axis_tf, color=(0, 1, 0), tube_radius=tube_radius)
Visualizer3D.plot3d(z_axis_tf, color=(0, 0, 1), tube_radius=tube_radius) |
def attowiki_distro_path():
"""return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ?
"""
attowiki_path = os.path.abspath(__file__)
if attowiki_path[-1] != '/':
attowiki_path = attowiki_path[:attowiki_path.rfind('/')]
else:
attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
return attowiki_path | def function[attowiki_distro_path, parameter[]]:
constant[return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ?
]
variable[attowiki_path] assign[=] call[name[os].path.abspath, parameter[name[__file__]]]
if compare[call[name[attowiki_path]][<ast.UnaryOp object at 0x7da20c6e4be0>] not_equal[!=] constant[/]] begin[:]
variable[attowiki_path] assign[=] call[name[attowiki_path]][<ast.Slice object at 0x7da20c6e4370>]
return[name[attowiki_path]] | keyword[def] identifier[attowiki_distro_path] ():
literal[string]
identifier[attowiki_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )
keyword[if] identifier[attowiki_path] [- literal[int] ]!= literal[string] :
identifier[attowiki_path] = identifier[attowiki_path] [: identifier[attowiki_path] . identifier[rfind] ( literal[string] )]
keyword[else] :
identifier[attowiki_path] = identifier[attowiki_path] [: identifier[attowiki_path] [:- literal[int] ]. identifier[rfind] ( literal[string] )]
keyword[return] identifier[attowiki_path] | def attowiki_distro_path():
"""return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ?
"""
attowiki_path = os.path.abspath(__file__)
if attowiki_path[-1] != '/':
attowiki_path = attowiki_path[:attowiki_path.rfind('/')] # depends on [control=['if'], data=[]]
else:
attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
return attowiki_path |
def normalize_serial_number(sn,
max_length=None, left_fill='0', right_fill=str(), blank=str(),
valid_chars=' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
invalid_chars=None,
strip_whitespace=True, join=False, na=rex.nones):
r"""Make a string compatible with typical serial number requirements
# Default configuration strips internal and external whitespaces and retains only the last 10 characters
>>> normalize_serial_number('1C 234567890 ')
'0234567890'
>>> normalize_serial_number('1C 234567890 ', max_length=20)
'000000001C 234567890'
>>> normalize_serial_number('Unknown', blank=None, left_fill=str())
''
>>> normalize_serial_number('N/A', blank='', left_fill=str())
'A'
>>> normalize_serial_number('1C 234567890 ', max_length=20, left_fill='')
'1C 234567890'
Notice how the max_length setting (20) carries over from the previous test!
>>> len(normalize_serial_number('Unknown', blank=False))
20
>>> normalize_serial_number('Unknown', blank=False)
'00000000000000000000'
>>> normalize_serial_number(' \t1C\t-\t234567890 \x00\x7f', max_length=14, left_fill='0',
... valid_chars='0123456789ABC', invalid_chars=None, join=True)
'1C\t-\t234567890'
Notice how the max_length setting carries over from the previous test!
>>> len(normalize_serial_number('Unknown', blank=False))
14
Restore the default max_length setting
>>> len(normalize_serial_number('Unknown', blank=False, max_length=10))
10
>>> normalize_serial_number('NO SERIAL', blank='--=--', left_fill='') # doctest: +NORMALIZE_WHITESPACE
'NO SERIAL'
>>> normalize_serial_number('NO SERIAL', blank='', left_fill='') # doctest: +NORMALIZE_WHITESPACE
'NO SERIAL'
>>> normalize_serial_number('1C 234567890 ', valid_chars='0123456789')
'0234567890'
"""
# All 9 kwargs have persistent default values stored as attributes of the funcion instance
if max_length is None:
max_length = normalize_serial_number.max_length
else:
normalize_serial_number.max_length = max_length
if left_fill is None:
left_fill = normalize_serial_number.left_fill
else:
normalize_serial_number.left_fill = left_fill
if right_fill is None:
right_fill = normalize_serial_number.right_fill
else:
normalize_serial_number.right_fill = right_fill
if blank is None:
blank = normalize_serial_number.blank
else:
normalize_serial_number.blank = blank
if valid_chars is None:
valid_chars = normalize_serial_number.valid_chars
else:
normalize_serial_number.valid_chars = valid_chars
if invalid_chars is None:
invalid_chars = normalize_serial_number.invalid_chars
else:
normalize_serial_number.invalid_chars = invalid_chars
if strip_whitespace is None:
strip_whitespace = normalize_serial_number.strip_whitespace
else:
normalize_serial_number.strip_whitespace = strip_whitespace
if join is None:
join = normalize_serial_number.join
else:
normalize_serial_number.join = join
if na is None:
na = normalize_serial_number.na
else:
normalize_serial_number.na = na
if invalid_chars is None:
invalid_chars = (c for c in charlist.ascii_all if c not in valid_chars)
invalid_chars = ''.join(invalid_chars)
sn = str(sn).strip(invalid_chars)
if strip_whitespace:
sn = sn.strip()
if invalid_chars:
if join:
sn = sn.translate(dict(zip(invalid_chars, [''] * len(invalid_chars))))
else:
sn = multisplit(sn, invalid_chars)[-1]
sn = sn[-max_length:]
if strip_whitespace:
sn = sn.strip()
if na:
if isinstance(na, (tuple, set, dict, list)) and sn in na:
sn = ''
elif na.match(sn):
sn = ''
if not sn and not (blank is False):
return blank
if left_fill:
sn = left_fill * int(max_length - len(sn) / len(left_fill)) + sn
if right_fill:
sn = sn + right_fill * (max_length - len(sn) / len(right_fill))
return sn | def function[normalize_serial_number, parameter[sn, max_length, left_fill, right_fill, blank, valid_chars, invalid_chars, strip_whitespace, join, na]]:
constant[Make a string compatible with typical serial number requirements
# Default configuration strips internal and external whitespaces and retains only the last 10 characters
>>> normalize_serial_number('1C 234567890 ')
'0234567890'
>>> normalize_serial_number('1C 234567890 ', max_length=20)
'000000001C 234567890'
>>> normalize_serial_number('Unknown', blank=None, left_fill=str())
''
>>> normalize_serial_number('N/A', blank='', left_fill=str())
'A'
>>> normalize_serial_number('1C 234567890 ', max_length=20, left_fill='')
'1C 234567890'
Notice how the max_length setting (20) carries over from the previous test!
>>> len(normalize_serial_number('Unknown', blank=False))
20
>>> normalize_serial_number('Unknown', blank=False)
'00000000000000000000'
>>> normalize_serial_number(' \t1C\t-\t234567890 \x00\x7f', max_length=14, left_fill='0',
... valid_chars='0123456789ABC', invalid_chars=None, join=True)
'1C\t-\t234567890'
Notice how the max_length setting carries over from the previous test!
>>> len(normalize_serial_number('Unknown', blank=False))
14
Restore the default max_length setting
>>> len(normalize_serial_number('Unknown', blank=False, max_length=10))
10
>>> normalize_serial_number('NO SERIAL', blank='--=--', left_fill='') # doctest: +NORMALIZE_WHITESPACE
'NO SERIAL'
>>> normalize_serial_number('NO SERIAL', blank='', left_fill='') # doctest: +NORMALIZE_WHITESPACE
'NO SERIAL'
>>> normalize_serial_number('1C 234567890 ', valid_chars='0123456789')
'0234567890'
]
if compare[name[max_length] is constant[None]] begin[:]
variable[max_length] assign[=] name[normalize_serial_number].max_length
if compare[name[left_fill] is constant[None]] begin[:]
variable[left_fill] assign[=] name[normalize_serial_number].left_fill
if compare[name[right_fill] is constant[None]] begin[:]
variable[right_fill] assign[=] name[normalize_serial_number].right_fill
if compare[name[blank] is constant[None]] begin[:]
variable[blank] assign[=] name[normalize_serial_number].blank
if compare[name[valid_chars] is constant[None]] begin[:]
variable[valid_chars] assign[=] name[normalize_serial_number].valid_chars
if compare[name[invalid_chars] is constant[None]] begin[:]
variable[invalid_chars] assign[=] name[normalize_serial_number].invalid_chars
if compare[name[strip_whitespace] is constant[None]] begin[:]
variable[strip_whitespace] assign[=] name[normalize_serial_number].strip_whitespace
if compare[name[join] is constant[None]] begin[:]
variable[join] assign[=] name[normalize_serial_number].join
if compare[name[na] is constant[None]] begin[:]
variable[na] assign[=] name[normalize_serial_number].na
if compare[name[invalid_chars] is constant[None]] begin[:]
variable[invalid_chars] assign[=] <ast.GeneratorExp object at 0x7da1b2448550>
variable[invalid_chars] assign[=] call[constant[].join, parameter[name[invalid_chars]]]
variable[sn] assign[=] call[call[name[str], parameter[name[sn]]].strip, parameter[name[invalid_chars]]]
if name[strip_whitespace] begin[:]
variable[sn] assign[=] call[name[sn].strip, parameter[]]
if name[invalid_chars] begin[:]
if name[join] begin[:]
variable[sn] assign[=] call[name[sn].translate, parameter[call[name[dict], parameter[call[name[zip], parameter[name[invalid_chars], binary_operation[list[[<ast.Constant object at 0x7da1b2448e50>]] * call[name[len], parameter[name[invalid_chars]]]]]]]]]]
variable[sn] assign[=] call[name[sn]][<ast.Slice object at 0x7da1b2449030>]
if name[strip_whitespace] begin[:]
variable[sn] assign[=] call[name[sn].strip, parameter[]]
if name[na] begin[:]
if <ast.BoolOp object at 0x7da1b2449300> begin[:]
variable[sn] assign[=] constant[]
if <ast.BoolOp object at 0x7da1b244ad40> begin[:]
return[name[blank]]
if name[left_fill] begin[:]
variable[sn] assign[=] binary_operation[binary_operation[name[left_fill] * call[name[int], parameter[binary_operation[name[max_length] - binary_operation[call[name[len], parameter[name[sn]]] / call[name[len], parameter[name[left_fill]]]]]]]] + name[sn]]
if name[right_fill] begin[:]
variable[sn] assign[=] binary_operation[name[sn] + binary_operation[name[right_fill] * binary_operation[name[max_length] - binary_operation[call[name[len], parameter[name[sn]]] / call[name[len], parameter[name[right_fill]]]]]]]
return[name[sn]] | keyword[def] identifier[normalize_serial_number] ( identifier[sn] ,
identifier[max_length] = keyword[None] , identifier[left_fill] = literal[string] , identifier[right_fill] = identifier[str] (), identifier[blank] = identifier[str] (),
identifier[valid_chars] = literal[string] ,
identifier[invalid_chars] = keyword[None] ,
identifier[strip_whitespace] = keyword[True] , identifier[join] = keyword[False] , identifier[na] = identifier[rex] . identifier[nones] ):
literal[string]
keyword[if] identifier[max_length] keyword[is] keyword[None] :
identifier[max_length] = identifier[normalize_serial_number] . identifier[max_length]
keyword[else] :
identifier[normalize_serial_number] . identifier[max_length] = identifier[max_length]
keyword[if] identifier[left_fill] keyword[is] keyword[None] :
identifier[left_fill] = identifier[normalize_serial_number] . identifier[left_fill]
keyword[else] :
identifier[normalize_serial_number] . identifier[left_fill] = identifier[left_fill]
keyword[if] identifier[right_fill] keyword[is] keyword[None] :
identifier[right_fill] = identifier[normalize_serial_number] . identifier[right_fill]
keyword[else] :
identifier[normalize_serial_number] . identifier[right_fill] = identifier[right_fill]
keyword[if] identifier[blank] keyword[is] keyword[None] :
identifier[blank] = identifier[normalize_serial_number] . identifier[blank]
keyword[else] :
identifier[normalize_serial_number] . identifier[blank] = identifier[blank]
keyword[if] identifier[valid_chars] keyword[is] keyword[None] :
identifier[valid_chars] = identifier[normalize_serial_number] . identifier[valid_chars]
keyword[else] :
identifier[normalize_serial_number] . identifier[valid_chars] = identifier[valid_chars]
keyword[if] identifier[invalid_chars] keyword[is] keyword[None] :
identifier[invalid_chars] = identifier[normalize_serial_number] . identifier[invalid_chars]
keyword[else] :
identifier[normalize_serial_number] . identifier[invalid_chars] = identifier[invalid_chars]
keyword[if] identifier[strip_whitespace] keyword[is] keyword[None] :
identifier[strip_whitespace] = identifier[normalize_serial_number] . identifier[strip_whitespace]
keyword[else] :
identifier[normalize_serial_number] . identifier[strip_whitespace] = identifier[strip_whitespace]
keyword[if] identifier[join] keyword[is] keyword[None] :
identifier[join] = identifier[normalize_serial_number] . identifier[join]
keyword[else] :
identifier[normalize_serial_number] . identifier[join] = identifier[join]
keyword[if] identifier[na] keyword[is] keyword[None] :
identifier[na] = identifier[normalize_serial_number] . identifier[na]
keyword[else] :
identifier[normalize_serial_number] . identifier[na] = identifier[na]
keyword[if] identifier[invalid_chars] keyword[is] keyword[None] :
identifier[invalid_chars] =( identifier[c] keyword[for] identifier[c] keyword[in] identifier[charlist] . identifier[ascii_all] keyword[if] identifier[c] keyword[not] keyword[in] identifier[valid_chars] )
identifier[invalid_chars] = literal[string] . identifier[join] ( identifier[invalid_chars] )
identifier[sn] = identifier[str] ( identifier[sn] ). identifier[strip] ( identifier[invalid_chars] )
keyword[if] identifier[strip_whitespace] :
identifier[sn] = identifier[sn] . identifier[strip] ()
keyword[if] identifier[invalid_chars] :
keyword[if] identifier[join] :
identifier[sn] = identifier[sn] . identifier[translate] ( identifier[dict] ( identifier[zip] ( identifier[invalid_chars] ,[ literal[string] ]* identifier[len] ( identifier[invalid_chars] ))))
keyword[else] :
identifier[sn] = identifier[multisplit] ( identifier[sn] , identifier[invalid_chars] )[- literal[int] ]
identifier[sn] = identifier[sn] [- identifier[max_length] :]
keyword[if] identifier[strip_whitespace] :
identifier[sn] = identifier[sn] . identifier[strip] ()
keyword[if] identifier[na] :
keyword[if] identifier[isinstance] ( identifier[na] ,( identifier[tuple] , identifier[set] , identifier[dict] , identifier[list] )) keyword[and] identifier[sn] keyword[in] identifier[na] :
identifier[sn] = literal[string]
keyword[elif] identifier[na] . identifier[match] ( identifier[sn] ):
identifier[sn] = literal[string]
keyword[if] keyword[not] identifier[sn] keyword[and] keyword[not] ( identifier[blank] keyword[is] keyword[False] ):
keyword[return] identifier[blank]
keyword[if] identifier[left_fill] :
identifier[sn] = identifier[left_fill] * identifier[int] ( identifier[max_length] - identifier[len] ( identifier[sn] )/ identifier[len] ( identifier[left_fill] ))+ identifier[sn]
keyword[if] identifier[right_fill] :
identifier[sn] = identifier[sn] + identifier[right_fill] *( identifier[max_length] - identifier[len] ( identifier[sn] )/ identifier[len] ( identifier[right_fill] ))
keyword[return] identifier[sn] | def normalize_serial_number(sn, max_length=None, left_fill='0', right_fill=str(), blank=str(), valid_chars=' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', invalid_chars=None, strip_whitespace=True, join=False, na=rex.nones):
"""Make a string compatible with typical serial number requirements
# Default configuration strips internal and external whitespaces and retains only the last 10 characters
>>> normalize_serial_number('1C 234567890 ')
'0234567890'
>>> normalize_serial_number('1C 234567890 ', max_length=20)
'000000001C 234567890'
>>> normalize_serial_number('Unknown', blank=None, left_fill=str())
''
>>> normalize_serial_number('N/A', blank='', left_fill=str())
'A'
>>> normalize_serial_number('1C 234567890 ', max_length=20, left_fill='')
'1C 234567890'
Notice how the max_length setting (20) carries over from the previous test!
>>> len(normalize_serial_number('Unknown', blank=False))
20
>>> normalize_serial_number('Unknown', blank=False)
'00000000000000000000'
>>> normalize_serial_number(' \\t1C\\t-\\t234567890 \\x00\\x7f', max_length=14, left_fill='0',
... valid_chars='0123456789ABC', invalid_chars=None, join=True)
'1C\\t-\\t234567890'
Notice how the max_length setting carries over from the previous test!
>>> len(normalize_serial_number('Unknown', blank=False))
14
Restore the default max_length setting
>>> len(normalize_serial_number('Unknown', blank=False, max_length=10))
10
>>> normalize_serial_number('NO SERIAL', blank='--=--', left_fill='') # doctest: +NORMALIZE_WHITESPACE
'NO SERIAL'
>>> normalize_serial_number('NO SERIAL', blank='', left_fill='') # doctest: +NORMALIZE_WHITESPACE
'NO SERIAL'
>>> normalize_serial_number('1C 234567890 ', valid_chars='0123456789')
'0234567890'
"""
# All 9 kwargs have persistent default values stored as attributes of the funcion instance
if max_length is None:
max_length = normalize_serial_number.max_length # depends on [control=['if'], data=['max_length']]
else:
normalize_serial_number.max_length = max_length
if left_fill is None:
left_fill = normalize_serial_number.left_fill # depends on [control=['if'], data=['left_fill']]
else:
normalize_serial_number.left_fill = left_fill
if right_fill is None:
right_fill = normalize_serial_number.right_fill # depends on [control=['if'], data=['right_fill']]
else:
normalize_serial_number.right_fill = right_fill
if blank is None:
blank = normalize_serial_number.blank # depends on [control=['if'], data=['blank']]
else:
normalize_serial_number.blank = blank
if valid_chars is None:
valid_chars = normalize_serial_number.valid_chars # depends on [control=['if'], data=['valid_chars']]
else:
normalize_serial_number.valid_chars = valid_chars
if invalid_chars is None:
invalid_chars = normalize_serial_number.invalid_chars # depends on [control=['if'], data=['invalid_chars']]
else:
normalize_serial_number.invalid_chars = invalid_chars
if strip_whitespace is None:
strip_whitespace = normalize_serial_number.strip_whitespace # depends on [control=['if'], data=['strip_whitespace']]
else:
normalize_serial_number.strip_whitespace = strip_whitespace
if join is None:
join = normalize_serial_number.join # depends on [control=['if'], data=['join']]
else:
normalize_serial_number.join = join
if na is None:
na = normalize_serial_number.na # depends on [control=['if'], data=['na']]
else:
normalize_serial_number.na = na
if invalid_chars is None:
invalid_chars = (c for c in charlist.ascii_all if c not in valid_chars) # depends on [control=['if'], data=['invalid_chars']]
invalid_chars = ''.join(invalid_chars)
sn = str(sn).strip(invalid_chars)
if strip_whitespace:
sn = sn.strip() # depends on [control=['if'], data=[]]
if invalid_chars:
if join:
sn = sn.translate(dict(zip(invalid_chars, [''] * len(invalid_chars)))) # depends on [control=['if'], data=[]]
else:
sn = multisplit(sn, invalid_chars)[-1] # depends on [control=['if'], data=[]]
sn = sn[-max_length:]
if strip_whitespace:
sn = sn.strip() # depends on [control=['if'], data=[]]
if na:
if isinstance(na, (tuple, set, dict, list)) and sn in na:
sn = '' # depends on [control=['if'], data=[]]
elif na.match(sn):
sn = '' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not sn and (not blank is False):
return blank # depends on [control=['if'], data=[]]
if left_fill:
sn = left_fill * int(max_length - len(sn) / len(left_fill)) + sn # depends on [control=['if'], data=[]]
if right_fill:
sn = sn + right_fill * (max_length - len(sn) / len(right_fill)) # depends on [control=['if'], data=[]]
return sn |
def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX]) | def function[set_decade_lims, parameter[axis, direction]]:
constant[
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
]
if compare[name[axis] is constant[None]] begin[:]
variable[axis] assign[=] call[name[plt].gca, parameter[]]
if <ast.BoolOp object at 0x7da1b209e680> begin[:]
<ast.Tuple object at 0x7da1b209e6e0> assign[=] call[name[axis].get_xlim, parameter[]]
variable[MIN] assign[=] binary_operation[constant[10] ** call[name[np].floor, parameter[call[name[np].log10, parameter[name[MIN]]]]]]
variable[MAX] assign[=] binary_operation[constant[10] ** call[name[np].ceil, parameter[call[name[np].log10, parameter[name[MAX]]]]]]
call[name[axis].set_xlim, parameter[list[[<ast.Name object at 0x7da1b20d5060>, <ast.Name object at 0x7da1b20d4790>]]]]
if <ast.BoolOp object at 0x7da1b20d61a0> begin[:]
<ast.Tuple object at 0x7da1b20d5c00> assign[=] call[name[axis].get_ylim, parameter[]]
variable[MIN] assign[=] binary_operation[constant[10] ** call[name[np].floor, parameter[call[name[np].log10, parameter[name[MIN]]]]]]
variable[MAX] assign[=] binary_operation[constant[10] ** call[name[np].ceil, parameter[call[name[np].log10, parameter[name[MAX]]]]]]
call[name[axis].set_ylim, parameter[list[[<ast.Name object at 0x7da1b20d5240>, <ast.Name object at 0x7da1b20d6200>]]]] | keyword[def] identifier[set_decade_lims] ( identifier[axis] = keyword[None] , identifier[direction] = keyword[None] ):
literal[string]
keyword[if] identifier[axis] keyword[is] keyword[None] :
identifier[axis] = identifier[plt] . identifier[gca] ()
keyword[if] identifier[direction] keyword[is] keyword[None] keyword[or] identifier[direction] == literal[string] :
identifier[MIN] , identifier[MAX] = identifier[axis] . identifier[get_xlim] ()
identifier[MIN] = literal[int] **( identifier[np] . identifier[floor] ( identifier[np] . identifier[log10] ( identifier[MIN] )))
identifier[MAX] = literal[int] **( identifier[np] . identifier[ceil] ( identifier[np] . identifier[log10] ( identifier[MAX] )))
identifier[axis] . identifier[set_xlim] ([ identifier[MIN] , identifier[MAX] ])
keyword[if] identifier[direction] keyword[is] keyword[None] keyword[or] identifier[direction] == literal[string] :
identifier[MIN] , identifier[MAX] = identifier[axis] . identifier[get_ylim] ()
identifier[MIN] = literal[int] **( identifier[np] . identifier[floor] ( identifier[np] . identifier[log10] ( identifier[MIN] )))
identifier[MAX] = literal[int] **( identifier[np] . identifier[ceil] ( identifier[np] . identifier[log10] ( identifier[MAX] )))
identifier[axis] . identifier[set_ylim] ([ identifier[MIN] , identifier[MAX] ]) | def set_decade_lims(axis=None, direction=None):
"""
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
"""
# get current axis
if axis is None:
axis = plt.gca() # depends on [control=['if'], data=['axis']]
# x-axis
if direction is None or direction == 'x':
# - get current limits
(MIN, MAX) = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** np.floor(np.log10(MIN))
MAX = 10 ** np.ceil(np.log10(MAX))
# - apply
axis.set_xlim([MIN, MAX]) # depends on [control=['if'], data=[]]
# y-axis
if direction is None or direction == 'y':
# - get current limits
(MIN, MAX) = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** np.floor(np.log10(MIN))
MAX = 10 ** np.ceil(np.log10(MAX))
# - apply
axis.set_ylim([MIN, MAX]) # depends on [control=['if'], data=[]] |
def put_scancode(self, scancode):
"""Sends a scancode to the keyboard.
in scancode of type int
raises :class:`VBoxErrorIprtError`
Could not send scan code to virtual keyboard.
"""
if not isinstance(scancode, baseinteger):
raise TypeError("scancode can only be an instance of type baseinteger")
self._call("putScancode",
in_p=[scancode]) | def function[put_scancode, parameter[self, scancode]]:
constant[Sends a scancode to the keyboard.
in scancode of type int
raises :class:`VBoxErrorIprtError`
Could not send scan code to virtual keyboard.
]
if <ast.UnaryOp object at 0x7da2047eb760> begin[:]
<ast.Raise object at 0x7da2047e8be0>
call[name[self]._call, parameter[constant[putScancode]]] | keyword[def] identifier[put_scancode] ( identifier[self] , identifier[scancode] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[scancode] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[scancode] ]) | def put_scancode(self, scancode):
"""Sends a scancode to the keyboard.
in scancode of type int
raises :class:`VBoxErrorIprtError`
Could not send scan code to virtual keyboard.
"""
if not isinstance(scancode, baseinteger):
raise TypeError('scancode can only be an instance of type baseinteger') # depends on [control=['if'], data=[]]
self._call('putScancode', in_p=[scancode]) |
def CreateSessionStart(self):
"""Creates a session start.
Returns:
SessionStart: session start attribute container.
"""
session_start = SessionStart()
session_start.artifact_filters = self.artifact_filters
session_start.command_line_arguments = self.command_line_arguments
session_start.debug_mode = self.debug_mode
session_start.enabled_parser_names = self.enabled_parser_names
session_start.filter_file = self.filter_file
session_start.identifier = self.identifier
session_start.parser_filter_expression = self.parser_filter_expression
session_start.preferred_encoding = self.preferred_encoding
session_start.preferred_time_zone = self.preferred_time_zone
session_start.product_name = self.product_name
session_start.product_version = self.product_version
session_start.timestamp = self.start_time
return session_start | def function[CreateSessionStart, parameter[self]]:
constant[Creates a session start.
Returns:
SessionStart: session start attribute container.
]
variable[session_start] assign[=] call[name[SessionStart], parameter[]]
name[session_start].artifact_filters assign[=] name[self].artifact_filters
name[session_start].command_line_arguments assign[=] name[self].command_line_arguments
name[session_start].debug_mode assign[=] name[self].debug_mode
name[session_start].enabled_parser_names assign[=] name[self].enabled_parser_names
name[session_start].filter_file assign[=] name[self].filter_file
name[session_start].identifier assign[=] name[self].identifier
name[session_start].parser_filter_expression assign[=] name[self].parser_filter_expression
name[session_start].preferred_encoding assign[=] name[self].preferred_encoding
name[session_start].preferred_time_zone assign[=] name[self].preferred_time_zone
name[session_start].product_name assign[=] name[self].product_name
name[session_start].product_version assign[=] name[self].product_version
name[session_start].timestamp assign[=] name[self].start_time
return[name[session_start]] | keyword[def] identifier[CreateSessionStart] ( identifier[self] ):
literal[string]
identifier[session_start] = identifier[SessionStart] ()
identifier[session_start] . identifier[artifact_filters] = identifier[self] . identifier[artifact_filters]
identifier[session_start] . identifier[command_line_arguments] = identifier[self] . identifier[command_line_arguments]
identifier[session_start] . identifier[debug_mode] = identifier[self] . identifier[debug_mode]
identifier[session_start] . identifier[enabled_parser_names] = identifier[self] . identifier[enabled_parser_names]
identifier[session_start] . identifier[filter_file] = identifier[self] . identifier[filter_file]
identifier[session_start] . identifier[identifier] = identifier[self] . identifier[identifier]
identifier[session_start] . identifier[parser_filter_expression] = identifier[self] . identifier[parser_filter_expression]
identifier[session_start] . identifier[preferred_encoding] = identifier[self] . identifier[preferred_encoding]
identifier[session_start] . identifier[preferred_time_zone] = identifier[self] . identifier[preferred_time_zone]
identifier[session_start] . identifier[product_name] = identifier[self] . identifier[product_name]
identifier[session_start] . identifier[product_version] = identifier[self] . identifier[product_version]
identifier[session_start] . identifier[timestamp] = identifier[self] . identifier[start_time]
keyword[return] identifier[session_start] | def CreateSessionStart(self):
"""Creates a session start.
Returns:
SessionStart: session start attribute container.
"""
session_start = SessionStart()
session_start.artifact_filters = self.artifact_filters
session_start.command_line_arguments = self.command_line_arguments
session_start.debug_mode = self.debug_mode
session_start.enabled_parser_names = self.enabled_parser_names
session_start.filter_file = self.filter_file
session_start.identifier = self.identifier
session_start.parser_filter_expression = self.parser_filter_expression
session_start.preferred_encoding = self.preferred_encoding
session_start.preferred_time_zone = self.preferred_time_zone
session_start.product_name = self.product_name
session_start.product_version = self.product_version
session_start.timestamp = self.start_time
return session_start |
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
""" | def function[get_period_LS, parameter[self, date, mag, n_threads, min_period]]:
constant[
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
]
variable[oversampling] assign[=] constant[3.0]
variable[hifac] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[max], parameter[name[date]]] - call[name[min], parameter[name[date]]]] / call[name[len], parameter[name[date]]]] / name[min_period]] * constant[2.0]]]]
if compare[name[hifac] less[<] constant[100]] begin[:]
variable[hifac] assign[=] constant[100]
<ast.Tuple object at 0x7da20c6c7100> assign[=] call[name[pLS].fasper, parameter[name[date], name[mag], name[oversampling], name[hifac], name[n_threads]]]
name[self].f assign[=] call[name[fx]][name[jmax]]
name[self].period assign[=] binary_operation[constant[1.0] / name[self].f]
name[self].period_uncertainty assign[=] call[name[self].get_period_uncertainty, parameter[name[fx], name[fy], name[jmax]]]
name[self].period_log10FAP assign[=] call[name[np].log10, parameter[call[call[name[pLS].getSignificance, parameter[name[fx], name[fy], name[nout], name[oversampling]]]][name[jmax]]]]
name[self].period_SNR assign[=] binary_operation[binary_operation[call[name[fy]][name[jmax]] - call[name[np].median, parameter[name[fy]]]] / call[name[np].std, parameter[name[fy]]]]
variable[order] assign[=] constant[3]
variable[p0] assign[=] call[name[np].ones, parameter[binary_operation[binary_operation[name[order] * constant[2]] + constant[1]]]]
variable[date_period] assign[=] binary_operation[binary_operation[name[date] <ast.Mod object at 0x7da2590d6920> name[self].period] / name[self].period]
<ast.Tuple object at 0x7da20c6c73a0> assign[=] call[name[leastsq], parameter[name[self].residuals, name[p0]]]
name[self].amplitude assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[call[name[p1]][constant[1]] ** constant[2]] + binary_operation[call[name[p1]][constant[2]] ** constant[2]]]]]
name[self].r21 assign[=] binary_operation[call[name[np].sqrt, parameter[binary_operation[binary_operation[call[name[p1]][constant[3]] ** constant[2]] + binary_operation[call[name[p1]][constant[4]] ** constant[2]]]]] / name[self].amplitude]
name[self].r31 assign[=] binary_operation[call[name[np].sqrt, parameter[binary_operation[binary_operation[call[name[p1]][constant[5]] ** constant[2]] + binary_operation[call[name[p1]][constant[6]] ** constant[2]]]]] / name[self].amplitude]
name[self].f_phase assign[=] call[name[np].arctan, parameter[binary_operation[<ast.UnaryOp object at 0x7da18eb55090> / call[name[p1]][constant[2]]]]]
name[self].phi21 assign[=] binary_operation[call[name[np].arctan, parameter[binary_operation[<ast.UnaryOp object at 0x7da18eb577f0> / call[name[p1]][constant[4]]]]] - binary_operation[constant[2.0] * name[self].f_phase]]
name[self].phi31 assign[=] binary_operation[call[name[np].arctan, parameter[binary_operation[<ast.UnaryOp object at 0x7da18eb55540> / call[name[p1]][constant[6]]]]] - binary_operation[constant[3.0] * name[self].f_phase]]
constant[
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] * (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
] | keyword[def] identifier[get_period_LS] ( identifier[self] , identifier[date] , identifier[mag] , identifier[n_threads] , identifier[min_period] ):
literal[string]
identifier[oversampling] = literal[int]
identifier[hifac] = identifier[int] (( identifier[max] ( identifier[date] )- identifier[min] ( identifier[date] ))/ identifier[len] ( identifier[date] )/ identifier[min_period] * literal[int] )
keyword[if] identifier[hifac] < literal[int] :
identifier[hifac] = literal[int]
identifier[fx] , identifier[fy] , identifier[nout] , identifier[jmax] , identifier[prob] = identifier[pLS] . identifier[fasper] ( identifier[date] , identifier[mag] , identifier[oversampling] , identifier[hifac] ,
identifier[n_threads] )
identifier[self] . identifier[f] = identifier[fx] [ identifier[jmax] ]
identifier[self] . identifier[period] = literal[int] / identifier[self] . identifier[f]
identifier[self] . identifier[period_uncertainty] = identifier[self] . identifier[get_period_uncertainty] ( identifier[fx] , identifier[fy] , identifier[jmax] )
identifier[self] . identifier[period_log10FAP] = identifier[np] . identifier[log10] ( identifier[pLS] . identifier[getSignificance] ( identifier[fx] , identifier[fy] , identifier[nout] , identifier[oversampling] )[ identifier[jmax] ])
identifier[self] . identifier[period_SNR] =( identifier[fy] [ identifier[jmax] ]- identifier[np] . identifier[median] ( identifier[fy] ))/ identifier[np] . identifier[std] ( identifier[fy] )
identifier[order] = literal[int]
identifier[p0] = identifier[np] . identifier[ones] ( identifier[order] * literal[int] + literal[int] )
identifier[date_period] =( identifier[date] % identifier[self] . identifier[period] )/ identifier[self] . identifier[period]
identifier[p1] , identifier[success] = identifier[leastsq] ( identifier[self] . identifier[residuals] , identifier[p0] ,
identifier[args] =( identifier[date_period] , identifier[mag] , identifier[order] ))
identifier[self] . identifier[amplitude] = identifier[np] . identifier[sqrt] ( identifier[p1] [ literal[int] ]** literal[int] + identifier[p1] [ literal[int] ]** literal[int] )
identifier[self] . identifier[r21] = identifier[np] . identifier[sqrt] ( identifier[p1] [ literal[int] ]** literal[int] + identifier[p1] [ literal[int] ]** literal[int] )/ identifier[self] . identifier[amplitude]
identifier[self] . identifier[r31] = identifier[np] . identifier[sqrt] ( identifier[p1] [ literal[int] ]** literal[int] + identifier[p1] [ literal[int] ]** literal[int] )/ identifier[self] . identifier[amplitude]
identifier[self] . identifier[f_phase] = identifier[np] . identifier[arctan] (- identifier[p1] [ literal[int] ]/ identifier[p1] [ literal[int] ])
identifier[self] . identifier[phi21] = identifier[np] . identifier[arctan] (- identifier[p1] [ literal[int] ]/ identifier[p1] [ literal[int] ])- literal[int] * identifier[self] . identifier[f_phase]
identifier[self] . identifier[phi31] = identifier[np] . identifier[arctan] (- identifier[p1] [ literal[int] ]/ identifier[p1] [ literal[int] ])- literal[int] * identifier[self] . identifier[f_phase]
literal[string] | def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.0
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.0)
# Minimum hifac
if hifac < 100:
hifac = 100 # depends on [control=['if'], data=['hifac']]
# Lomb-Scargle.
(fx, fy, nout, jmax, prob) = pLS.fasper(date, mag, oversampling, hifac, n_threads)
self.f = fx[jmax]
self.period = 1.0 / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = date % self.period / self.period
(p1, success) = leastsq(self.residuals, p0, args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2.0 * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3.0 * self.f_phase
"\n # Derive a second period.\n # Whitening a light curve.\n residual_mag = mag - fitted_y\n\n # Lomb-Scargle again to find the second period.\n omega_top, power_top = search_frequencies(date, residual_mag, err,\n #LS_kwargs={'generalized':True, 'subtract_mean':True},\n n_eval=5000, n_retry=3, n_save=50)\n\n self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]\n self.f2 = 1. / self.period2\n self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] * (len(self.date) - 1) / 2.\n\n # Fit Fourier Series again.\n p0 = [1.] * order * 2\n date_period = (date % self.period) / self.period\n p2, success = leastsq(self.residuals, p0,\n args=(date_period, residual_mag, order))\n fitted_y = self.FourierSeries(p2, date_period, order)\n\n #plt.plot(date%self.period2, residual_mag, 'b+')\n #plt.show()\n\n # Derive Fourier features for the first second.\n self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)\n self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp\n self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp\n self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp\n self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp\n self.f2_phase = np.arctan(-p2[1] / p2[2])\n self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase\n self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase\n self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase\n self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase\n\n # Calculate features using the first and second periods.\n self.f12_ratio = self.f2 / self.f1\n self.f12_remain = self.f1 % self.f2 if self.f1 > self.f2 else self.f2 % self.f1\n self.f12_amp = self.f2_amp / self.f1_amp\n self.f12_phase = self.f2_phase - self.f1_phase\n " |
def replace_spaces(s, replacer):
# type: (str, Callable[[int, str], str]) -> str
"""
>>> replace_spaces('ab', lambda n, l: '_' * n)
'ab'
>>> replace_spaces('a b', lambda n, l: '_' * n)
'a_b'
>>> replace_spaces(' ab', lambda n, l: '_' * n)
'_ab'
>>> replace_spaces(' a b ', lambda n, s: s * n)
'leftleftacenterbright'
>>> replace_spaces(' a b ', lambda n, _: '0 0' * n)
'0 0a0 0b0 00 0'
"""
def replace(m):
# type: (Match[str]) -> str
if m.start() == 0:
side = 'left'
elif m.end() == len(s):
side = 'right'
else:
side = 'center'
return replacer(len(m.group()), side)
return re.sub(r'[ ]+', replace, s) | def function[replace_spaces, parameter[s, replacer]]:
constant[
>>> replace_spaces('ab', lambda n, l: '_' * n)
'ab'
>>> replace_spaces('a b', lambda n, l: '_' * n)
'a_b'
>>> replace_spaces(' ab', lambda n, l: '_' * n)
'_ab'
>>> replace_spaces(' a b ', lambda n, s: s * n)
'leftleftacenterbright'
>>> replace_spaces(' a b ', lambda n, _: '0 0' * n)
'0 0a0 0b0 00 0'
]
def function[replace, parameter[m]]:
if compare[call[name[m].start, parameter[]] equal[==] constant[0]] begin[:]
variable[side] assign[=] constant[left]
return[call[name[replacer], parameter[call[name[len], parameter[call[name[m].group, parameter[]]]], name[side]]]]
return[call[name[re].sub, parameter[constant[[ ]+], name[replace], name[s]]]] | keyword[def] identifier[replace_spaces] ( identifier[s] , identifier[replacer] ):
literal[string]
keyword[def] identifier[replace] ( identifier[m] ):
keyword[if] identifier[m] . identifier[start] ()== literal[int] :
identifier[side] = literal[string]
keyword[elif] identifier[m] . identifier[end] ()== identifier[len] ( identifier[s] ):
identifier[side] = literal[string]
keyword[else] :
identifier[side] = literal[string]
keyword[return] identifier[replacer] ( identifier[len] ( identifier[m] . identifier[group] ()), identifier[side] )
keyword[return] identifier[re] . identifier[sub] ( literal[string] , identifier[replace] , identifier[s] ) | def replace_spaces(s, replacer):
# type: (str, Callable[[int, str], str]) -> str
"\n >>> replace_spaces('ab', lambda n, l: '_' * n)\n 'ab'\n >>> replace_spaces('a b', lambda n, l: '_' * n)\n 'a_b'\n >>> replace_spaces(' ab', lambda n, l: '_' * n)\n '_ab'\n >>> replace_spaces(' a b ', lambda n, s: s * n)\n 'leftleftacenterbright'\n >>> replace_spaces(' a b ', lambda n, _: '0 0' * n)\n '0 0a0 0b0 00 0'\n "
def replace(m):
# type: (Match[str]) -> str
if m.start() == 0:
side = 'left' # depends on [control=['if'], data=[]]
elif m.end() == len(s):
side = 'right' # depends on [control=['if'], data=[]]
else:
side = 'center'
return replacer(len(m.group()), side)
return re.sub('[ ]+', replace, s) |
def padDigitalData(self, dig_data, n):
"""Pad dig_data with its last element so that the new array is a
multiple of n.
"""
n = int(n)
l0 = len(dig_data)
if l0 % n == 0:
return dig_data # no need of padding
else:
ladd = n - (l0 % n)
dig_data_add = np.zeros(ladd, dtype="uint32")
dig_data_add.fill(dig_data[-1])
return np.concatenate((dig_data, dig_data_add)) | def function[padDigitalData, parameter[self, dig_data, n]]:
constant[Pad dig_data with its last element so that the new array is a
multiple of n.
]
variable[n] assign[=] call[name[int], parameter[name[n]]]
variable[l0] assign[=] call[name[len], parameter[name[dig_data]]]
if compare[binary_operation[name[l0] <ast.Mod object at 0x7da2590d6920> name[n]] equal[==] constant[0]] begin[:]
return[name[dig_data]] | keyword[def] identifier[padDigitalData] ( identifier[self] , identifier[dig_data] , identifier[n] ):
literal[string]
identifier[n] = identifier[int] ( identifier[n] )
identifier[l0] = identifier[len] ( identifier[dig_data] )
keyword[if] identifier[l0] % identifier[n] == literal[int] :
keyword[return] identifier[dig_data]
keyword[else] :
identifier[ladd] = identifier[n] -( identifier[l0] % identifier[n] )
identifier[dig_data_add] = identifier[np] . identifier[zeros] ( identifier[ladd] , identifier[dtype] = literal[string] )
identifier[dig_data_add] . identifier[fill] ( identifier[dig_data] [- literal[int] ])
keyword[return] identifier[np] . identifier[concatenate] (( identifier[dig_data] , identifier[dig_data_add] )) | def padDigitalData(self, dig_data, n):
"""Pad dig_data with its last element so that the new array is a
multiple of n.
"""
n = int(n)
l0 = len(dig_data)
if l0 % n == 0:
return dig_data # no need of padding # depends on [control=['if'], data=[]]
else:
ladd = n - l0 % n
dig_data_add = np.zeros(ladd, dtype='uint32')
dig_data_add.fill(dig_data[-1])
return np.concatenate((dig_data, dig_data_add)) |
def cut_module_meta(app, what, name, obj, options, lines):
"""Don't render lines that start with ``:copyright:`` or
``:license:`` when rendering module autodoc. These lines are useful
meta information in the source code, but are noisy in the docs.
"""
if what != "module":
return
lines[:] = [
line for line in lines if not line.startswith((":copyright:", ":license:"))
] | def function[cut_module_meta, parameter[app, what, name, obj, options, lines]]:
constant[Don't render lines that start with ``:copyright:`` or
``:license:`` when rendering module autodoc. These lines are useful
meta information in the source code, but are noisy in the docs.
]
if compare[name[what] not_equal[!=] constant[module]] begin[:]
return[None]
call[name[lines]][<ast.Slice object at 0x7da204566e60>] assign[=] <ast.ListComp object at 0x7da204564c10> | keyword[def] identifier[cut_module_meta] ( identifier[app] , identifier[what] , identifier[name] , identifier[obj] , identifier[options] , identifier[lines] ):
literal[string]
keyword[if] identifier[what] != literal[string] :
keyword[return]
identifier[lines] [:]=[
identifier[line] keyword[for] identifier[line] keyword[in] identifier[lines] keyword[if] keyword[not] identifier[line] . identifier[startswith] (( literal[string] , literal[string] ))
] | def cut_module_meta(app, what, name, obj, options, lines):
"""Don't render lines that start with ``:copyright:`` or
``:license:`` when rendering module autodoc. These lines are useful
meta information in the source code, but are noisy in the docs.
"""
if what != 'module':
return # depends on [control=['if'], data=[]]
lines[:] = [line for line in lines if not line.startswith((':copyright:', ':license:'))] |
def find_delegated_role(roles, delegated_role):
"""
<Purpose>
Find the index, if any, of a role with a given name in a list of roles.
<Arguments>
roles:
The list of roles, each of which must have a 'name' attribute.
delegated_role:
The name of the role to be found in the list of roles.
<Exceptions>
securesystemslib.exceptions.RepositoryError, if the list of roles has
invalid data.
<Side Effects>
No known side effects.
<Returns>
The unique index, an interger, in the list of roles. if 'delegated_role'
does not exist, 'None' is returned.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named. Raise
# 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
securesystemslib.formats.ROLELIST_SCHEMA.check_match(roles)
securesystemslib.formats.ROLENAME_SCHEMA.check_match(delegated_role)
# The index of a role, if any, with the same name.
role_index = None
for index in six.moves.xrange(len(roles)):
role = roles[index]
name = role.get('name')
# This role has no name.
if name is None:
no_name_message = 'Role with no name.'
raise securesystemslib.exceptions.RepositoryError(no_name_message)
# Does this role have the same name?
else:
# This role has the same name, and...
if name == delegated_role:
# ...it is the only known role with the same name.
if role_index is None:
role_index = index
# ...there are at least two roles with the same name.
else:
duplicate_role_message = 'Duplicate role (' + str(delegated_role) + ').'
raise securesystemslib.exceptions.RepositoryError(
'Duplicate role (' + str(delegated_role) + ').')
# This role has a different name.
else:
logger.debug('Skipping delegated role: ' + repr(delegated_role))
return role_index | def function[find_delegated_role, parameter[roles, delegated_role]]:
constant[
<Purpose>
Find the index, if any, of a role with a given name in a list of roles.
<Arguments>
roles:
The list of roles, each of which must have a 'name' attribute.
delegated_role:
The name of the role to be found in the list of roles.
<Exceptions>
securesystemslib.exceptions.RepositoryError, if the list of roles has
invalid data.
<Side Effects>
No known side effects.
<Returns>
The unique index, an interger, in the list of roles. if 'delegated_role'
does not exist, 'None' is returned.
]
call[name[securesystemslib].formats.ROLELIST_SCHEMA.check_match, parameter[name[roles]]]
call[name[securesystemslib].formats.ROLENAME_SCHEMA.check_match, parameter[name[delegated_role]]]
variable[role_index] assign[=] constant[None]
for taget[name[index]] in starred[call[name[six].moves.xrange, parameter[call[name[len], parameter[name[roles]]]]]] begin[:]
variable[role] assign[=] call[name[roles]][name[index]]
variable[name] assign[=] call[name[role].get, parameter[constant[name]]]
if compare[name[name] is constant[None]] begin[:]
variable[no_name_message] assign[=] constant[Role with no name.]
<ast.Raise object at 0x7da204345cf0>
return[name[role_index]] | keyword[def] identifier[find_delegated_role] ( identifier[roles] , identifier[delegated_role] ):
literal[string]
identifier[securesystemslib] . identifier[formats] . identifier[ROLELIST_SCHEMA] . identifier[check_match] ( identifier[roles] )
identifier[securesystemslib] . identifier[formats] . identifier[ROLENAME_SCHEMA] . identifier[check_match] ( identifier[delegated_role] )
identifier[role_index] = keyword[None]
keyword[for] identifier[index] keyword[in] identifier[six] . identifier[moves] . identifier[xrange] ( identifier[len] ( identifier[roles] )):
identifier[role] = identifier[roles] [ identifier[index] ]
identifier[name] = identifier[role] . identifier[get] ( literal[string] )
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[no_name_message] = literal[string]
keyword[raise] identifier[securesystemslib] . identifier[exceptions] . identifier[RepositoryError] ( identifier[no_name_message] )
keyword[else] :
keyword[if] identifier[name] == identifier[delegated_role] :
keyword[if] identifier[role_index] keyword[is] keyword[None] :
identifier[role_index] = identifier[index]
keyword[else] :
identifier[duplicate_role_message] = literal[string] + identifier[str] ( identifier[delegated_role] )+ literal[string]
keyword[raise] identifier[securesystemslib] . identifier[exceptions] . identifier[RepositoryError] (
literal[string] + identifier[str] ( identifier[delegated_role] )+ literal[string] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] + identifier[repr] ( identifier[delegated_role] ))
keyword[return] identifier[role_index] | def find_delegated_role(roles, delegated_role):
"""
<Purpose>
Find the index, if any, of a role with a given name in a list of roles.
<Arguments>
roles:
The list of roles, each of which must have a 'name' attribute.
delegated_role:
The name of the role to be found in the list of roles.
<Exceptions>
securesystemslib.exceptions.RepositoryError, if the list of roles has
invalid data.
<Side Effects>
No known side effects.
<Returns>
The unique index, an interger, in the list of roles. if 'delegated_role'
does not exist, 'None' is returned.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named. Raise
# 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
securesystemslib.formats.ROLELIST_SCHEMA.check_match(roles)
securesystemslib.formats.ROLENAME_SCHEMA.check_match(delegated_role)
# The index of a role, if any, with the same name.
role_index = None
for index in six.moves.xrange(len(roles)):
role = roles[index]
name = role.get('name')
# This role has no name.
if name is None:
no_name_message = 'Role with no name.'
raise securesystemslib.exceptions.RepositoryError(no_name_message) # depends on [control=['if'], data=[]]
# Does this role have the same name?
# This role has the same name, and...
elif name == delegated_role:
# ...it is the only known role with the same name.
if role_index is None:
role_index = index # depends on [control=['if'], data=['role_index']]
else:
# ...there are at least two roles with the same name.
duplicate_role_message = 'Duplicate role (' + str(delegated_role) + ').'
raise securesystemslib.exceptions.RepositoryError('Duplicate role (' + str(delegated_role) + ').') # depends on [control=['if'], data=['delegated_role']]
else:
# This role has a different name.
logger.debug('Skipping delegated role: ' + repr(delegated_role)) # depends on [control=['for'], data=['index']]
return role_index |
def decompose_space(H, A):
    """Simplifies OperatorTrace expressions over tensor-product spaces by
    turning it into iterated partial traces.
    Args:
        H (ProductSpace): The full space.
        A (Operator):
    Returns:
        Operator: Iterative partial trace expression
    """
    # Trace out the last tensor factor first, then recurse (via create) over
    # the product of the remaining factors.
    last_factor = H.operands[-1]
    remaining_factors = H.operands[:-1]
    inner_trace = OperatorTrace.create(A, over_space=last_factor)
    return OperatorTrace.create(
        inner_trace,
        over_space=ProductSpace.create(*remaining_factors))
constant[Simplifies OperatorTrace expressions over tensor-product spaces by
turning it into iterated partial traces.
Args:
H (ProductSpace): The full space.
A (Operator):
Returns:
Operator: Iterative partial trace expression
]
return[call[name[OperatorTrace].create, parameter[call[name[OperatorTrace].create, parameter[name[A]]]]]] | keyword[def] identifier[decompose_space] ( identifier[H] , identifier[A] ):
literal[string]
keyword[return] identifier[OperatorTrace] . identifier[create] (
identifier[OperatorTrace] . identifier[create] ( identifier[A] , identifier[over_space] = identifier[H] . identifier[operands] [- literal[int] ]),
identifier[over_space] = identifier[ProductSpace] . identifier[create] (* identifier[H] . identifier[operands] [:- literal[int] ])) | def decompose_space(H, A):
"""Simplifies OperatorTrace expressions over tensor-product spaces by
turning it into iterated partial traces.
Args:
H (ProductSpace): The full space.
A (Operator):
Returns:
Operator: Iterative partial trace expression
"""
return OperatorTrace.create(OperatorTrace.create(A, over_space=H.operands[-1]), over_space=ProductSpace.create(*H.operands[:-1])) |
def set_amount(self, amount):
    """
    Set the transaction amount (ISO field 4) and rebuild the message.
    Falsy amounts (empty/zero/None) leave the message untouched.
    """
    if not amount:
        return
    try:
        self.IsoMessage.FieldData(4, int(amount))
    except ValueError:
        # Non-numeric amount: fall back to a zero amount.
        self.IsoMessage.FieldData(4, 0)
    self.rebuild()
constant[
Set transaction amount
]
if name[amount] begin[:]
<ast.Try object at 0x7da20c76ee60>
call[name[self].rebuild, parameter[]] | keyword[def] identifier[set_amount] ( identifier[self] , identifier[amount] ):
literal[string]
keyword[if] identifier[amount] :
keyword[try] :
identifier[self] . identifier[IsoMessage] . identifier[FieldData] ( literal[int] , identifier[int] ( identifier[amount] ))
keyword[except] identifier[ValueError] :
identifier[self] . identifier[IsoMessage] . identifier[FieldData] ( literal[int] , literal[int] )
identifier[self] . identifier[rebuild] () | def set_amount(self, amount):
"""
Set transaction amount
"""
if amount:
try:
self.IsoMessage.FieldData(4, int(amount)) # depends on [control=['try'], data=[]]
except ValueError:
self.IsoMessage.FieldData(4, 0) # depends on [control=['except'], data=[]]
self.rebuild() # depends on [control=['if'], data=[]] |
def PyplotHistogram():
    """
    =============================================================
    Demo of the histogram (hist) function with multiple data sets
    =============================================================
    Plot histogram with multiple sample sets and demonstrate:
    * Use of legend with multiple sample sets
    * Stacked bars
    * Step curve with no fill
    * Data sets of different sample sizes
    Selecting different bin counts and sizes can significantly affect the
    shape of a histogram. The Astropy docs have a great section on how to
    select these parameters:
    http://docs.astropy.org/en/stable/visualization/histogram.html
    Returns:
        matplotlib.figure.Figure: Figure containing the four demo axes.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    np.random.seed(0)  # deterministic demo data
    n_bins = 10
    x = np.random.randn(1000, 3)
    fig, axes = plt.subplots(nrows=2, ncols=2)
    ax0, ax1, ax2, ax3 = axes.flatten()
    colors = ['red', 'tan', 'lime']
    # 'normed' was deprecated and later removed from Axes.hist; 'density'
    # is the supported way to normalize the histogram.
    ax0.hist(x, n_bins, density=True, histtype='bar', color=colors, label=colors)
    ax0.legend(prop={'size': 10})
    ax0.set_title('bars with legend')
    ax1.hist(x, n_bins, density=True, histtype='bar', stacked=True)
    ax1.set_title('stacked bar')
    ax2.hist(x, n_bins, histtype='step', stacked=True, fill=False)
    ax2.set_title('stack step (unfilled)')
    # Make a multiple-histogram of data-sets with different length.
    x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]]
    ax3.hist(x_multi, n_bins, histtype='bar')
    ax3.set_title('different sample sizes')
    fig.tight_layout()
    return fig
constant[
=============================================================
Demo of the histogram (hist) function with multiple data sets
=============================================================
Plot histogram with multiple sample sets and demonstrate:
* Use of legend with multiple sample sets
* Stacked bars
* Step curve with no fill
* Data sets of different sample sizes
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
]
import module[numpy] as alias[np]
import module[matplotlib.pyplot] as alias[plt]
call[name[np].random.seed, parameter[constant[0]]]
variable[n_bins] assign[=] constant[10]
variable[x] assign[=] call[name[np].random.randn, parameter[constant[1000], constant[3]]]
<ast.Tuple object at 0x7da1b1f1a500> assign[=] call[name[plt].subplots, parameter[]]
<ast.Tuple object at 0x7da1b1f1bb20> assign[=] call[name[axes].flatten, parameter[]]
variable[colors] assign[=] list[[<ast.Constant object at 0x7da1b1f1b5e0>, <ast.Constant object at 0x7da1b1f1a800>, <ast.Constant object at 0x7da1b1f18550>]]
call[name[ax0].hist, parameter[name[x], name[n_bins]]]
call[name[ax0].legend, parameter[]]
call[name[ax0].set_title, parameter[constant[bars with legend]]]
call[name[ax1].hist, parameter[name[x], name[n_bins]]]
call[name[ax1].set_title, parameter[constant[stacked bar]]]
call[name[ax2].hist, parameter[name[x], name[n_bins]]]
call[name[ax2].set_title, parameter[constant[stack step (unfilled)]]]
variable[x_multi] assign[=] <ast.ListComp object at 0x7da1b1f1bd30>
call[name[ax3].hist, parameter[name[x_multi], name[n_bins]]]
call[name[ax3].set_title, parameter[constant[different sample sizes]]]
call[name[fig].tight_layout, parameter[]]
return[name[fig]] | keyword[def] identifier[PyplotHistogram] ():
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
identifier[np] . identifier[random] . identifier[seed] ( literal[int] )
identifier[n_bins] = literal[int]
identifier[x] = identifier[np] . identifier[random] . identifier[randn] ( literal[int] , literal[int] )
identifier[fig] , identifier[axes] = identifier[plt] . identifier[subplots] ( identifier[nrows] = literal[int] , identifier[ncols] = literal[int] )
identifier[ax0] , identifier[ax1] , identifier[ax2] , identifier[ax3] = identifier[axes] . identifier[flatten] ()
identifier[colors] =[ literal[string] , literal[string] , literal[string] ]
identifier[ax0] . identifier[hist] ( identifier[x] , identifier[n_bins] , identifier[normed] = literal[int] , identifier[histtype] = literal[string] , identifier[color] = identifier[colors] , identifier[label] = identifier[colors] )
identifier[ax0] . identifier[legend] ( identifier[prop] ={ literal[string] : literal[int] })
identifier[ax0] . identifier[set_title] ( literal[string] )
identifier[ax1] . identifier[hist] ( identifier[x] , identifier[n_bins] , identifier[normed] = literal[int] , identifier[histtype] = literal[string] , identifier[stacked] = keyword[True] )
identifier[ax1] . identifier[set_title] ( literal[string] )
identifier[ax2] . identifier[hist] ( identifier[x] , identifier[n_bins] , identifier[histtype] = literal[string] , identifier[stacked] = keyword[True] , identifier[fill] = keyword[False] )
identifier[ax2] . identifier[set_title] ( literal[string] )
identifier[x_multi] =[ identifier[np] . identifier[random] . identifier[randn] ( identifier[n] ) keyword[for] identifier[n] keyword[in] [ literal[int] , literal[int] , literal[int] ]]
identifier[ax3] . identifier[hist] ( identifier[x_multi] , identifier[n_bins] , identifier[histtype] = literal[string] )
identifier[ax3] . identifier[set_title] ( literal[string] )
identifier[fig] . identifier[tight_layout] ()
keyword[return] identifier[fig] | def PyplotHistogram():
"""
=============================================================
Demo of the histogram (hist) function with multiple data sets
=============================================================
Plot histogram with multiple sample sets and demonstrate:
* Use of legend with multiple sample sets
* Stacked bars
* Step curve with no fill
* Data sets of different sample sizes
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
"""
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
n_bins = 10
x = np.random.randn(1000, 3)
(fig, axes) = plt.subplots(nrows=2, ncols=2)
(ax0, ax1, ax2, ax3) = axes.flatten()
colors = ['red', 'tan', 'lime']
ax0.hist(x, n_bins, normed=1, histtype='bar', color=colors, label=colors)
ax0.legend(prop={'size': 10})
ax0.set_title('bars with legend')
ax1.hist(x, n_bins, normed=1, histtype='bar', stacked=True)
ax1.set_title('stacked bar')
ax2.hist(x, n_bins, histtype='step', stacked=True, fill=False)
ax2.set_title('stack step (unfilled)')
# Make a multiple-histogram of data-sets with different length.
x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]]
ax3.hist(x_multi, n_bins, histtype='bar')
ax3.set_title('different sample sizes')
fig.tight_layout()
return fig |
def split_path(path):
    """
    @see: L{join_path}
    @type path: str
    @param path: Absolute or relative path.
    @rtype: list( str... )
    @return: List of path components.
    """
    components = list()
    while path:
        # Renamed from 'next' to avoid shadowing the builtin.
        # PathFindNextComponent returns the text that follows the first
        # path component (empty/None when nothing follows).
        remainder = win32.PathFindNextComponent(path)
        if remainder:
            # The leading component is everything before the remainder.
            component = path[ : -len(remainder) ]
            components.append(component)
        # NOTE(review): when the remainder is empty for the final component,
        # that last component is never appended — confirm this matches the
        # intended join_path round-trip behavior.
        path = remainder
    return components
constant[
@see: L{join_path}
@type path: str
@param path: Absolute or relative path.
@rtype: list( str... )
@return: List of path components.
]
variable[components] assign[=] call[name[list], parameter[]]
while name[path] begin[:]
variable[next] assign[=] call[name[win32].PathFindNextComponent, parameter[name[path]]]
if name[next] begin[:]
variable[prev] assign[=] call[name[path]][<ast.Slice object at 0x7da1b0778a00>]
call[name[components].append, parameter[name[prev]]]
variable[path] assign[=] name[next]
return[name[components]] | keyword[def] identifier[split_path] ( identifier[path] ):
literal[string]
identifier[components] = identifier[list] ()
keyword[while] identifier[path] :
identifier[next] = identifier[win32] . identifier[PathFindNextComponent] ( identifier[path] )
keyword[if] identifier[next] :
identifier[prev] = identifier[path] [:- identifier[len] ( identifier[next] )]
identifier[components] . identifier[append] ( identifier[prev] )
identifier[path] = identifier[next]
keyword[return] identifier[components] | def split_path(path):
"""
@see: L{join_path}
@type path: str
@param path: Absolute or relative path.
@rtype: list( str... )
@return: List of path components.
"""
components = list()
while path:
next = win32.PathFindNextComponent(path)
if next:
prev = path[:-len(next)]
components.append(prev) # depends on [control=['if'], data=[]]
path = next # depends on [control=['while'], data=[]]
return components |
def get_jobs_url(self, job_id):
    # type: (Text) -> Text
    """
    Returns the URL to check job status.
    :param job_id:
        The ID of the job to check.
    """
    # Append '/jobs/<job_id>' to the base path, avoiding a double slash.
    jobs_path = self.uri.path.rstrip('/') + '/jobs/' + job_id
    url_parts = (
        self.uri.scheme,
        self.uri.netloc,
        jobs_path,
        self.uri.query,
        self.uri.fragment,
    )
    return compat.urllib_parse.urlunsplit(url_parts)
constant[
Returns the URL to check job status.
:param job_id:
The ID of the job to check.
]
return[call[name[compat].urllib_parse.urlunsplit, parameter[tuple[[<ast.Attribute object at 0x7da20c6e6e00>, <ast.Attribute object at 0x7da20c6e79a0>, <ast.BinOp object at 0x7da20c6e64d0>, <ast.Attribute object at 0x7da20c6e40a0>, <ast.Attribute object at 0x7da20c6e7b20>]]]]] | keyword[def] identifier[get_jobs_url] ( identifier[self] , identifier[job_id] ):
literal[string]
keyword[return] identifier[compat] . identifier[urllib_parse] . identifier[urlunsplit] ((
identifier[self] . identifier[uri] . identifier[scheme] ,
identifier[self] . identifier[uri] . identifier[netloc] ,
identifier[self] . identifier[uri] . identifier[path] . identifier[rstrip] ( literal[string] )+ literal[string] + identifier[job_id] ,
identifier[self] . identifier[uri] . identifier[query] ,
identifier[self] . identifier[uri] . identifier[fragment] ,
)) | def get_jobs_url(self, job_id):
# type: (Text) -> Text
'\n Returns the URL to check job status.\n\n :param job_id:\n The ID of the job to check.\n '
return compat.urllib_parse.urlunsplit((self.uri.scheme, self.uri.netloc, self.uri.path.rstrip('/') + '/jobs/' + job_id, self.uri.query, self.uri.fragment)) |
def UpdateClass(self, class_name, gtfs_class):
    """Updates an entry in the list of known classes.
    Args:
      class_name: A string with the class name that is to be updated.
      gtfs_class: The new class
    Raises:
      NonexistentMapping if there is no class with the specified class_name.
    """
    if class_name in self._class_mapping:
        self._class_mapping[class_name] = gtfs_class
    else:
        # Only existing mappings may be updated.
        raise problems.NonexistentMapping(class_name)
constant[Updates an entry in the list of known classes.
Args:
class_name: A string with the class name that is to be updated.
gtfs_class: The new class
Raises:
NonexistentMapping if there is no class with the specified class_name.
]
if compare[name[class_name] <ast.NotIn object at 0x7da2590d7190> name[self]._class_mapping] begin[:]
<ast.Raise object at 0x7da18bccabf0>
call[name[self]._class_mapping][name[class_name]] assign[=] name[gtfs_class] | keyword[def] identifier[UpdateClass] ( identifier[self] , identifier[class_name] , identifier[gtfs_class] ):
literal[string]
keyword[if] identifier[class_name] keyword[not] keyword[in] identifier[self] . identifier[_class_mapping] :
keyword[raise] identifier[problems] . identifier[NonexistentMapping] ( identifier[class_name] )
identifier[self] . identifier[_class_mapping] [ identifier[class_name] ]= identifier[gtfs_class] | def UpdateClass(self, class_name, gtfs_class):
"""Updates an entry in the list of known classes.
Args:
class_name: A string with the class name that is to be updated.
gtfs_class: The new class
Raises:
NonexistentMapping if there is no class with the specified class_name.
"""
if class_name not in self._class_mapping:
raise problems.NonexistentMapping(class_name) # depends on [control=['if'], data=['class_name']]
self._class_mapping[class_name] = gtfs_class |
def _get_export_mgr(self):
    """
    Returns:
        (DiskExportManager): Handler for each disk
    """
    # Collect the disks eagerly, then yield one export manager per disk.
    disks = self._collect()
    return (
        DiskExportManager.get_instance_by_type(
            dst=self._dst,
            disk=current_disk,
            do_compress=self._compress,
            *self._args,
            **self._kwargs
        )
        for current_disk in disks
    )
constant[
Returns:
(DiskExportManager): Handler for each disk
]
return[<ast.GeneratorExp object at 0x7da1b2344e50>] | keyword[def] identifier[_get_export_mgr] ( identifier[self] ):
literal[string]
keyword[return] (
identifier[DiskExportManager] . identifier[get_instance_by_type] (
identifier[dst] = identifier[self] . identifier[_dst] ,
identifier[disk] = identifier[disk] ,
identifier[do_compress] = identifier[self] . identifier[_compress] ,
* identifier[self] . identifier[_args] ,
** identifier[self] . identifier[_kwargs]
) keyword[for] identifier[disk] keyword[in] identifier[self] . identifier[_collect] ()
) | def _get_export_mgr(self):
"""
Returns:
(DiskExportManager): Handler for each disk
"""
return (DiskExportManager.get_instance_by_type(*self._args, dst=self._dst, disk=disk, do_compress=self._compress, **self._kwargs) for disk in self._collect()) |
def get_changelog_date_packager(self):
    """Returns part of the changelog entry, containing date and packager.
    """
    try:
        proc = subprocess.Popen('rpmdev-packager', stdout=subprocess.PIPE)
        packager = proc.communicate()[0].strip()
    except OSError:
        # rpmdevtools is not installed; fall back to a placeholder identity.
        packager = "John Doe <john@doe.com>"
        logger.warn("Package rpmdevtools is missing, using default "
                    "name: {0}.".format(packager))
    # Format the date with the C locale so month/day names are not localized.
    with utils.c_time_locale():
        changelog_date = time.strftime('%a %b %d %Y', time.gmtime())
    encoding = locale.getpreferredencoding()
    return u'{0} {1}'.format(changelog_date, packager.decode(encoding))
constant[Returns part of the changelog entry, containing date and packager.
]
<ast.Try object at 0x7da1b1bf8280>
with call[name[utils].c_time_locale, parameter[]] begin[:]
variable[date_str] assign[=] call[name[time].strftime, parameter[constant[%a %b %d %Y], call[name[time].gmtime, parameter[]]]]
variable[encoding] assign[=] call[name[locale].getpreferredencoding, parameter[]]
return[call[constant[{0} {1}].format, parameter[name[date_str], call[name[packager].decode, parameter[name[encoding]]]]]] | keyword[def] identifier[get_changelog_date_packager] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[packager] = identifier[subprocess] . identifier[Popen] (
literal[string] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ). identifier[communicate] (
)[ literal[int] ]. identifier[strip] ()
keyword[except] identifier[OSError] :
identifier[packager] = literal[string]
identifier[logger] . identifier[warn] ( literal[string]
literal[string] . identifier[format] ( identifier[packager] ))
keyword[with] identifier[utils] . identifier[c_time_locale] ():
identifier[date_str] = identifier[time] . identifier[strftime] ( literal[string] , identifier[time] . identifier[gmtime] ())
identifier[encoding] = identifier[locale] . identifier[getpreferredencoding] ()
keyword[return] literal[string] . identifier[format] ( identifier[date_str] , identifier[packager] . identifier[decode] ( identifier[encoding] )) | def get_changelog_date_packager(self):
"""Returns part of the changelog entry, containing date and packager.
"""
try:
packager = subprocess.Popen('rpmdev-packager', stdout=subprocess.PIPE).communicate()[0].strip() # depends on [control=['try'], data=[]]
except OSError:
# Hi John Doe, you should install rpmdevtools
packager = 'John Doe <john@doe.com>'
logger.warn('Package rpmdevtools is missing, using default name: {0}.'.format(packager)) # depends on [control=['except'], data=[]]
with utils.c_time_locale():
date_str = time.strftime('%a %b %d %Y', time.gmtime()) # depends on [control=['with'], data=[]]
encoding = locale.getpreferredencoding()
return u'{0} {1}'.format(date_str, packager.decode(encoding)) |
def posify_mask_indexer(indexer):
    """Convert masked values (-1) in an indexer to nearest unmasked values.
    This routine is useful for dask, where it can be much faster to index
    adjacent points than arbitrary points from the end of an array.
    Parameters
    ----------
    indexer : ExplicitIndexer
        Input indexer.
    Returns
    -------
    ExplicitIndexer
        Same type of input, with all values in ndarray keys equal to -1
        replaced by an adjacent non-masked element.
    """
    posified = []
    for key in indexer.tuple:
        if isinstance(key, np.ndarray):
            # Flatten, replace masked entries, then restore the key's shape.
            posified.append(
                _posify_mask_subindexer(key.ravel()).reshape(key.shape))
        else:
            posified.append(key)
    return type(indexer)(tuple(posified))
constant[Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
]
variable[key] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da2041dbee0>]]
return[call[call[name[type], parameter[name[indexer]]], parameter[name[key]]]] | keyword[def] identifier[posify_mask_indexer] ( identifier[indexer] ):
literal[string]
identifier[key] = identifier[tuple] ( identifier[_posify_mask_subindexer] ( identifier[k] . identifier[ravel] ()). identifier[reshape] ( identifier[k] . identifier[shape] )
keyword[if] identifier[isinstance] ( identifier[k] , identifier[np] . identifier[ndarray] ) keyword[else] identifier[k]
keyword[for] identifier[k] keyword[in] identifier[indexer] . identifier[tuple] )
keyword[return] identifier[type] ( identifier[indexer] )( identifier[key] ) | def posify_mask_indexer(indexer):
"""Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
"""
key = tuple((_posify_mask_subindexer(k.ravel()).reshape(k.shape) if isinstance(k, np.ndarray) else k for k in indexer.tuple))
return type(indexer)(key) |
def _get_kriging_matrix(self, n, n_withdrifts):
    """Assembles the kriging matrix.

    Parameters
    ----------
    n : int
        Number of data points (length of ``self.X_ADJUSTED`` /
        ``self.Y_ADJUSTED``).
    n_withdrifts : int
        ``n`` plus the number of active drift terms; drift rows/columns
        are appended after the first ``n`` rows/columns.

    Returns
    -------
    ndarray
        The assembled kriging matrix ``a``. When ``self.UNBIAS`` is set,
        one extra row/column holds the unbiasedness (Lagrange) constraint.
    """
    # Pairwise euclidean distances between all data points.
    xy = np.concatenate((self.X_ADJUSTED[:, np.newaxis],
                         self.Y_ADJUSTED[:, np.newaxis]), axis=1)
    d = cdist(xy, xy, 'euclidean')
    if self.UNBIAS:
        # Extra row/column for the unbiasedness constraint.
        a = np.zeros((n_withdrifts+1, n_withdrifts+1))
    else:
        a = np.zeros((n_withdrifts, n_withdrifts))
    # Upper-left n x n block: negated variogram of pairwise distances,
    # with an exact zero diagonal.
    a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
    np.fill_diagonal(a, 0.)
    # `i` tracks the next drift row/column to fill, starting after the
    # data block; each drift term occupies one symmetric row/column pair.
    i = n
    if self.regional_linear_drift:
        a[:n, i] = self.X_ADJUSTED
        a[i, :n] = self.X_ADJUSTED
        i += 1
        a[:n, i] = self.Y_ADJUSTED
        a[i, :n] = self.Y_ADJUSTED
        i += 1
    if self.point_log_drift:
        for well_no in range(self.point_log_array.shape[0]):
            # Columns of point_log_array are used as (x, y, coefficient)
            # for each well.
            log_dist = np.log(np.sqrt((self.X_ADJUSTED - self.point_log_array[well_no, 0])**2 +
                                      (self.Y_ADJUSTED - self.point_log_array[well_no, 1])**2))
            if np.any(np.isinf(log_dist)):
                # A data point coincides with the well (log(0) = -inf);
                # cap at a large negative value instead.
                log_dist[np.isinf(log_dist)] = -100.0
            a[:n, i] = - self.point_log_array[well_no, 2] * log_dist
            a[i, :n] = - self.point_log_array[well_no, 2] * log_dist
            i += 1
    if self.external_Z_drift:
        a[:n, i] = self.z_scalars
        a[i, :n] = self.z_scalars
        i += 1
    if self.specified_drift:
        for arr in self.specified_drift_data_arrays:
            a[:n, i] = arr
            a[i, :n] = arr
            i += 1
    if self.functional_drift:
        for func in self.functional_drift_terms:
            a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
            a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
            i += 1
    # Sanity check: every expected drift row/column must have been filled.
    if i != n_withdrifts:
        warnings.warn("Error in creating kriging matrix. Kriging may fail.",
                      RuntimeWarning)
    if self.UNBIAS:
        # Unbiasedness constraint: ones over the data block, zeros over
        # the drift block (and the corner).
        a[n_withdrifts, :n] = 1.0
        a[:n, n_withdrifts] = 1.0
        a[n:n_withdrifts + 1, n:n_withdrifts + 1] = 0.0
    return a
constant[Assembles the kriging matrix.]
variable[xy] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Subscript object at 0x7da20c76fee0>, <ast.Subscript object at 0x7da20c76d030>]]]]
variable[d] assign[=] call[name[cdist], parameter[name[xy], name[xy], constant[euclidean]]]
if name[self].UNBIAS begin[:]
variable[a] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da20c76c8e0>, <ast.BinOp object at 0x7da20c76dde0>]]]]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76e740>, <ast.Slice object at 0x7da20c76d7b0>]]] assign[=] <ast.UnaryOp object at 0x7da20c76e290>
call[name[np].fill_diagonal, parameter[name[a], constant[0.0]]]
variable[i] assign[=] name[n]
if name[self].regional_linear_drift begin[:]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76f460>, <ast.Name object at 0x7da20c76de40>]]] assign[=] name[self].X_ADJUSTED
call[name[a]][tuple[[<ast.Name object at 0x7da20c76f280>, <ast.Slice object at 0x7da20c76ff40>]]] assign[=] name[self].X_ADJUSTED
<ast.AugAssign object at 0x7da20c76f370>
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76c610>, <ast.Name object at 0x7da20c76c6a0>]]] assign[=] name[self].Y_ADJUSTED
call[name[a]][tuple[[<ast.Name object at 0x7da20c76e590>, <ast.Slice object at 0x7da20c76e830>]]] assign[=] name[self].Y_ADJUSTED
<ast.AugAssign object at 0x7da20c76c5e0>
if name[self].point_log_drift begin[:]
for taget[name[well_no]] in starred[call[name[range], parameter[call[name[self].point_log_array.shape][constant[0]]]]] begin[:]
variable[log_dist] assign[=] call[name[np].log, parameter[call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[name[self].X_ADJUSTED - call[name[self].point_log_array][tuple[[<ast.Name object at 0x7da204621d20>, <ast.Constant object at 0x7da204620430>]]]] ** constant[2]] + binary_operation[binary_operation[name[self].Y_ADJUSTED - call[name[self].point_log_array][tuple[[<ast.Name object at 0x7da20c76f250>, <ast.Constant object at 0x7da20c76d090>]]]] ** constant[2]]]]]]]
if call[name[np].any, parameter[call[name[np].isinf, parameter[name[log_dist]]]]] begin[:]
call[name[log_dist]][call[name[np].isinf, parameter[name[log_dist]]]] assign[=] <ast.UnaryOp object at 0x7da20c76d3c0>
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76ce50>, <ast.Name object at 0x7da20c76ccd0>]]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20c76f850> * name[log_dist]]
call[name[a]][tuple[[<ast.Name object at 0x7da20c76cac0>, <ast.Slice object at 0x7da20c76df90>]]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20c76dd50> * name[log_dist]]
<ast.AugAssign object at 0x7da20c76f820>
if name[self].external_Z_drift begin[:]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76e6b0>, <ast.Name object at 0x7da20c76ee30>]]] assign[=] name[self].z_scalars
call[name[a]][tuple[[<ast.Name object at 0x7da20c76cfa0>, <ast.Slice object at 0x7da20c76d180>]]] assign[=] name[self].z_scalars
<ast.AugAssign object at 0x7da20c76da50>
if name[self].specified_drift begin[:]
for taget[name[arr]] in starred[name[self].specified_drift_data_arrays] begin[:]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76d9f0>, <ast.Name object at 0x7da20c76e4a0>]]] assign[=] name[arr]
call[name[a]][tuple[[<ast.Name object at 0x7da20c76e1a0>, <ast.Slice object at 0x7da20c76f220>]]] assign[=] name[arr]
<ast.AugAssign object at 0x7da20c76f9d0>
if name[self].functional_drift begin[:]
for taget[name[func]] in starred[name[self].functional_drift_terms] begin[:]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76e7a0>, <ast.Name object at 0x7da20c76dc30>]]] assign[=] call[name[func], parameter[name[self].X_ADJUSTED, name[self].Y_ADJUSTED]]
call[name[a]][tuple[[<ast.Name object at 0x7da20c76fb20>, <ast.Slice object at 0x7da20c76ddb0>]]] assign[=] call[name[func], parameter[name[self].X_ADJUSTED, name[self].Y_ADJUSTED]]
<ast.AugAssign object at 0x7da20c76c520>
if compare[name[i] not_equal[!=] name[n_withdrifts]] begin[:]
call[name[warnings].warn, parameter[constant[Error in creating kriging matrix. Kriging may fail.], name[RuntimeWarning]]]
if name[self].UNBIAS begin[:]
call[name[a]][tuple[[<ast.Name object at 0x7da20c76f670>, <ast.Slice object at 0x7da20c76de70>]]] assign[=] constant[1.0]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76f3d0>, <ast.Name object at 0x7da20c76ef50>]]] assign[=] constant[1.0]
call[name[a]][tuple[[<ast.Slice object at 0x7da20c76fb80>, <ast.Slice object at 0x7da20c76de10>]]] assign[=] constant[0.0]
return[name[a]] | keyword[def] identifier[_get_kriging_matrix] ( identifier[self] , identifier[n] , identifier[n_withdrifts] ):
literal[string]
identifier[xy] = identifier[np] . identifier[concatenate] (( identifier[self] . identifier[X_ADJUSTED] [:, identifier[np] . identifier[newaxis] ],
identifier[self] . identifier[Y_ADJUSTED] [:, identifier[np] . identifier[newaxis] ]), identifier[axis] = literal[int] )
identifier[d] = identifier[cdist] ( identifier[xy] , identifier[xy] , literal[string] )
keyword[if] identifier[self] . identifier[UNBIAS] :
identifier[a] = identifier[np] . identifier[zeros] (( identifier[n_withdrifts] + literal[int] , identifier[n_withdrifts] + literal[int] ))
keyword[else] :
identifier[a] = identifier[np] . identifier[zeros] (( identifier[n_withdrifts] , identifier[n_withdrifts] ))
identifier[a] [: identifier[n] ,: identifier[n] ]=- identifier[self] . identifier[variogram_function] ( identifier[self] . identifier[variogram_model_parameters] , identifier[d] )
identifier[np] . identifier[fill_diagonal] ( identifier[a] , literal[int] )
identifier[i] = identifier[n]
keyword[if] identifier[self] . identifier[regional_linear_drift] :
identifier[a] [: identifier[n] , identifier[i] ]= identifier[self] . identifier[X_ADJUSTED]
identifier[a] [ identifier[i] ,: identifier[n] ]= identifier[self] . identifier[X_ADJUSTED]
identifier[i] += literal[int]
identifier[a] [: identifier[n] , identifier[i] ]= identifier[self] . identifier[Y_ADJUSTED]
identifier[a] [ identifier[i] ,: identifier[n] ]= identifier[self] . identifier[Y_ADJUSTED]
identifier[i] += literal[int]
keyword[if] identifier[self] . identifier[point_log_drift] :
keyword[for] identifier[well_no] keyword[in] identifier[range] ( identifier[self] . identifier[point_log_array] . identifier[shape] [ literal[int] ]):
identifier[log_dist] = identifier[np] . identifier[log] ( identifier[np] . identifier[sqrt] (( identifier[self] . identifier[X_ADJUSTED] - identifier[self] . identifier[point_log_array] [ identifier[well_no] , literal[int] ])** literal[int] +
( identifier[self] . identifier[Y_ADJUSTED] - identifier[self] . identifier[point_log_array] [ identifier[well_no] , literal[int] ])** literal[int] ))
keyword[if] identifier[np] . identifier[any] ( identifier[np] . identifier[isinf] ( identifier[log_dist] )):
identifier[log_dist] [ identifier[np] . identifier[isinf] ( identifier[log_dist] )]=- literal[int]
identifier[a] [: identifier[n] , identifier[i] ]=- identifier[self] . identifier[point_log_array] [ identifier[well_no] , literal[int] ]* identifier[log_dist]
identifier[a] [ identifier[i] ,: identifier[n] ]=- identifier[self] . identifier[point_log_array] [ identifier[well_no] , literal[int] ]* identifier[log_dist]
identifier[i] += literal[int]
keyword[if] identifier[self] . identifier[external_Z_drift] :
identifier[a] [: identifier[n] , identifier[i] ]= identifier[self] . identifier[z_scalars]
identifier[a] [ identifier[i] ,: identifier[n] ]= identifier[self] . identifier[z_scalars]
identifier[i] += literal[int]
keyword[if] identifier[self] . identifier[specified_drift] :
keyword[for] identifier[arr] keyword[in] identifier[self] . identifier[specified_drift_data_arrays] :
identifier[a] [: identifier[n] , identifier[i] ]= identifier[arr]
identifier[a] [ identifier[i] ,: identifier[n] ]= identifier[arr]
identifier[i] += literal[int]
keyword[if] identifier[self] . identifier[functional_drift] :
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[functional_drift_terms] :
identifier[a] [: identifier[n] , identifier[i] ]= identifier[func] ( identifier[self] . identifier[X_ADJUSTED] , identifier[self] . identifier[Y_ADJUSTED] )
identifier[a] [ identifier[i] ,: identifier[n] ]= identifier[func] ( identifier[self] . identifier[X_ADJUSTED] , identifier[self] . identifier[Y_ADJUSTED] )
identifier[i] += literal[int]
keyword[if] identifier[i] != identifier[n_withdrifts] :
identifier[warnings] . identifier[warn] ( literal[string] ,
identifier[RuntimeWarning] )
keyword[if] identifier[self] . identifier[UNBIAS] :
identifier[a] [ identifier[n_withdrifts] ,: identifier[n] ]= literal[int]
identifier[a] [: identifier[n] , identifier[n_withdrifts] ]= literal[int]
identifier[a] [ identifier[n] : identifier[n_withdrifts] + literal[int] , identifier[n] : identifier[n_withdrifts] + literal[int] ]= literal[int]
keyword[return] identifier[a] | def _get_kriging_matrix(self, n, n_withdrifts):
"""Assembles the kriging matrix."""
xy = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xy, xy, 'euclidean')
if self.UNBIAS:
a = np.zeros((n_withdrifts + 1, n_withdrifts + 1)) # depends on [control=['if'], data=[]]
else:
a = np.zeros((n_withdrifts, n_withdrifts))
a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
i = n
if self.regional_linear_drift:
a[:n, i] = self.X_ADJUSTED
a[i, :n] = self.X_ADJUSTED
i += 1
a[:n, i] = self.Y_ADJUSTED
a[i, :n] = self.Y_ADJUSTED
i += 1 # depends on [control=['if'], data=[]]
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(np.sqrt((self.X_ADJUSTED - self.point_log_array[well_no, 0]) ** 2 + (self.Y_ADJUSTED - self.point_log_array[well_no, 1]) ** 2))
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0 # depends on [control=['if'], data=[]]
a[:n, i] = -self.point_log_array[well_no, 2] * log_dist
a[i, :n] = -self.point_log_array[well_no, 2] * log_dist
i += 1 # depends on [control=['for'], data=['well_no']] # depends on [control=['if'], data=[]]
if self.external_Z_drift:
a[:n, i] = self.z_scalars
a[i, :n] = self.z_scalars
i += 1 # depends on [control=['if'], data=[]]
if self.specified_drift:
for arr in self.specified_drift_data_arrays:
a[:n, i] = arr
a[i, :n] = arr
i += 1 # depends on [control=['for'], data=['arr']] # depends on [control=['if'], data=[]]
if self.functional_drift:
for func in self.functional_drift_terms:
a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
i += 1 # depends on [control=['for'], data=['func']] # depends on [control=['if'], data=[]]
if i != n_withdrifts:
warnings.warn('Error in creating kriging matrix. Kriging may fail.', RuntimeWarning) # depends on [control=['if'], data=[]]
if self.UNBIAS:
a[n_withdrifts, :n] = 1.0
a[:n, n_withdrifts] = 1.0
a[n:n_withdrifts + 1, n:n_withdrifts + 1] = 0.0 # depends on [control=['if'], data=[]]
return a |
def _unescape_math(xml):
    """Unescapes Math from Mathjax to MathML.

    Replaces the escaped text of every ``<script type="math/mml">`` element
    with a parsed MathML subtree and returns the (mutated) document.
    """
    find_math_scripts = etree.XPath(
        '//x:script[@type="math/mml"]',
        namespaces={'x': 'http://www.w3.org/1999/xhtml'})
    for script in find_math_scripts(xml):
        raw = script.text
        # some browsers double escape like e.g. Firefox
        raw = unescape(unescape(raw))
        script.clear()
        # clear() drops attributes too, so restore the type marker
        script.set('type', 'math/mml')
        script.append(etree.fromstring(raw))
    return xml
constant[Unescapes Math from Mathjax to MathML.]
variable[xpath_math_script] assign[=] call[name[etree].XPath, parameter[constant[//x:script[@type="math/mml"]]]]
variable[math_script_list] assign[=] call[name[xpath_math_script], parameter[name[xml]]]
for taget[name[mathscript]] in starred[name[math_script_list]] begin[:]
variable[math] assign[=] name[mathscript].text
variable[math] assign[=] call[name[unescape], parameter[call[name[unescape], parameter[name[math]]]]]
call[name[mathscript].clear, parameter[]]
call[name[mathscript].set, parameter[constant[type], constant[math/mml]]]
variable[new_math] assign[=] call[name[etree].fromstring, parameter[name[math]]]
call[name[mathscript].append, parameter[name[new_math]]]
return[name[xml]] | keyword[def] identifier[_unescape_math] ( identifier[xml] ):
literal[string]
identifier[xpath_math_script] = identifier[etree] . identifier[XPath] (
literal[string] ,
identifier[namespaces] ={ literal[string] : literal[string] })
identifier[math_script_list] = identifier[xpath_math_script] ( identifier[xml] )
keyword[for] identifier[mathscript] keyword[in] identifier[math_script_list] :
identifier[math] = identifier[mathscript] . identifier[text]
identifier[math] = identifier[unescape] ( identifier[unescape] ( identifier[math] ))
identifier[mathscript] . identifier[clear] ()
identifier[mathscript] . identifier[set] ( literal[string] , literal[string] )
identifier[new_math] = identifier[etree] . identifier[fromstring] ( identifier[math] )
identifier[mathscript] . identifier[append] ( identifier[new_math] )
keyword[return] identifier[xml] | def _unescape_math(xml):
"""Unescapes Math from Mathjax to MathML."""
xpath_math_script = etree.XPath('//x:script[@type="math/mml"]', namespaces={'x': 'http://www.w3.org/1999/xhtml'})
math_script_list = xpath_math_script(xml)
for mathscript in math_script_list:
math = mathscript.text
# some browsers double escape like e.g. Firefox
math = unescape(unescape(math))
mathscript.clear()
mathscript.set('type', 'math/mml')
new_math = etree.fromstring(math)
mathscript.append(new_math) # depends on [control=['for'], data=['mathscript']]
return xml |
def delete_index(self, display_number):
    """Delete display expression *display_number*.

    Returns True when an entry was actually removed, False otherwise.
    """
    before = len(self.list)
    self.list = [entry for entry in self.list
                 if entry.number != display_number]
    return len(self.list) != before
constant[Delete display expression *display_number*]
variable[old_size] assign[=] call[name[len], parameter[name[self].list]]
name[self].list assign[=] <ast.ListComp object at 0x7da1b05beb00>
return[compare[name[old_size] not_equal[!=] call[name[len], parameter[name[self].list]]]] | keyword[def] identifier[delete_index] ( identifier[self] , identifier[display_number] ):
literal[string]
identifier[old_size] = identifier[len] ( identifier[self] . identifier[list] )
identifier[self] . identifier[list] =[ identifier[disp] keyword[for] identifier[disp] keyword[in] identifier[self] . identifier[list]
keyword[if] identifier[display_number] != identifier[disp] . identifier[number] ]
keyword[return] identifier[old_size] != identifier[len] ( identifier[self] . identifier[list] ) | def delete_index(self, display_number):
"""Delete display expression *display_number*"""
old_size = len(self.list)
self.list = [disp for disp in self.list if display_number != disp.number]
return old_size != len(self.list) |
def _try_decode_utf8_content(self, content, content_type):
"""Generic function to decode content.
:param object content:
:return:
"""
if not self._auto_decode or not content:
return content
if content_type in self._decode_cache:
return self._decode_cache[content_type]
if isinstance(content, dict):
content = self._try_decode_dict(content)
else:
content = try_utf8_decode(content)
self._decode_cache[content_type] = content
return content | def function[_try_decode_utf8_content, parameter[self, content, content_type]]:
constant[Generic function to decode content.
:param object content:
:return:
]
if <ast.BoolOp object at 0x7da18f720ac0> begin[:]
return[name[content]]
if compare[name[content_type] in name[self]._decode_cache] begin[:]
return[call[name[self]._decode_cache][name[content_type]]]
if call[name[isinstance], parameter[name[content], name[dict]]] begin[:]
variable[content] assign[=] call[name[self]._try_decode_dict, parameter[name[content]]]
call[name[self]._decode_cache][name[content_type]] assign[=] name[content]
return[name[content]] | keyword[def] identifier[_try_decode_utf8_content] ( identifier[self] , identifier[content] , identifier[content_type] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_auto_decode] keyword[or] keyword[not] identifier[content] :
keyword[return] identifier[content]
keyword[if] identifier[content_type] keyword[in] identifier[self] . identifier[_decode_cache] :
keyword[return] identifier[self] . identifier[_decode_cache] [ identifier[content_type] ]
keyword[if] identifier[isinstance] ( identifier[content] , identifier[dict] ):
identifier[content] = identifier[self] . identifier[_try_decode_dict] ( identifier[content] )
keyword[else] :
identifier[content] = identifier[try_utf8_decode] ( identifier[content] )
identifier[self] . identifier[_decode_cache] [ identifier[content_type] ]= identifier[content]
keyword[return] identifier[content] | def _try_decode_utf8_content(self, content, content_type):
"""Generic function to decode content.
:param object content:
:return:
"""
if not self._auto_decode or not content:
return content # depends on [control=['if'], data=[]]
if content_type in self._decode_cache:
return self._decode_cache[content_type] # depends on [control=['if'], data=['content_type']]
if isinstance(content, dict):
content = self._try_decode_dict(content) # depends on [control=['if'], data=[]]
else:
content = try_utf8_decode(content)
self._decode_cache[content_type] = content
return content |
def drop_edge_punct(word: str) -> str:
    """
    Remove a single non-alphabetic character from each edge of *word*.

    Only one leading and one trailing character are examined, so doubled
    edge punctuation (e.g. ``'"word".'``) keeps its inner layer.

    :param word: a single string
    >>> drop_edge_punct("'fieri")
    'fieri'
    >>> drop_edge_punct('sedes.')
    'sedes'
    """
    if not word:
        return word
    if not word[0].isalpha():
        word = word[1:]
    # the emptiness check replaces the old bare except, which silently
    # swallowed the IndexError raised when stripping the leading character
    # left an empty string
    if word and not word[-1].isalpha():
        word = word[:-1]
    return word
constant[
Remove edge punctuation.
:param word: a single string
>>> drop_edge_punct("'fieri")
'fieri'
>>> drop_edge_punct('sedes.')
'sedes'
]
if <ast.UnaryOp object at 0x7da20c795c00> begin[:]
return[name[word]]
<ast.Try object at 0x7da20c796440>
return[name[word]] | keyword[def] identifier[drop_edge_punct] ( identifier[word] : identifier[str] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] identifier[word] :
keyword[return] identifier[word]
keyword[try] :
keyword[if] keyword[not] identifier[word] [ literal[int] ]. identifier[isalpha] ():
identifier[word] = identifier[word] [ literal[int] :]
keyword[if] keyword[not] identifier[word] [- literal[int] ]. identifier[isalpha] ():
identifier[word] = identifier[word] [:- literal[int] ]
keyword[except] :
keyword[pass]
keyword[return] identifier[word] | def drop_edge_punct(word: str) -> str:
"""
Remove edge punctuation.
:param word: a single string
>>> drop_edge_punct("'fieri")
'fieri'
>>> drop_edge_punct('sedes.')
'sedes'
"""
if not word:
return word # depends on [control=['if'], data=[]]
try:
if not word[0].isalpha():
word = word[1:] # depends on [control=['if'], data=[]]
if not word[-1].isalpha():
word = word[:-1] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
return word |
def _parse(self):
    """Parses the input file's content and generates a list of its elements/docstrings.

    Scans ``self.input_file`` (or stdin when the name is ``-``) line by line,
    detecting ``def``/``class`` headers and the docstring block that follows
    each one.  Each detected element becomes a dict
    ``{'docs': DocString, 'location': (start, end)}`` where ``start``/``end``
    are the docstring's line indexes (stored as negative header indexes until
    a docstring is found).  Results are kept in ``self.docs_list``.

    :returns: the list of elements
    """
    #TODO manage decorators
    #TODO manage default params with strings escaping chars as (, ), ', ', #, ...
    #TODO manage elements ending with comments like: def func(param): # blabla
    elem_list = []
    reading_element = None   # None / 'start' / 'end': header-parsing state
    reading_docs = None      # None, or the active docstring delimiter (""" or ''')
    waiting_docs = False     # True after a header, until its docstring is handled
    elem = ''                # accumulated (possibly multi-line) header text
    raw = ''                 # accumulated raw docstring lines
    start = 0
    end = 0
    try:
        if self.input_file == '-':
            fd = sys.stdin
        else:
            fd = open(self.input_file)
        self.input_lines = fd.readlines()
        if self.input_file != '-':
            fd.close()
    except IOError:
        msg = BaseException('Failed to open file "' + self.input_file + '". Please provide a valid file.')
        raise msg
    for i, ln in enumerate(self.input_lines):
        l = ln.strip()
        if reading_element:
            # a multi-line signature continues on this line
            elem += l
            if l.endswith(':'):
                reading_element = 'end'
        elif (l.startswith('def ') or l.startswith('class ')) and not reading_docs:
            # new def/class header found (and we are not inside a docstring)
            if self.ignore_private and l[l.find(' '):].strip().startswith("__"):
                continue
            reading_element = 'start'
            elem = l
            # capture the header's leading indentation from the raw line
            m = re.match(r'^(\s*)[dc]{1}', ln)
            if m is not None and m.group(1) is not None:
                spaces = m.group(1)
            else:
                spaces = ''
            # header is complete on this line if it ends with ':' (allowing a
            # trailing comment containing no quote characters)
            if re.search(r''':(|\s*#[^'"]*)$''', l):
                reading_element = 'end'
        if reading_element == 'end':
            # header fully read: build the DocString and start waiting for
            # the docstring that may follow
            reading_element = None
            # if currently reading an element content
            waiting_docs = True
            # *** Creates the DocString object ***
            e = DocString(elem.replace('\n', ' '), spaces, quotes=self.quotes,
                          input_style=self.input_style,
                          output_style=self.output_style,
                          first_line=self.first_line,
                          **self.kwargs)
            # placeholder location until the docstring is located
            elem_list.append({'docs': e, 'location': (-i, -i)})
        else:
            if waiting_docs and ('"""' in l or "'''" in l):
                # start of docstring bloc
                if not reading_docs:
                    start = i
                    # determine which delimiter comes first on the line
                    idx_c = l.find('"""')
                    idx_dc = l.find("'''")
                    lim = '"""'
                    if idx_c >= 0 and idx_dc >= 0:
                        if idx_c < idx_dc:
                            lim = '"""'
                        else:
                            lim = "'''"
                    elif idx_c < 0:
                        lim = "'''"
                    reading_docs = lim
                    raw = ln
                    # one line docstring
                    if l.count(lim) == 2:
                        end = i
                        elem_list[-1]['docs'].parse_docs(raw)
                        elem_list[-1]['location'] = (start, end)
                        reading_docs = None
                        waiting_docs = False
                        reading_element = False
                        raw = ''
                # end of docstring bloc
                elif waiting_docs and lim in l:
                    end = i
                    raw += ln
                    elem_list[-1]['docs'].parse_docs(raw)
                    elem_list[-1]['location'] = (start, end)
                    reading_docs = None
                    waiting_docs = False
                    reading_element = False
                    raw = ''
                # inside a docstring bloc (a quote that is not our delimiter)
                elif waiting_docs:
                    raw += ln
            # no docstring found for current element
            elif waiting_docs and l != '' and reading_docs is None:
                waiting_docs = False
            else:
                if reading_docs is not None:
                    raw += ln
    if self.convert_only:
        # keep only elements that already carry an input docstring
        i = 0
        while i < len(elem_list):
            if elem_list[i]['docs'].get_input_docstring() is None:
                elem_list.pop(i)
            else:
                i += 1
    self.docs_list = elem_list
    self.parsed = True
    return elem_list
constant[Parses the input file's content and generates a list of its elements/docstrings.
:returns: the list of elements
]
variable[elem_list] assign[=] list[[]]
variable[reading_element] assign[=] constant[None]
variable[reading_docs] assign[=] constant[None]
variable[waiting_docs] assign[=] constant[False]
variable[elem] assign[=] constant[]
variable[raw] assign[=] constant[]
variable[start] assign[=] constant[0]
variable[end] assign[=] constant[0]
<ast.Try object at 0x7da1b112ba30>
for taget[tuple[[<ast.Name object at 0x7da1b1129ed0>, <ast.Name object at 0x7da1b1129cc0>]]] in starred[call[name[enumerate], parameter[name[self].input_lines]]] begin[:]
variable[l] assign[=] call[name[ln].strip, parameter[]]
if name[reading_element] begin[:]
<ast.AugAssign object at 0x7da1b1129ae0>
if call[name[l].endswith, parameter[constant[:]]] begin[:]
variable[reading_element] assign[=] constant[end]
if compare[name[reading_element] equal[==] constant[end]] begin[:]
variable[reading_element] assign[=] constant[None]
variable[waiting_docs] assign[=] constant[True]
variable[e] assign[=] call[name[DocString], parameter[call[name[elem].replace, parameter[constant[
], constant[ ]]], name[spaces]]]
call[name[elem_list].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1128310>, <ast.Constant object at 0x7da1b11282b0>], [<ast.Name object at 0x7da1b1128340>, <ast.Tuple object at 0x7da1b1128280>]]]]
if name[self].convert_only begin[:]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] call[name[len], parameter[name[elem_list]]]] begin[:]
if compare[call[call[call[name[elem_list]][name[i]]][constant[docs]].get_input_docstring, parameter[]] is constant[None]] begin[:]
call[name[elem_list].pop, parameter[name[i]]]
name[self].docs_list assign[=] name[elem_list]
name[self].parsed assign[=] constant[True]
return[name[elem_list]] | keyword[def] identifier[_parse] ( identifier[self] ):
literal[string]
identifier[elem_list] =[]
identifier[reading_element] = keyword[None]
identifier[reading_docs] = keyword[None]
identifier[waiting_docs] = keyword[False]
identifier[elem] = literal[string]
identifier[raw] = literal[string]
identifier[start] = literal[int]
identifier[end] = literal[int]
keyword[try] :
keyword[if] identifier[self] . identifier[input_file] == literal[string] :
identifier[fd] = identifier[sys] . identifier[stdin]
keyword[else] :
identifier[fd] = identifier[open] ( identifier[self] . identifier[input_file] )
identifier[self] . identifier[input_lines] = identifier[fd] . identifier[readlines] ()
keyword[if] identifier[self] . identifier[input_file] != literal[string] :
identifier[fd] . identifier[close] ()
keyword[except] identifier[IOError] :
identifier[msg] = identifier[BaseException] ( literal[string] + identifier[self] . identifier[input_file] + literal[string] )
keyword[raise] identifier[msg]
keyword[for] identifier[i] , identifier[ln] keyword[in] identifier[enumerate] ( identifier[self] . identifier[input_lines] ):
identifier[l] = identifier[ln] . identifier[strip] ()
keyword[if] identifier[reading_element] :
identifier[elem] += identifier[l]
keyword[if] identifier[l] . identifier[endswith] ( literal[string] ):
identifier[reading_element] = literal[string]
keyword[elif] ( identifier[l] . identifier[startswith] ( literal[string] ) keyword[or] identifier[l] . identifier[startswith] ( literal[string] )) keyword[and] keyword[not] identifier[reading_docs] :
keyword[if] identifier[self] . identifier[ignore_private] keyword[and] identifier[l] [ identifier[l] . identifier[find] ( literal[string] ):]. identifier[strip] (). identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[reading_element] = literal[string]
identifier[elem] = identifier[l]
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[ln] )
keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] keyword[and] identifier[m] . identifier[group] ( literal[int] ) keyword[is] keyword[not] keyword[None] :
identifier[spaces] = identifier[m] . identifier[group] ( literal[int] )
keyword[else] :
identifier[spaces] = literal[string]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[l] ):
identifier[reading_element] = literal[string]
keyword[if] identifier[reading_element] == literal[string] :
identifier[reading_element] = keyword[None]
identifier[waiting_docs] = keyword[True]
identifier[e] = identifier[DocString] ( identifier[elem] . identifier[replace] ( literal[string] , literal[string] ), identifier[spaces] , identifier[quotes] = identifier[self] . identifier[quotes] ,
identifier[input_style] = identifier[self] . identifier[input_style] ,
identifier[output_style] = identifier[self] . identifier[output_style] ,
identifier[first_line] = identifier[self] . identifier[first_line] ,
** identifier[self] . identifier[kwargs] )
identifier[elem_list] . identifier[append] ({ literal[string] : identifier[e] , literal[string] :(- identifier[i] ,- identifier[i] )})
keyword[else] :
keyword[if] identifier[waiting_docs] keyword[and] ( literal[string] keyword[in] identifier[l] keyword[or] literal[string] keyword[in] identifier[l] ):
keyword[if] keyword[not] identifier[reading_docs] :
identifier[start] = identifier[i]
identifier[idx_c] = identifier[l] . identifier[find] ( literal[string] )
identifier[idx_dc] = identifier[l] . identifier[find] ( literal[string] )
identifier[lim] = literal[string]
keyword[if] identifier[idx_c] >= literal[int] keyword[and] identifier[idx_dc] >= literal[int] :
keyword[if] identifier[idx_c] < identifier[idx_dc] :
identifier[lim] = literal[string]
keyword[else] :
identifier[lim] = literal[string]
keyword[elif] identifier[idx_c] < literal[int] :
identifier[lim] = literal[string]
identifier[reading_docs] = identifier[lim]
identifier[raw] = identifier[ln]
keyword[if] identifier[l] . identifier[count] ( identifier[lim] )== literal[int] :
identifier[end] = identifier[i]
identifier[elem_list] [- literal[int] ][ literal[string] ]. identifier[parse_docs] ( identifier[raw] )
identifier[elem_list] [- literal[int] ][ literal[string] ]=( identifier[start] , identifier[end] )
identifier[reading_docs] = keyword[None]
identifier[waiting_docs] = keyword[False]
identifier[reading_element] = keyword[False]
identifier[raw] = literal[string]
keyword[elif] identifier[waiting_docs] keyword[and] identifier[lim] keyword[in] identifier[l] :
identifier[end] = identifier[i]
identifier[raw] += identifier[ln]
identifier[elem_list] [- literal[int] ][ literal[string] ]. identifier[parse_docs] ( identifier[raw] )
identifier[elem_list] [- literal[int] ][ literal[string] ]=( identifier[start] , identifier[end] )
identifier[reading_docs] = keyword[None]
identifier[waiting_docs] = keyword[False]
identifier[reading_element] = keyword[False]
identifier[raw] = literal[string]
keyword[elif] identifier[waiting_docs] :
identifier[raw] += identifier[ln]
keyword[elif] identifier[waiting_docs] keyword[and] identifier[l] != literal[string] keyword[and] identifier[reading_docs] keyword[is] keyword[None] :
identifier[waiting_docs] = keyword[False]
keyword[else] :
keyword[if] identifier[reading_docs] keyword[is] keyword[not] keyword[None] :
identifier[raw] += identifier[ln]
keyword[if] identifier[self] . identifier[convert_only] :
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[elem_list] ):
keyword[if] identifier[elem_list] [ identifier[i] ][ literal[string] ]. identifier[get_input_docstring] () keyword[is] keyword[None] :
identifier[elem_list] . identifier[pop] ( identifier[i] )
keyword[else] :
identifier[i] += literal[int]
identifier[self] . identifier[docs_list] = identifier[elem_list]
identifier[self] . identifier[parsed] = keyword[True]
keyword[return] identifier[elem_list] | def _parse(self):
"""Parses the input file's content and generates a list of its elements/docstrings.
:returns: the list of elements
"""
#TODO manage decorators
#TODO manage default params with strings escaping chars as (, ), ', ', #, ...
#TODO manage elements ending with comments like: def func(param): # blabla
elem_list = []
reading_element = None
reading_docs = None
waiting_docs = False
elem = ''
raw = ''
start = 0
end = 0
try:
if self.input_file == '-':
fd = sys.stdin # depends on [control=['if'], data=[]]
else:
fd = open(self.input_file)
self.input_lines = fd.readlines()
if self.input_file != '-':
fd.close() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IOError:
msg = BaseException('Failed to open file "' + self.input_file + '". Please provide a valid file.')
raise msg # depends on [control=['except'], data=[]]
for (i, ln) in enumerate(self.input_lines):
l = ln.strip()
if reading_element:
elem += l
if l.endswith(':'):
reading_element = 'end' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif (l.startswith('def ') or l.startswith('class ')) and (not reading_docs):
if self.ignore_private and l[l.find(' '):].strip().startswith('__'):
continue # depends on [control=['if'], data=[]]
reading_element = 'start'
elem = l
m = re.match('^(\\s*)[dc]{1}', ln)
if m is not None and m.group(1) is not None:
spaces = m.group(1) # depends on [control=['if'], data=[]]
else:
spaces = ''
if re.search(':(|\\s*#[^\'"]*)$', l):
reading_element = 'end' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if reading_element == 'end':
reading_element = None
# if currently reading an element content
waiting_docs = True
# *** Creates the DocString object ***
e = DocString(elem.replace('\n', ' '), spaces, quotes=self.quotes, input_style=self.input_style, output_style=self.output_style, first_line=self.first_line, **self.kwargs)
elem_list.append({'docs': e, 'location': (-i, -i)}) # depends on [control=['if'], data=['reading_element']]
elif waiting_docs and ('"""' in l or "'''" in l):
# start of docstring bloc
if not reading_docs:
start = i
# determine which delimiter
idx_c = l.find('"""')
idx_dc = l.find("'''")
lim = '"""'
if idx_c >= 0 and idx_dc >= 0:
if idx_c < idx_dc:
lim = '"""' # depends on [control=['if'], data=[]]
else:
lim = "'''" # depends on [control=['if'], data=[]]
elif idx_c < 0:
lim = "'''" # depends on [control=['if'], data=[]]
reading_docs = lim
raw = ln
# one line docstring
if l.count(lim) == 2:
end = i
elem_list[-1]['docs'].parse_docs(raw)
elem_list[-1]['location'] = (start, end)
reading_docs = None
waiting_docs = False
reading_element = False
raw = '' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# end of docstring bloc
elif waiting_docs and lim in l:
end = i
raw += ln
elem_list[-1]['docs'].parse_docs(raw)
elem_list[-1]['location'] = (start, end)
reading_docs = None
waiting_docs = False
reading_element = False
raw = '' # depends on [control=['if'], data=[]]
# inside a docstring bloc
elif waiting_docs:
raw += ln # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# no docstring found for current element
elif waiting_docs and l != '' and (reading_docs is None):
waiting_docs = False # depends on [control=['if'], data=[]]
elif reading_docs is not None:
raw += ln # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if self.convert_only:
i = 0
while i < len(elem_list):
if elem_list[i]['docs'].get_input_docstring() is None:
elem_list.pop(i) # depends on [control=['if'], data=[]]
else:
i += 1 # depends on [control=['while'], data=['i']] # depends on [control=['if'], data=[]]
self.docs_list = elem_list
self.parsed = True
return elem_list |
def check_signatures(pkg_list, allowed_keys):
    """
    Go through the list of packages with signatures and verify each one.

    Every entry is checked (no short-circuit, so the per-line handler can
    report every failure), then a single exception is raised if any check
    failed.

    :param pkg_list: list of packages in format '%{name} %{SIGPGP:pgpsig}'
    :param allowed_keys: list of allowed keys
    """
    results = [process_rpm_ql_line(entry.strip(), allowed_keys)
               for entry in pkg_list]
    if not all(results):
        raise PackageSignatureException(
            'Error while checking rpm signatures, see logs for more info')
'Error while checking rpm signatures, see logs for more info') | def function[check_signatures, parameter[pkg_list, allowed_keys]]:
constant[
Go through list of packages with signatures and check if all are properly signed
:param pkg_list: list of packages in format '%{name} %{SIGPGP:pgpsig}'
:param allowed_keys: list of allowed keys
:return: bool
]
variable[all_passed] assign[=] constant[True]
for taget[name[line_str]] in starred[name[pkg_list]] begin[:]
<ast.AugAssign object at 0x7da1b11fe5c0>
if <ast.UnaryOp object at 0x7da1b11fdb40> begin[:]
<ast.Raise object at 0x7da1b11fe620> | keyword[def] identifier[check_signatures] ( identifier[pkg_list] , identifier[allowed_keys] ):
literal[string]
identifier[all_passed] = keyword[True]
keyword[for] identifier[line_str] keyword[in] identifier[pkg_list] :
identifier[all_passed] &= identifier[process_rpm_ql_line] ( identifier[line_str] . identifier[strip] (), identifier[allowed_keys] )
keyword[if] keyword[not] identifier[all_passed] :
keyword[raise] identifier[PackageSignatureException] (
literal[string] ) | def check_signatures(pkg_list, allowed_keys):
"""
Go through list of packages with signatures and check if all are properly signed
:param pkg_list: list of packages in format '%{name} %{SIGPGP:pgpsig}'
:param allowed_keys: list of allowed keys
:return: bool
"""
all_passed = True
for line_str in pkg_list:
all_passed &= process_rpm_ql_line(line_str.strip(), allowed_keys) # depends on [control=['for'], data=['line_str']]
if not all_passed:
raise PackageSignatureException('Error while checking rpm signatures, see logs for more info') # depends on [control=['if'], data=[]] |
def vector_similarity(self, vector, items):
    """Compute the similarity between a vector and a set of items."""
    query = self.normalize(vector)
    rows = [self.norm_vectors[self.items[name]] for name in items]
    return np.dot(query, np.stack(rows).T)
constant[Compute the similarity between a vector and a set of items.]
variable[vector] assign[=] call[name[self].normalize, parameter[name[vector]]]
variable[items_vec] assign[=] call[name[np].stack, parameter[<ast.ListComp object at 0x7da18bccaf50>]]
return[call[name[vector].dot, parameter[name[items_vec].T]]] | keyword[def] identifier[vector_similarity] ( identifier[self] , identifier[vector] , identifier[items] ):
literal[string]
identifier[vector] = identifier[self] . identifier[normalize] ( identifier[vector] )
identifier[items_vec] = identifier[np] . identifier[stack] ([ identifier[self] . identifier[norm_vectors] [ identifier[self] . identifier[items] [ identifier[x] ]] keyword[for] identifier[x] keyword[in] identifier[items] ])
keyword[return] identifier[vector] . identifier[dot] ( identifier[items_vec] . identifier[T] ) | def vector_similarity(self, vector, items):
"""Compute the similarity between a vector and a set of items."""
vector = self.normalize(vector)
items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items])
return vector.dot(items_vec.T) |
def list_resources_info(self, query='?*::INSTR'):
    """Return a mapping from resource name to extended resource information
    for all connected devices matching *query*.

    For details of the VISA Resource Regular Expression syntax used in
    *query*, refer to list_resources().

    :param query: a VISA Resource Regular Expression used to match devices.
    :return: Mapping of resource name to ResourceInfo
    :rtype: dict[str, :class:`pyvisa.highlevel.ResourceInfo`]
    """
    return {name: self.resource_info(name)
            for name in self.list_resources(query)}
constant[Returns a dictionary mapping resource names to resource extended
information of all connected devices matching query.
For details of the VISA Resource Regular Expression syntax used in query,
refer to list_resources().
:param query: a VISA Resource Regular Expression used to match devices.
:return: Mapping of resource name to ResourceInfo
:rtype: dict[str, :class:`pyvisa.highlevel.ResourceInfo`]
]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da2044c0c40>]]] | keyword[def] identifier[list_resources_info] ( identifier[self] , identifier[query] = literal[string] ):
literal[string]
keyword[return] identifier[dict] (( identifier[resource] , identifier[self] . identifier[resource_info] ( identifier[resource] ))
keyword[for] identifier[resource] keyword[in] identifier[self] . identifier[list_resources] ( identifier[query] )) | def list_resources_info(self, query='?*::INSTR'):
"""Returns a dictionary mapping resource names to resource extended
information of all connected devices matching query.
For details of the VISA Resource Regular Expression syntax used in query,
refer to list_resources().
:param query: a VISA Resource Regular Expression used to match devices.
:return: Mapping of resource name to ResourceInfo
:rtype: dict[str, :class:`pyvisa.highlevel.ResourceInfo`]
"""
return dict(((resource, self.resource_info(resource)) for resource in self.list_resources(query))) |
def _rotate(lon, lat, theta, axis='x'):
    """
    Rotate "lon", "lat" coords (in _degrees_) about the axis selected by
    *axis* by "theta" degrees. This effectively simulates rotating a
    physical stereonet. Returns rotated lon, lat coords (in _radians_).
    """
    # Normalize inputs to 1-d arrays in radians
    lon, lat = np.atleast_1d(lon, lat)
    lon = np.radians(lon)
    lat = np.radians(lat)
    angle = np.radians(theta)
    # Rotation itself happens in cartesian space
    x, y, z = sph2cart(lon, lat)
    rotators = {'x': _rotate_x, 'y': _rotate_y, 'z': _rotate_z}
    rx, ry, rz = rotators[axis](x, y, z, angle)
    # Back to spherical coords (longitude and latitude, ignore R)
    new_lon, new_lat = cart2sph(rx, ry, rz)
    return new_lon, new_lat
constant[
Rotate "lon", "lat" coords (in _degrees_) about the X-axis by "theta"
degrees. This effectively simulates rotating a physical stereonet.
Returns rotated lon, lat coords in _radians_).
]
<ast.Tuple object at 0x7da2046218d0> assign[=] call[name[np].atleast_1d, parameter[name[lon], name[lat]]]
<ast.Tuple object at 0x7da204621ab0> assign[=] call[name[map], parameter[name[np].radians, list[[<ast.Name object at 0x7da2046232b0>, <ast.Name object at 0x7da204623970>]]]]
variable[theta] assign[=] call[name[np].radians, parameter[name[theta]]]
<ast.Tuple object at 0x7da204620850> assign[=] call[name[sph2cart], parameter[name[lon], name[lat]]]
variable[lookup] assign[=] dictionary[[<ast.Constant object at 0x7da204621f60>, <ast.Constant object at 0x7da204620b80>, <ast.Constant object at 0x7da204622830>], [<ast.Name object at 0x7da2046215a0>, <ast.Name object at 0x7da204623730>, <ast.Name object at 0x7da204620280>]]
<ast.Tuple object at 0x7da2046201c0> assign[=] call[call[name[lookup]][name[axis]], parameter[name[x], name[y], name[z], name[theta]]]
<ast.Tuple object at 0x7da204622680> assign[=] call[name[cart2sph], parameter[name[X], name[Y], name[Z]]]
return[tuple[[<ast.Name object at 0x7da204623e20>, <ast.Name object at 0x7da204621300>]]] | keyword[def] identifier[_rotate] ( identifier[lon] , identifier[lat] , identifier[theta] , identifier[axis] = literal[string] ):
literal[string]
identifier[lon] , identifier[lat] = identifier[np] . identifier[atleast_1d] ( identifier[lon] , identifier[lat] )
identifier[lon] , identifier[lat] = identifier[map] ( identifier[np] . identifier[radians] ,[ identifier[lon] , identifier[lat] ])
identifier[theta] = identifier[np] . identifier[radians] ( identifier[theta] )
identifier[x] , identifier[y] , identifier[z] = identifier[sph2cart] ( identifier[lon] , identifier[lat] )
identifier[lookup] ={ literal[string] : identifier[_rotate_x] , literal[string] : identifier[_rotate_y] , literal[string] : identifier[_rotate_z] }
identifier[X] , identifier[Y] , identifier[Z] = identifier[lookup] [ identifier[axis] ]( identifier[x] , identifier[y] , identifier[z] , identifier[theta] )
identifier[lon] , identifier[lat] = identifier[cart2sph] ( identifier[X] , identifier[Y] , identifier[Z] )
keyword[return] identifier[lon] , identifier[lat] | def _rotate(lon, lat, theta, axis='x'):
"""
Rotate "lon", "lat" coords (in _degrees_) about the X-axis by "theta"
degrees. This effectively simulates rotating a physical stereonet.
Returns rotated lon, lat coords in _radians_).
"""
# Convert input to numpy arrays in radians
(lon, lat) = np.atleast_1d(lon, lat)
(lon, lat) = map(np.radians, [lon, lat])
theta = np.radians(theta)
# Convert to cartesian coords for the rotation
(x, y, z) = sph2cart(lon, lat)
lookup = {'x': _rotate_x, 'y': _rotate_y, 'z': _rotate_z}
(X, Y, Z) = lookup[axis](x, y, z, theta)
# Now convert back to spherical coords (longitude and latitude, ignore R)
(lon, lat) = cart2sph(X, Y, Z)
return (lon, lat) |
def format_repeated_pair_list(self, key, root_list, level):
    """
    Process (possibly) repeated lists of pairs e.g. POINTs blocks
    """
    def _depth(node):
        # False (0 in arithmetic) for scalars, nesting level for sequences
        return isinstance(node, (tuple, list)) and max(map(_depth, node)) + 1

    # a bare list of pairs (depth 2) is treated as a single group
    groups = [root_list] if _depth(root_list) == 2 else root_list
    lines = []
    for group in groups:
        lines.extend(self.format_pair_list(key, group, level))
    return lines
constant[
Process (possibly) repeated lists of pairs e.g. POINTs blocks
]
variable[lines] assign[=] list[[]]
def function[depth, parameter[L]]:
return[<ast.BoolOp object at 0x7da18dc99ab0>]
if compare[call[name[depth], parameter[name[root_list]]] equal[==] constant[2]] begin[:]
variable[root_list] assign[=] list[[<ast.Name object at 0x7da18dc98670>]]
for taget[name[pair_list]] in starred[name[root_list]] begin[:]
<ast.AugAssign object at 0x7da18dc99780>
return[name[lines]] | keyword[def] identifier[format_repeated_pair_list] ( identifier[self] , identifier[key] , identifier[root_list] , identifier[level] ):
literal[string]
identifier[lines] =[]
keyword[def] identifier[depth] ( identifier[L] ):
keyword[return] identifier[isinstance] ( identifier[L] ,( identifier[tuple] , identifier[list] )) keyword[and] identifier[max] ( identifier[map] ( identifier[depth] , identifier[L] ))+ literal[int]
keyword[if] identifier[depth] ( identifier[root_list] )== literal[int] :
identifier[root_list] =[ identifier[root_list] ]
keyword[for] identifier[pair_list] keyword[in] identifier[root_list] :
identifier[lines] += identifier[self] . identifier[format_pair_list] ( identifier[key] , identifier[pair_list] , identifier[level] )
keyword[return] identifier[lines] | def format_repeated_pair_list(self, key, root_list, level):
"""
Process (possibly) repeated lists of pairs e.g. POINTs blocks
"""
lines = []
def depth(L):
return isinstance(L, (tuple, list)) and max(map(depth, L)) + 1
if depth(root_list) == 2:
# single set of points only
root_list = [root_list] # depends on [control=['if'], data=[]]
for pair_list in root_list:
lines += self.format_pair_list(key, pair_list, level) # depends on [control=['for'], data=['pair_list']]
return lines |
def format_name(self):
    """Generate a name by filling ``self.string_formatter`` with the
    mapping returned by ``self.get_format_data()``.

    :return: str Generated name
    """
    fmt = Formatter()
    return fmt.vformat(self.string_formatter, args=[],
                       kwargs=self.get_format_data())
constant[
:return: str Generated name
]
variable[format_data] assign[=] call[name[self].get_format_data, parameter[]]
return[call[call[name[Formatter], parameter[]].vformat, parameter[name[self].string_formatter]]] | keyword[def] identifier[format_name] ( identifier[self] ):
literal[string]
identifier[format_data] = identifier[self] . identifier[get_format_data] ()
keyword[return] identifier[Formatter] (). identifier[vformat] ( identifier[self] . identifier[string_formatter] , identifier[args] =[], identifier[kwargs] = identifier[format_data] ) | def format_name(self):
"""
:return: str Generated name
"""
format_data = self.get_format_data()
return Formatter().vformat(self.string_formatter, args=[], kwargs=format_data) |
def tasks(self):
    """
    :class:`~zhmcclient.TaskManager`: Access to the :term:`Tasks <Task>` in
    this Console.
    """
    # Lazily build the manager on first access and cache it for reuse.
    cached = self._tasks
    if not cached:
        cached = TaskManager(self)
        self._tasks = cached
    return cached
constant[
:class:`~zhmcclient.TaskManager`: Access to the :term:`Tasks <Task>` in
this Console.
]
if <ast.UnaryOp object at 0x7da2041daa40> begin[:]
name[self]._tasks assign[=] call[name[TaskManager], parameter[name[self]]]
return[name[self]._tasks] | keyword[def] identifier[tasks] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_tasks] :
identifier[self] . identifier[_tasks] = identifier[TaskManager] ( identifier[self] )
keyword[return] identifier[self] . identifier[_tasks] | def tasks(self):
"""
:class:`~zhmcclient.TaskManager`: Access to the :term:`Tasks <Task>` in
this Console.
"""
# We do here some lazy loading.
if not self._tasks:
self._tasks = TaskManager(self) # depends on [control=['if'], data=[]]
return self._tasks |
def system_switch_attributes_host_name(self, **kwargs):
    """Auto Generated Code

    Build the <config> payload that sets the switch host-name and hand it
    to the configured callback (``callback`` kwarg, falling back to
    ``self._callback``).
    """
    root = ET.Element("config")
    system_el = ET.SubElement(root, "system", xmlns="urn:brocade.com:mgmt:brocade-ras")
    attrs_el = ET.SubElement(system_el, "switch-attributes")
    name_el = ET.SubElement(attrs_el, "host-name")
    name_el.text = kwargs.pop('host_name')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[system] assign[=] call[name[ET].SubElement, parameter[name[config], constant[system]]]
variable[switch_attributes] assign[=] call[name[ET].SubElement, parameter[name[system], constant[switch-attributes]]]
variable[host_name] assign[=] call[name[ET].SubElement, parameter[name[switch_attributes], constant[host-name]]]
name[host_name].text assign[=] call[name[kwargs].pop, parameter[constant[host_name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[system_switch_attributes_host_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[system] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[switch_attributes] = identifier[ET] . identifier[SubElement] ( identifier[system] , literal[string] )
identifier[host_name] = identifier[ET] . identifier[SubElement] ( identifier[switch_attributes] , literal[string] )
identifier[host_name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def system_switch_attributes_host_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
system = ET.SubElement(config, 'system', xmlns='urn:brocade.com:mgmt:brocade-ras')
switch_attributes = ET.SubElement(system, 'switch-attributes')
host_name = ET.SubElement(switch_attributes, 'host-name')
host_name.text = kwargs.pop('host_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def convert_to_11(self):
    """
    converts the iocs in self.iocs from openioc 1.0 to openioc 1.1 format.
    the converted iocs are stored in the dictionary self.iocs_11

    Returns False if there are no IOCs to convert; otherwise returns the
    list of IOC ids that failed conversion (empty list means full success).
    """
    if len(self) < 1:
        log.error('No iocs available to modify.')
        return False
    log.info('Converting IOCs from 1.0 to 1.1')
    errors = []
    for iocid in self.iocs:
        ioc_xml = self.iocs[iocid]
        root = ioc_xml.getroot()
        # A 1.0 document must be rooted at <ioc>; anything else is skipped.
        if root.tag != 'ioc':
            log.error('IOC root is not "ioc" [%s].' % str(iocid))
            errors.append(iocid)
            continue
        # Pull the 1.0 metadata fields; findtext returns None when absent.
        name_10 = root.findtext('.//short_description')
        keywords_10 = root.findtext('.//keywords')
        description_10 = root.findtext('.//description')
        author_10 = root.findtext('.//authored_by')
        created_date_10 = root.findtext('.//authored_date')
        last_modified_date_10 = root.get('last-modified', None)
        # Timestamps in 1.0 may carry a trailing 'Z'; strip it for 1.1.
        # NOTE(review): if <authored_date> is missing, created_date_10 is
        # None and the rstrip below would raise AttributeError -- confirm
        # the field is mandatory in 1.0 documents.
        if last_modified_date_10:
            last_modified_date_10 = last_modified_date_10.rstrip('Z')
        created_date_10 = created_date_10.rstrip('Z')
        links_10 = []
        # NOTE(review): '//link' searches from the document root, not the
        # current node -- presumably equivalent here since root is the
        # document; confirm intent vs. './/link'.
        for link in root.xpath('//link'):
            link_rel = link.get('rel', None)
            link_text = link.text
            links_10.append((link_rel, link_text, None))
        # get ioc_logic
        try:
            ioc_logic = root.xpath('.//definition')[0]
        except IndexError:
            log.exception(
                'Could not find definition nodes for IOC [%s]. Did you attempt to convert OpenIOC 1.1 iocs?' % str(
                    iocid))
            errors.append(iocid)
            continue
        # create 1.1 ioc obj
        ioc_obj = ioc_api.IOC(name=name_10, description=description_10, author=author_10, links=links_10,
                              keywords=keywords_10, iocid=iocid)
        ioc_obj.set_lastmodified_date(last_modified_date_10)
        ioc_obj.set_created_date(created_date_10)
        # comment_dict collects node-id -> comment pairs emitted while
        # walking the 1.0 logic tree; they become 1.1 parameters below.
        comment_dict = {}
        tlo_10 = ioc_logic.getchildren()[0]
        try:
            self.convert_branch(tlo_10, ioc_obj.top_level_indicator, comment_dict)
        except UpgradeError:
            log.exception('Problem converting IOC [{}]'.format(iocid))
            errors.append(iocid)
            continue
        for node_id in comment_dict:
            ioc_obj.add_parameter(node_id, comment_dict[node_id])
        self.iocs_11[iocid] = ioc_obj
    return errors
constant[
converts the iocs in self.iocs from openioc 1.0 to openioc 1.1 format.
the converted iocs are stored in the dictionary self.iocs_11
]
if compare[call[name[len], parameter[name[self]]] less[<] constant[1]] begin[:]
call[name[log].error, parameter[constant[No iocs available to modify.]]]
return[constant[False]]
call[name[log].info, parameter[constant[Converting IOCs from 1.0 to 1.1]]]
variable[errors] assign[=] list[[]]
for taget[name[iocid]] in starred[name[self].iocs] begin[:]
variable[ioc_xml] assign[=] call[name[self].iocs][name[iocid]]
variable[root] assign[=] call[name[ioc_xml].getroot, parameter[]]
if compare[name[root].tag not_equal[!=] constant[ioc]] begin[:]
call[name[log].error, parameter[binary_operation[constant[IOC root is not "ioc" [%s].] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[iocid]]]]]]
call[name[errors].append, parameter[name[iocid]]]
continue
variable[name_10] assign[=] call[name[root].findtext, parameter[constant[.//short_description]]]
variable[keywords_10] assign[=] call[name[root].findtext, parameter[constant[.//keywords]]]
variable[description_10] assign[=] call[name[root].findtext, parameter[constant[.//description]]]
variable[author_10] assign[=] call[name[root].findtext, parameter[constant[.//authored_by]]]
variable[created_date_10] assign[=] call[name[root].findtext, parameter[constant[.//authored_date]]]
variable[last_modified_date_10] assign[=] call[name[root].get, parameter[constant[last-modified], constant[None]]]
if name[last_modified_date_10] begin[:]
variable[last_modified_date_10] assign[=] call[name[last_modified_date_10].rstrip, parameter[constant[Z]]]
variable[created_date_10] assign[=] call[name[created_date_10].rstrip, parameter[constant[Z]]]
variable[links_10] assign[=] list[[]]
for taget[name[link]] in starred[call[name[root].xpath, parameter[constant[//link]]]] begin[:]
variable[link_rel] assign[=] call[name[link].get, parameter[constant[rel], constant[None]]]
variable[link_text] assign[=] name[link].text
call[name[links_10].append, parameter[tuple[[<ast.Name object at 0x7da1b1044100>, <ast.Name object at 0x7da1b1044a60>, <ast.Constant object at 0x7da1b1021840>]]]]
<ast.Try object at 0x7da1b1020070>
variable[ioc_obj] assign[=] call[name[ioc_api].IOC, parameter[]]
call[name[ioc_obj].set_lastmodified_date, parameter[name[last_modified_date_10]]]
call[name[ioc_obj].set_created_date, parameter[name[created_date_10]]]
variable[comment_dict] assign[=] dictionary[[], []]
variable[tlo_10] assign[=] call[call[name[ioc_logic].getchildren, parameter[]]][constant[0]]
<ast.Try object at 0x7da1b1022950>
for taget[name[node_id]] in starred[name[comment_dict]] begin[:]
call[name[ioc_obj].add_parameter, parameter[name[node_id], call[name[comment_dict]][name[node_id]]]]
call[name[self].iocs_11][name[iocid]] assign[=] name[ioc_obj]
return[name[errors]] | keyword[def] identifier[convert_to_11] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] )< literal[int] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[log] . identifier[info] ( literal[string] )
identifier[errors] =[]
keyword[for] identifier[iocid] keyword[in] identifier[self] . identifier[iocs] :
identifier[ioc_xml] = identifier[self] . identifier[iocs] [ identifier[iocid] ]
identifier[root] = identifier[ioc_xml] . identifier[getroot] ()
keyword[if] identifier[root] . identifier[tag] != literal[string] :
identifier[log] . identifier[error] ( literal[string] % identifier[str] ( identifier[iocid] ))
identifier[errors] . identifier[append] ( identifier[iocid] )
keyword[continue]
identifier[name_10] = identifier[root] . identifier[findtext] ( literal[string] )
identifier[keywords_10] = identifier[root] . identifier[findtext] ( literal[string] )
identifier[description_10] = identifier[root] . identifier[findtext] ( literal[string] )
identifier[author_10] = identifier[root] . identifier[findtext] ( literal[string] )
identifier[created_date_10] = identifier[root] . identifier[findtext] ( literal[string] )
identifier[last_modified_date_10] = identifier[root] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[last_modified_date_10] :
identifier[last_modified_date_10] = identifier[last_modified_date_10] . identifier[rstrip] ( literal[string] )
identifier[created_date_10] = identifier[created_date_10] . identifier[rstrip] ( literal[string] )
identifier[links_10] =[]
keyword[for] identifier[link] keyword[in] identifier[root] . identifier[xpath] ( literal[string] ):
identifier[link_rel] = identifier[link] . identifier[get] ( literal[string] , keyword[None] )
identifier[link_text] = identifier[link] . identifier[text]
identifier[links_10] . identifier[append] (( identifier[link_rel] , identifier[link_text] , keyword[None] ))
keyword[try] :
identifier[ioc_logic] = identifier[root] . identifier[xpath] ( literal[string] )[ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[log] . identifier[exception] (
literal[string] % identifier[str] (
identifier[iocid] ))
identifier[errors] . identifier[append] ( identifier[iocid] )
keyword[continue]
identifier[ioc_obj] = identifier[ioc_api] . identifier[IOC] ( identifier[name] = identifier[name_10] , identifier[description] = identifier[description_10] , identifier[author] = identifier[author_10] , identifier[links] = identifier[links_10] ,
identifier[keywords] = identifier[keywords_10] , identifier[iocid] = identifier[iocid] )
identifier[ioc_obj] . identifier[set_lastmodified_date] ( identifier[last_modified_date_10] )
identifier[ioc_obj] . identifier[set_created_date] ( identifier[created_date_10] )
identifier[comment_dict] ={}
identifier[tlo_10] = identifier[ioc_logic] . identifier[getchildren] ()[ literal[int] ]
keyword[try] :
identifier[self] . identifier[convert_branch] ( identifier[tlo_10] , identifier[ioc_obj] . identifier[top_level_indicator] , identifier[comment_dict] )
keyword[except] identifier[UpgradeError] :
identifier[log] . identifier[exception] ( literal[string] . identifier[format] ( identifier[iocid] ))
identifier[errors] . identifier[append] ( identifier[iocid] )
keyword[continue]
keyword[for] identifier[node_id] keyword[in] identifier[comment_dict] :
identifier[ioc_obj] . identifier[add_parameter] ( identifier[node_id] , identifier[comment_dict] [ identifier[node_id] ])
identifier[self] . identifier[iocs_11] [ identifier[iocid] ]= identifier[ioc_obj]
keyword[return] identifier[errors] | def convert_to_11(self):
"""
converts the iocs in self.iocs from openioc 1.0 to openioc 1.1 format.
the converted iocs are stored in the dictionary self.iocs_11
"""
if len(self) < 1:
log.error('No iocs available to modify.')
return False # depends on [control=['if'], data=[]]
log.info('Converting IOCs from 1.0 to 1.1')
errors = []
for iocid in self.iocs:
ioc_xml = self.iocs[iocid]
root = ioc_xml.getroot()
if root.tag != 'ioc':
log.error('IOC root is not "ioc" [%s].' % str(iocid))
errors.append(iocid)
continue # depends on [control=['if'], data=[]]
name_10 = root.findtext('.//short_description')
keywords_10 = root.findtext('.//keywords')
description_10 = root.findtext('.//description')
author_10 = root.findtext('.//authored_by')
created_date_10 = root.findtext('.//authored_date')
last_modified_date_10 = root.get('last-modified', None)
if last_modified_date_10:
last_modified_date_10 = last_modified_date_10.rstrip('Z') # depends on [control=['if'], data=[]]
created_date_10 = created_date_10.rstrip('Z')
links_10 = []
for link in root.xpath('//link'):
link_rel = link.get('rel', None)
link_text = link.text
links_10.append((link_rel, link_text, None)) # depends on [control=['for'], data=['link']]
# get ioc_logic
try:
ioc_logic = root.xpath('.//definition')[0] # depends on [control=['try'], data=[]]
except IndexError:
log.exception('Could not find definition nodes for IOC [%s]. Did you attempt to convert OpenIOC 1.1 iocs?' % str(iocid))
errors.append(iocid)
continue # depends on [control=['except'], data=[]]
# create 1.1 ioc obj
ioc_obj = ioc_api.IOC(name=name_10, description=description_10, author=author_10, links=links_10, keywords=keywords_10, iocid=iocid)
ioc_obj.set_lastmodified_date(last_modified_date_10)
ioc_obj.set_created_date(created_date_10)
comment_dict = {}
tlo_10 = ioc_logic.getchildren()[0]
try:
self.convert_branch(tlo_10, ioc_obj.top_level_indicator, comment_dict) # depends on [control=['try'], data=[]]
except UpgradeError:
log.exception('Problem converting IOC [{}]'.format(iocid))
errors.append(iocid)
continue # depends on [control=['except'], data=[]]
for node_id in comment_dict:
ioc_obj.add_parameter(node_id, comment_dict[node_id]) # depends on [control=['for'], data=['node_id']]
self.iocs_11[iocid] = ioc_obj # depends on [control=['for'], data=['iocid']]
return errors |
def save_user(user, name, save=None):  # noqa: E501
    """Save a user

    Dispatch a user-save request to the named driver # noqa: E501

    :param user: Get user with this name
    :type user: str
    :param name: Get status of a driver with this name
    :type name: str
    :param save: The data needed to save this user
    :type save: dict | bytes

    :rtype: Response
    """
    if connexion.request.is_json:
        save = Save.from_dict(connexion.request.get_json())  # noqa: E501
    # Admin-only endpoint: a non-empty response here is an error response.
    response = errorIfUnauthorized(role='admin')
    if response:
        return response
    else:
        response = ApitaxResponse()
    driver: Driver = LoadedDrivers.getDriver(name)
    # NOTE(review): `save.script` looks like a copy-paste from the script
    # endpoint -- presumably the user payload was intended; confirm.
    # Also, if the request body is not JSON, `save` may still be None here
    # and `save.script` would raise AttributeError -- TODO confirm callers
    # always send JSON.
    user: User = mapUserToUser(save.script)
    if driver.saveApitaxUser(user):
        return Response(status=200, body=response.getResponseBody())
    return ErrorResponse(status=500, message='Failed to create user')
constant[Save a script
Save a script # noqa: E501
:param user: Get user with this name
:type user: str
:param name: Get status of a driver with this name
:type name: str
:param save: The data needed to save this user
:type save: dict | bytes
:rtype: Response
]
if name[connexion].request.is_json begin[:]
variable[save] assign[=] call[name[Save].from_dict, parameter[call[name[connexion].request.get_json, parameter[]]]]
variable[response] assign[=] call[name[errorIfUnauthorized], parameter[]]
if name[response] begin[:]
return[name[response]]
<ast.AnnAssign object at 0x7da20c76d810>
<ast.AnnAssign object at 0x7da20c76fbb0>
if call[name[driver].saveApitaxUser, parameter[name[user]]] begin[:]
return[call[name[Response], parameter[]]]
return[call[name[ErrorResponse], parameter[]]] | keyword[def] identifier[save_user] ( identifier[user] , identifier[name] , identifier[save] = keyword[None] ):
literal[string]
keyword[if] identifier[connexion] . identifier[request] . identifier[is_json] :
identifier[save] = identifier[Save] . identifier[from_dict] ( identifier[connexion] . identifier[request] . identifier[get_json] ())
identifier[response] = identifier[errorIfUnauthorized] ( identifier[role] = literal[string] )
keyword[if] identifier[response] :
keyword[return] identifier[response]
keyword[else] :
identifier[response] = identifier[ApitaxResponse] ()
identifier[driver] : identifier[Driver] = identifier[LoadedDrivers] . identifier[getDriver] ( identifier[name] )
identifier[user] : identifier[User] = identifier[mapUserToUser] ( identifier[save] . identifier[script] )
keyword[if] identifier[driver] . identifier[saveApitaxUser] ( identifier[user] ):
keyword[return] identifier[Response] ( identifier[status] = literal[int] , identifier[body] = identifier[response] . identifier[getResponseBody] ())
keyword[return] identifier[ErrorResponse] ( identifier[status] = literal[int] , identifier[message] = literal[string] ) | def save_user(user, name, save=None): # noqa: E501
'Save a script\n\n Save a script # noqa: E501\n\n :param user: Get user with this name\n :type user: str\n :param name: Get status of a driver with this name\n :type name: str\n :param save: The data needed to save this user\n :type save: dict | bytes\n\n :rtype: Response\n '
if connexion.request.is_json:
save = Save.from_dict(connexion.request.get_json()) # noqa: E501 # depends on [control=['if'], data=[]]
response = errorIfUnauthorized(role='admin')
if response:
return response # depends on [control=['if'], data=[]]
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(name)
user: User = mapUserToUser(save.script)
if driver.saveApitaxUser(user):
return Response(status=200, body=response.getResponseBody()) # depends on [control=['if'], data=[]]
return ErrorResponse(status=500, message='Failed to create user') |
def set_preferences(self, user=None, **kwargs):
    """Set preferences from keyword arguments.

    Existing preference rows are updated in place; missing keys get a new
    ``UserPreference`` row added to the session.  Defaults to the current
    user when *user* is not given.
    """
    if user is None:
        user = current_user
    existing = {pref.key: pref for pref in user.preferences}
    for key, value in kwargs.items():
        pref = existing.get(key)
        if pref is not None:
            pref.value = value
        else:
            pref = UserPreference(user=user, key=key, value=value)
            existing[key] = pref
            db.session.add(pref)
constant[Set preferences from keyword arguments.]
if compare[name[user] is constant[None]] begin[:]
variable[user] assign[=] name[current_user]
variable[d] assign[=] <ast.DictComp object at 0x7da18f7209d0>
for taget[tuple[[<ast.Name object at 0x7da18f723100>, <ast.Name object at 0x7da18f721960>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[name[k] in name[d]] begin[:]
call[name[d]][name[k]].value assign[=] name[v] | keyword[def] identifier[set_preferences] ( identifier[self] , identifier[user] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[user] keyword[is] keyword[None] :
identifier[user] = identifier[current_user]
identifier[d] ={ identifier[pref] . identifier[key] : identifier[pref] keyword[for] identifier[pref] keyword[in] identifier[user] . identifier[preferences] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[d] :
identifier[d] [ identifier[k] ]. identifier[value] = identifier[v]
keyword[else] :
identifier[d] [ identifier[k] ]= identifier[UserPreference] ( identifier[user] = identifier[user] , identifier[key] = identifier[k] , identifier[value] = identifier[v] )
identifier[db] . identifier[session] . identifier[add] ( identifier[d] [ identifier[k] ]) | def set_preferences(self, user=None, **kwargs):
"""Set preferences from keyword arguments."""
if user is None:
user = current_user # depends on [control=['if'], data=['user']]
d = {pref.key: pref for pref in user.preferences}
for (k, v) in kwargs.items():
if k in d:
d[k].value = v # depends on [control=['if'], data=['k', 'd']]
else:
d[k] = UserPreference(user=user, key=k, value=v)
db.session.add(d[k]) # depends on [control=['for'], data=[]] |
def list_features(self):
    """List the features the pod supports.

    Returns a ``(status_code, response)`` tuple from the admin features
    endpoint.
    """
    call = self.__pod__.System.get_v1_admin_system_features_list(
        sessionToken=self.__session__)
    response, status_code = call.result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
constant[ list features the pod supports ]
<ast.Tuple object at 0x7da204565db0> assign[=] call[call[name[self].__pod__.System.get_v1_admin_system_features_list, parameter[]].result, parameter[]]
call[name[self].logger.debug, parameter[binary_operation[constant[%s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204564be0>, <ast.Name object at 0x7da204567a60>]]]]]
return[tuple[[<ast.Name object at 0x7da18ede5150>, <ast.Name object at 0x7da18ede5f60>]]] | keyword[def] identifier[list_features] ( identifier[self] ):
literal[string]
identifier[response] , identifier[status_code] = identifier[self] . identifier[__pod__] . identifier[System] . identifier[get_v1_admin_system_features_list] (
identifier[sessionToken] = identifier[self] . identifier[__session__]
). identifier[result] ()
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[status_code] , identifier[response] ))
keyword[return] identifier[status_code] , identifier[response] | def list_features(self):
""" list features the pod supports """
(response, status_code) = self.__pod__.System.get_v1_admin_system_features_list(sessionToken=self.__session__).result()
self.logger.debug('%s: %s' % (status_code, response))
return (status_code, response) |
def callback(self, event):
    """Select the clicked cell, clearing any previously marked cells."""
    self.init_width()
    # Repaint (and forget) any cells highlighted from a prior selection.
    if self.initial:
        for cell in self.initial:
            col, row = cell[0], cell[1]
            self.color_square(col, row, True)
        self.initial = []
    # Remember where this drag started, then highlight the clicked square.
    self.begin_drag = event
    self.color_square(event.x, event.y)
constant[
Selects cells on click.
]
call[name[self].init_width, parameter[]]
if compare[call[name[len], parameter[name[self].initial]] greater[>] constant[0]] begin[:]
for taget[name[cell]] in starred[name[self].initial] begin[:]
call[name[self].color_square, parameter[call[name[cell]][constant[0]], call[name[cell]][constant[1]], constant[True]]]
name[self].initial assign[=] list[[]]
name[self].begin_drag assign[=] name[event]
call[name[self].color_square, parameter[name[event].x, name[event].y]] | keyword[def] identifier[callback] ( identifier[self] , identifier[event] ):
literal[string]
identifier[self] . identifier[init_width] ()
keyword[if] identifier[len] ( identifier[self] . identifier[initial] )> literal[int] :
keyword[for] identifier[cell] keyword[in] identifier[self] . identifier[initial] :
identifier[self] . identifier[color_square] ( identifier[cell] [ literal[int] ], identifier[cell] [ literal[int] ], keyword[True] )
identifier[self] . identifier[initial] =[]
identifier[self] . identifier[begin_drag] = identifier[event]
identifier[self] . identifier[color_square] ( identifier[event] . identifier[x] , identifier[event] . identifier[y] ) | def callback(self, event):
"""
Selects cells on click.
"""
self.init_width()
if len(self.initial) > 0:
for cell in self.initial:
self.color_square(cell[0], cell[1], True) # depends on [control=['for'], data=['cell']]
self.initial = [] # depends on [control=['if'], data=[]]
self.begin_drag = event
self.color_square(event.x, event.y) |
def add_scope_ip(hostipaddress, name, description, auth, url, scopeid=None, network_address=None):
    """
    Function to add new host IP address allocation to existing scope ID
    :param hostipaddress: ipv4 address of the target host to be added to the target scope
    :param name: name of the owner of this host
    :param description: Description of the host
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param scopeid: integer of the desired scope id ( optional )
    :param network_address: ipv4 network address + subnet bits of target scope
    :return: int HTTP status code (200 created, 409 already exists), an
        error string on request failure, or None for other status codes
    :rtype: int | str | None
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_host = add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url)
    """
    # Resolve the scope id from the network address when one is supplied.
    if network_address is not None:
        scopeid = get_scope_id(network_address, auth, url)
        if scopeid == "Scope Doesn't Exist":
            return scopeid
    new_ip = {"ip": hostipaddress,
              "name": name,
              "description": description}
    f_url = url + '/imcrs/res/access/assignedIpScope/ip?ipScopeId=' + str(scopeid)
    payload = json.dumps(new_ip)
    # BUGFIX: the POST must be inside the try -- previously the request was
    # issued before the try block, so the RequestException handler could
    # never fire.
    try:
        response = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)
        if response.status_code == 200:
            # IP Host Successfully Created
            return response.status_code
        elif response.status_code == 409:
            # IP Host Already Exists
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " add_ip_scope: An Error has occured"
constant[
Function to add new host IP address allocation to existing scope ID
:param hostipaddress: ipv4 address of the target host to be added to the target scope
:param name: name of the owner of this host
:param description: Description of the host
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param scopeid: integer of the desired scope id ( optional )
:param network_address: ipv4 network address + subnet bits of target scope
:return:
:rtype:
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_host = add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url)
]
if compare[name[network_address] is_not constant[None]] begin[:]
variable[scopeid] assign[=] call[name[get_scope_id], parameter[name[network_address], name[auth], name[url]]]
if compare[name[scopeid] equal[==] constant[Scope Doesn't Exist]] begin[:]
return[name[scopeid]]
variable[new_ip] assign[=] dictionary[[<ast.Constant object at 0x7da18f09e230>, <ast.Constant object at 0x7da18f09c2b0>, <ast.Constant object at 0x7da18f09fca0>], [<ast.Name object at 0x7da18f09c0d0>, <ast.Name object at 0x7da18f09c130>, <ast.Name object at 0x7da18f09d5a0>]]
variable[f_url] assign[=] binary_operation[binary_operation[name[url] + constant[/imcrs/res/access/assignedIpScope/ip?ipScopeId=]] + call[name[str], parameter[name[scopeid]]]]
variable[payload] assign[=] call[name[json].dumps, parameter[name[new_ip]]]
variable[response] assign[=] call[name[requests].post, parameter[name[f_url]]]
<ast.Try object at 0x7da18f09e050> | keyword[def] identifier[add_scope_ip] ( identifier[hostipaddress] , identifier[name] , identifier[description] , identifier[auth] , identifier[url] , identifier[scopeid] = keyword[None] , identifier[network_address] = keyword[None] ):
literal[string]
keyword[if] identifier[network_address] keyword[is] keyword[not] keyword[None] :
identifier[scopeid] = identifier[get_scope_id] ( identifier[network_address] , identifier[auth] , identifier[url] )
keyword[if] identifier[scopeid] == literal[string] :
keyword[return] identifier[scopeid]
identifier[new_ip] ={ literal[string] : identifier[hostipaddress] ,
literal[string] : identifier[name] ,
literal[string] : identifier[description] }
identifier[f_url] = identifier[url] + literal[string] + identifier[str] ( identifier[scopeid] )
identifier[payload] = identifier[json] . identifier[dumps] ( identifier[new_ip] )
identifier[response] = identifier[requests] . identifier[post] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[HEADERS] , identifier[data] = identifier[payload] )
keyword[try] :
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[response] . identifier[status_code]
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[response] . identifier[status_code]
keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[error] :
keyword[return] literal[string] + identifier[str] ( identifier[error] )+ literal[string] | def add_scope_ip(hostipaddress, name, description, auth, url, scopeid=None, network_address=None):
"""
Function to add new host IP address allocation to existing scope ID
:param hostipaddress: ipv4 address of the target host to be added to the target scope
:param name: name of the owner of this host
:param description: Description of the host
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param scopeid: integer of the desired scope id ( optional )
:param network_address: ipv4 network address + subnet bits of target scope
:return:
:rtype:
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_host = add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url)
"""
if network_address is not None:
scopeid = get_scope_id(network_address, auth, url)
if scopeid == "Scope Doesn't Exist":
return scopeid # depends on [control=['if'], data=['scopeid']] # depends on [control=['if'], data=['network_address']]
new_ip = {'ip': hostipaddress, 'name': name, 'description': description}
f_url = url + '/imcrs/res/access/assignedIpScope/ip?ipScopeId=' + str(scopeid)
payload = json.dumps(new_ip)
response = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)
try:
if response.status_code == 200:
# print("IP Host Successfully Created")
return response.status_code # depends on [control=['if'], data=[]]
elif response.status_code == 409:
# print("IP Host Already Exists")
return response.status_code # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except requests.exceptions.RequestException as error:
return 'Error:\n' + str(error) + ' add_ip_scope: An Error has occured' # depends on [control=['except'], data=['error']] |
async def read(cls, fabric: Union[Fabric, int]):
    """Get the list of `Vlan`'s defined on `fabric`.

    :param fabric: Fabric to get all VLAN's for; either a `Fabric`
        object or its integer id.
    :type fabric: `Fabric` or `int`
    :raises TypeError: if `fabric` is neither a `Fabric` nor an `int`.
    """
    # Normalise the argument down to a plain integer fabric id.
    if isinstance(fabric, int):
        fabric_id = fabric
    elif isinstance(fabric, Fabric):
        fabric_id = fabric.id
    else:
        raise TypeError(
            "fabric must be a Fabric or int, not %s"
            % type(fabric).__name__)
    data = await cls._handler.read(fabric_id=fabric_id)
    # Wrap each raw item in the managed object type; the fabric id is injected
    # as local data (presumably because the payload items lack it — confirm).
    return cls(
        cls._object(
            item, local_data={"fabric_id": fabric_id})
        for item in data) | <ast.AsyncFunctionDef object at 0x7da20c990280>
literal[string]
keyword[if] identifier[isinstance] ( identifier[fabric] , identifier[int] ):
identifier[fabric_id] = identifier[fabric]
keyword[elif] identifier[isinstance] ( identifier[fabric] , identifier[Fabric] ):
identifier[fabric_id] = identifier[fabric] . identifier[id]
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string]
% identifier[type] ( identifier[fabric] ). identifier[__name__] )
identifier[data] = keyword[await] identifier[cls] . identifier[_handler] . identifier[read] ( identifier[fabric_id] = identifier[fabric_id] )
keyword[return] identifier[cls] (
identifier[cls] . identifier[_object] (
identifier[item] , identifier[local_data] ={ literal[string] : identifier[fabric_id] })
keyword[for] identifier[item] keyword[in] identifier[data] ) | async def read(cls, fabric: Union[Fabric, int]):
"""Get list of `Vlan`'s for `fabric`.
:param fabric: Fabric to get all VLAN's for.
:type fabric: `Fabric` or `int`
"""
if isinstance(fabric, int):
fabric_id = fabric # depends on [control=['if'], data=[]]
elif isinstance(fabric, Fabric):
fabric_id = fabric.id # depends on [control=['if'], data=[]]
else:
raise TypeError('fabric must be a Fabric or int, not %s' % type(fabric).__name__)
data = await cls._handler.read(fabric_id=fabric_id)
return cls((cls._object(item, local_data={'fabric_id': fabric_id}) for item in data)) |
def recognize_array(self, byte_array):
    """Ask OpenALPR to recognize an image passed in as a byte array.

    :param byte_array: Raw image data; must be a ``bytes`` object
        (a ``str`` on Python 2).
    :return: An OpenALPR analysis in the form of a response dictionary
        (the library's JSON output, decoded).
    :raises TypeError: if ``byte_array`` is not a bytes object.
    """
    if type(byte_array) != bytes:
        raise TypeError("Expected a byte array (string in Python 2, bytes in Python 3)")
    # View the Python bytes buffer as a C unsigned-char pointer for the native call.
    pb = ctypes.cast(byte_array, ctypes.POINTER(ctypes.c_ubyte))
    ptr = self._recognize_array_func(self.alpr_pointer, pb, len(byte_array))
    # The native library returns a pointer to a JSON C string: copy the text out...
    json_data = ctypes.cast(ptr, ctypes.c_char_p).value
    json_data = _convert_from_charp(json_data)
    response_obj = json.loads(json_data)
    # ...then hand the pointer back so the library can free its allocation.
    self._free_json_mem_func(ctypes.c_void_p(ptr))
    return response_obj | def function[recognize_array, parameter[self, byte_array]]:
constant[
This causes OpenALPR to attempt to recognize an image passed in as a byte array.
:param byte_array: This should be a string (Python 2) or a bytes object (Python 3)
:return: An OpenALPR analysis in the form of a response dictionary
]
if compare[call[name[type], parameter[name[byte_array]]] not_equal[!=] name[bytes]] begin[:]
<ast.Raise object at 0x7da2041da680>
variable[pb] assign[=] call[name[ctypes].cast, parameter[name[byte_array], call[name[ctypes].POINTER, parameter[name[ctypes].c_ubyte]]]]
variable[ptr] assign[=] call[name[self]._recognize_array_func, parameter[name[self].alpr_pointer, name[pb], call[name[len], parameter[name[byte_array]]]]]
variable[json_data] assign[=] call[name[ctypes].cast, parameter[name[ptr], name[ctypes].c_char_p]].value
variable[json_data] assign[=] call[name[_convert_from_charp], parameter[name[json_data]]]
variable[response_obj] assign[=] call[name[json].loads, parameter[name[json_data]]]
call[name[self]._free_json_mem_func, parameter[call[name[ctypes].c_void_p, parameter[name[ptr]]]]]
return[name[response_obj]] | keyword[def] identifier[recognize_array] ( identifier[self] , identifier[byte_array] ):
literal[string]
keyword[if] identifier[type] ( identifier[byte_array] )!= identifier[bytes] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[pb] = identifier[ctypes] . identifier[cast] ( identifier[byte_array] , identifier[ctypes] . identifier[POINTER] ( identifier[ctypes] . identifier[c_ubyte] ))
identifier[ptr] = identifier[self] . identifier[_recognize_array_func] ( identifier[self] . identifier[alpr_pointer] , identifier[pb] , identifier[len] ( identifier[byte_array] ))
identifier[json_data] = identifier[ctypes] . identifier[cast] ( identifier[ptr] , identifier[ctypes] . identifier[c_char_p] ). identifier[value]
identifier[json_data] = identifier[_convert_from_charp] ( identifier[json_data] )
identifier[response_obj] = identifier[json] . identifier[loads] ( identifier[json_data] )
identifier[self] . identifier[_free_json_mem_func] ( identifier[ctypes] . identifier[c_void_p] ( identifier[ptr] ))
keyword[return] identifier[response_obj] | def recognize_array(self, byte_array):
"""
This causes OpenALPR to attempt to recognize an image passed in as a byte array.
:param byte_array: This should be a string (Python 2) or a bytes object (Python 3)
:return: An OpenALPR analysis in the form of a response dictionary
"""
if type(byte_array) != bytes:
raise TypeError('Expected a byte array (string in Python 2, bytes in Python 3)') # depends on [control=['if'], data=[]]
pb = ctypes.cast(byte_array, ctypes.POINTER(ctypes.c_ubyte))
ptr = self._recognize_array_func(self.alpr_pointer, pb, len(byte_array))
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
json_data = _convert_from_charp(json_data)
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj |
def per_delta(start: datetime, end: datetime, delta: timedelta):
    """
    Iterates over time range in steps specified in delta.
    :param start: Start of time range (inclusive)
    :param end: End of time range (exclusive)
    :param delta: Step interval
    :return: Iterable collection of [(start+td*0, start+td*1), (start+td*1, start+td*2), ..., end)
    """
    curr = start
    # Yield consecutive (window_start, window_end) pairs.  Only the window
    # start is compared against `end`, so the final window's end may extend
    # past `end`.
    while curr < end:
        curr_end = curr + delta
        yield curr, curr_end
        curr = curr_end | def function[per_delta, parameter[start, end, delta]]:
constant[
Iterates over time range in steps specified in delta.
:param start: Start of time range (inclusive)
:param end: End of time range (exclusive)
:param delta: Step interval
:return: Iterable collection of [(start+td*0, start+td*1), (start+td*1, start+td*2), ..., end)
]
variable[curr] assign[=] name[start]
while compare[name[curr] less[<] name[end]] begin[:]
variable[curr_end] assign[=] binary_operation[name[curr] + name[delta]]
<ast.Yield object at 0x7da1b10efc40>
variable[curr] assign[=] name[curr_end] | keyword[def] identifier[per_delta] ( identifier[start] : identifier[datetime] , identifier[end] : identifier[datetime] , identifier[delta] : identifier[timedelta] ):
literal[string]
identifier[curr] = identifier[start]
keyword[while] identifier[curr] < identifier[end] :
identifier[curr_end] = identifier[curr] + identifier[delta]
keyword[yield] identifier[curr] , identifier[curr_end]
identifier[curr] = identifier[curr_end] | def per_delta(start: datetime, end: datetime, delta: timedelta):
"""
Iterates over time range in steps specified in delta.
:param start: Start of time range (inclusive)
:param end: End of time range (exclusive)
:param delta: Step interval
:return: Iterable collection of [(start+td*0, start+td*1), (start+td*1, start+td*2), ..., end)
"""
curr = start
while curr < end:
curr_end = curr + delta
yield (curr, curr_end)
curr = curr_end # depends on [control=['while'], data=['curr']] |
def bots_update(self, bot):
    """
    Update Bot
    :param bot: bot object to update
    :type bot: Bot
    """
    # Issue a PUT to the /bots/<name> resource: the bot name selects the URL
    # path (via dynamic attribute access on the client) and is also repeated
    # as the `botName` query parameter; the body is the bot serialised to JSON.
    self.client.bots.__getattr__(bot.name).__call__(_method="PUT", _json=bot.to_json(), _params=dict(botName=bot.name)) | def function[bots_update, parameter[self, bot]]:
constant[
Update Bot
:param bot: bot object to update
:type bot: Bot
]
call[call[name[self].client.bots.__getattr__, parameter[name[bot].name]].__call__, parameter[]] | keyword[def] identifier[bots_update] ( identifier[self] , identifier[bot] ):
literal[string]
identifier[self] . identifier[client] . identifier[bots] . identifier[__getattr__] ( identifier[bot] . identifier[name] ). identifier[__call__] ( identifier[_method] = literal[string] , identifier[_json] = identifier[bot] . identifier[to_json] (), identifier[_params] = identifier[dict] ( identifier[botName] = identifier[bot] . identifier[name] )) | def bots_update(self, bot):
"""
Update Bot
:param bot: bot object to update
:type bot: Bot
"""
self.client.bots.__getattr__(bot.name).__call__(_method='PUT', _json=bot.to_json(), _params=dict(botName=bot.name)) |
def compute_jaccard_index(x_set, y_set):
    """Return the Jaccard similarity coefficient of 2 given sets.
    Args:
        x_set (set): first set.
        y_set (set): second set.
    Returns:
        float: Jaccard similarity coefficient, in [0.0, 1.0].
    """
    # An empty (or None) operand means zero similarity; this also avoids a
    # 0/0 division when both sets are empty.
    if not x_set or not y_set:
        return 0.0
    intersection_cardinal = len(x_set & y_set)
    union_cardinal = len(x_set | y_set)
    # float() keeps the division true (non-integer) on Python 2 as well.
    return intersection_cardinal / float(union_cardinal) | def function[compute_jaccard_index, parameter[x_set, y_set]]:
constant[Return the Jaccard similarity coefficient of 2 given sets.
Args:
x_set (set): first set.
y_set (set): second set.
Returns:
float: Jaccard similarity coefficient.
]
if <ast.BoolOp object at 0x7da20e956170> begin[:]
return[constant[0.0]]
variable[intersection_cardinal] assign[=] call[name[len], parameter[binary_operation[name[x_set] <ast.BitAnd object at 0x7da2590d6b60> name[y_set]]]]
variable[union_cardinal] assign[=] call[name[len], parameter[binary_operation[name[x_set] <ast.BitOr object at 0x7da2590d6aa0> name[y_set]]]]
return[binary_operation[name[intersection_cardinal] / call[name[float], parameter[name[union_cardinal]]]]] | keyword[def] identifier[compute_jaccard_index] ( identifier[x_set] , identifier[y_set] ):
literal[string]
keyword[if] keyword[not] identifier[x_set] keyword[or] keyword[not] identifier[y_set] :
keyword[return] literal[int]
identifier[intersection_cardinal] = identifier[len] ( identifier[x_set] & identifier[y_set] )
identifier[union_cardinal] = identifier[len] ( identifier[x_set] | identifier[y_set] )
keyword[return] identifier[intersection_cardinal] / identifier[float] ( identifier[union_cardinal] ) | def compute_jaccard_index(x_set, y_set):
"""Return the Jaccard similarity coefficient of 2 given sets.
Args:
x_set (set): first set.
y_set (set): second set.
Returns:
float: Jaccard similarity coefficient.
"""
if not x_set or not y_set:
return 0.0 # depends on [control=['if'], data=[]]
intersection_cardinal = len(x_set & y_set)
union_cardinal = len(x_set | y_set)
return intersection_cardinal / float(union_cardinal) |
def b58encode(v):
    '''Encode a bytes object using Base58.

    Leading zero bytes are preserved: each is emitted as one leading
    `alphabet[0]` character, matching the Base58Check convention.
    Raises TypeError if *v* is not a bytes-like object.
    '''
    if not isinstance(v, bytes):
        raise TypeError("a bytes-like object is required, not '%s'" %
                        type(v).__name__)
    origlen = len(v)
    # Strip leading NUL bytes; they are re-added as alphabet[0] chars below.
    v = v.lstrip(b'\0')
    newlen = len(v)
    # Fold the remaining bytes into one big-endian integer: iterate the bytes
    # least-significant first, multiplying each by the running power of 256.
    p, acc = 1, 0
    for c in iseq(reversed(v)):
        acc += p * c
        p = p << 8
    result = b58encode_int(acc, default_one=False)
    return (alphabet[0] * (origlen - newlen) + result) | def function[b58encode, parameter[v]]:
constant[Encode a string using Base58]
if <ast.UnaryOp object at 0x7da207f01270> begin[:]
<ast.Raise object at 0x7da207f00400>
variable[origlen] assign[=] call[name[len], parameter[name[v]]]
variable[v] assign[=] call[name[v].lstrip, parameter[constant[b'\x00']]]
variable[newlen] assign[=] call[name[len], parameter[name[v]]]
<ast.Tuple object at 0x7da207f00a30> assign[=] tuple[[<ast.Constant object at 0x7da207f00970>, <ast.Constant object at 0x7da207f01420>]]
for taget[name[c]] in starred[call[name[iseq], parameter[call[name[reversed], parameter[name[v]]]]]] begin[:]
<ast.AugAssign object at 0x7da207f005e0>
variable[p] assign[=] binary_operation[name[p] <ast.LShift object at 0x7da2590d69e0> constant[8]]
variable[result] assign[=] call[name[b58encode_int], parameter[name[acc]]]
return[binary_operation[binary_operation[call[name[alphabet]][constant[0]] * binary_operation[name[origlen] - name[newlen]]] + name[result]]] | keyword[def] identifier[b58encode] ( identifier[v] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[bytes] ):
keyword[raise] identifier[TypeError] ( literal[string] %
identifier[type] ( identifier[v] ). identifier[__name__] )
identifier[origlen] = identifier[len] ( identifier[v] )
identifier[v] = identifier[v] . identifier[lstrip] ( literal[string] )
identifier[newlen] = identifier[len] ( identifier[v] )
identifier[p] , identifier[acc] = literal[int] , literal[int]
keyword[for] identifier[c] keyword[in] identifier[iseq] ( identifier[reversed] ( identifier[v] )):
identifier[acc] += identifier[p] * identifier[c]
identifier[p] = identifier[p] << literal[int]
identifier[result] = identifier[b58encode_int] ( identifier[acc] , identifier[default_one] = keyword[False] )
keyword[return] ( identifier[alphabet] [ literal[int] ]*( identifier[origlen] - identifier[newlen] )+ identifier[result] ) | def b58encode(v):
"""Encode a string using Base58"""
if not isinstance(v, bytes):
raise TypeError("a bytes-like object is required, not '%s'" % type(v).__name__) # depends on [control=['if'], data=[]]
origlen = len(v)
v = v.lstrip(b'\x00')
newlen = len(v)
(p, acc) = (1, 0)
for c in iseq(reversed(v)):
acc += p * c
p = p << 8 # depends on [control=['for'], data=['c']]
result = b58encode_int(acc, default_one=False)
return alphabet[0] * (origlen - newlen) + result |
def _lsm_fix_strip_offsets(self):
    """Unwrap strip offsets for LSM files greater than 4 GB.

    LSM stores strip offsets as 32-bit values, so offsets past 4 GB wrap
    around; this walks the pages in acquisition order and adds multiples of
    2**32 whenever an offset decreases.
    Each series and position require separate unwrapping (undocumented).
    """
    # Nothing to do if the file cannot contain offsets beyond 32 bits.
    if self.filehandle.size < 2**32:
        return
    pages = self.pages
    npages = len(pages)
    series = self.series[0]
    axes = series.axes
    # find positions (product of any leading P/M axes)
    positions = 1
    for i in 0, 1:
        if series.axes[i] in 'PM':
            positions *= series.shape[i]
    # make time axis first
    if positions > 1:
        ntimes = 0
        for i in 1, 2:
            if axes[i] == 'T':
                ntimes = series.shape[i]
                break
        # NOTE(review): if positions > 1 but no 'T' axis is found, `indices`
        # is never assigned and the loop below raises NameError — confirm the
        # caller guarantees a time axis in this configuration.
        if ntimes:
            # Pages come in pairs (full image + reduced image), hence the 2.
            div, mod = divmod(npages, 2*positions*ntimes)
            assert mod == 0
            shape = (positions, ntimes, div, 2)
            indices = numpy.arange(product(shape)).reshape(shape)
            indices = numpy.moveaxis(indices, 1, 0)
    else:
        indices = numpy.arange(npages).reshape(-1, 2)
    # images of reduced page might be stored first
    if pages[0]._offsetscounts[0][0] > pages[1]._offsetscounts[0][0]:
        indices = indices[..., ::-1]
    # unwrap offsets: a decrease relative to the previous offset signals a
    # 32-bit wrap-around, so bump the correction by 2**32.
    wrap = 0
    previousoffset = 0
    for i in indices.flat:
        page = pages[int(i)]
        dataoffsets = []
        for currentoffset in page._offsetscounts[0]:
            if currentoffset < previousoffset:
                wrap += 2**32
            dataoffsets.append(currentoffset + wrap)
            previousoffset = currentoffset
        page._offsetscounts = dataoffsets, page._offsetscounts[1] | def function[_lsm_fix_strip_offsets, parameter[self]]:
constant[Unwrap strip offsets for LSM files greater than 4 GB.
Each series and position require separate unwrapping (undocumented).
]
if compare[name[self].filehandle.size less[<] binary_operation[constant[2] ** constant[32]]] begin[:]
return[None]
variable[pages] assign[=] name[self].pages
variable[npages] assign[=] call[name[len], parameter[name[pages]]]
variable[series] assign[=] call[name[self].series][constant[0]]
variable[axes] assign[=] name[series].axes
variable[positions] assign[=] constant[1]
for taget[name[i]] in starred[tuple[[<ast.Constant object at 0x7da1b1859060>, <ast.Constant object at 0x7da1b1858400>]]] begin[:]
if compare[call[name[series].axes][name[i]] in constant[PM]] begin[:]
<ast.AugAssign object at 0x7da1b185b160>
if compare[name[positions] greater[>] constant[1]] begin[:]
variable[ntimes] assign[=] constant[0]
for taget[name[i]] in starred[tuple[[<ast.Constant object at 0x7da1b1924b50>, <ast.Constant object at 0x7da1b1924b20>]]] begin[:]
if compare[call[name[axes]][name[i]] equal[==] constant[T]] begin[:]
variable[ntimes] assign[=] call[name[series].shape][name[i]]
break
if name[ntimes] begin[:]
<ast.Tuple object at 0x7da1b19243d0> assign[=] call[name[divmod], parameter[name[npages], binary_operation[binary_operation[constant[2] * name[positions]] * name[ntimes]]]]
assert[compare[name[mod] equal[==] constant[0]]]
variable[shape] assign[=] tuple[[<ast.Name object at 0x7da1b1925ab0>, <ast.Name object at 0x7da1b1925b70>, <ast.Name object at 0x7da1b1925ae0>, <ast.Constant object at 0x7da1b1925b10>]]
variable[indices] assign[=] call[call[name[numpy].arange, parameter[call[name[product], parameter[name[shape]]]]].reshape, parameter[name[shape]]]
variable[indices] assign[=] call[name[numpy].moveaxis, parameter[name[indices], constant[1], constant[0]]]
if compare[call[call[call[name[pages]][constant[0]]._offsetscounts][constant[0]]][constant[0]] greater[>] call[call[call[name[pages]][constant[1]]._offsetscounts][constant[0]]][constant[0]]] begin[:]
variable[indices] assign[=] call[name[indices]][tuple[[<ast.Constant object at 0x7da1b18aa4a0>, <ast.Slice object at 0x7da1b18aa260>]]]
variable[wrap] assign[=] constant[0]
variable[previousoffset] assign[=] constant[0]
for taget[name[i]] in starred[name[indices].flat] begin[:]
variable[page] assign[=] call[name[pages]][call[name[int], parameter[name[i]]]]
variable[dataoffsets] assign[=] list[[]]
for taget[name[currentoffset]] in starred[call[name[page]._offsetscounts][constant[0]]] begin[:]
if compare[name[currentoffset] less[<] name[previousoffset]] begin[:]
<ast.AugAssign object at 0x7da1b18a9630>
call[name[dataoffsets].append, parameter[binary_operation[name[currentoffset] + name[wrap]]]]
variable[previousoffset] assign[=] name[currentoffset]
name[page]._offsetscounts assign[=] tuple[[<ast.Name object at 0x7da1b18a9180>, <ast.Subscript object at 0x7da1b18ab0d0>]] | keyword[def] identifier[_lsm_fix_strip_offsets] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[filehandle] . identifier[size] < literal[int] ** literal[int] :
keyword[return]
identifier[pages] = identifier[self] . identifier[pages]
identifier[npages] = identifier[len] ( identifier[pages] )
identifier[series] = identifier[self] . identifier[series] [ literal[int] ]
identifier[axes] = identifier[series] . identifier[axes]
identifier[positions] = literal[int]
keyword[for] identifier[i] keyword[in] literal[int] , literal[int] :
keyword[if] identifier[series] . identifier[axes] [ identifier[i] ] keyword[in] literal[string] :
identifier[positions] *= identifier[series] . identifier[shape] [ identifier[i] ]
keyword[if] identifier[positions] > literal[int] :
identifier[ntimes] = literal[int]
keyword[for] identifier[i] keyword[in] literal[int] , literal[int] :
keyword[if] identifier[axes] [ identifier[i] ]== literal[string] :
identifier[ntimes] = identifier[series] . identifier[shape] [ identifier[i] ]
keyword[break]
keyword[if] identifier[ntimes] :
identifier[div] , identifier[mod] = identifier[divmod] ( identifier[npages] , literal[int] * identifier[positions] * identifier[ntimes] )
keyword[assert] identifier[mod] == literal[int]
identifier[shape] =( identifier[positions] , identifier[ntimes] , identifier[div] , literal[int] )
identifier[indices] = identifier[numpy] . identifier[arange] ( identifier[product] ( identifier[shape] )). identifier[reshape] ( identifier[shape] )
identifier[indices] = identifier[numpy] . identifier[moveaxis] ( identifier[indices] , literal[int] , literal[int] )
keyword[else] :
identifier[indices] = identifier[numpy] . identifier[arange] ( identifier[npages] ). identifier[reshape] (- literal[int] , literal[int] )
keyword[if] identifier[pages] [ literal[int] ]. identifier[_offsetscounts] [ literal[int] ][ literal[int] ]> identifier[pages] [ literal[int] ]. identifier[_offsetscounts] [ literal[int] ][ literal[int] ]:
identifier[indices] = identifier[indices] [...,::- literal[int] ]
identifier[wrap] = literal[int]
identifier[previousoffset] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[indices] . identifier[flat] :
identifier[page] = identifier[pages] [ identifier[int] ( identifier[i] )]
identifier[dataoffsets] =[]
keyword[for] identifier[currentoffset] keyword[in] identifier[page] . identifier[_offsetscounts] [ literal[int] ]:
keyword[if] identifier[currentoffset] < identifier[previousoffset] :
identifier[wrap] += literal[int] ** literal[int]
identifier[dataoffsets] . identifier[append] ( identifier[currentoffset] + identifier[wrap] )
identifier[previousoffset] = identifier[currentoffset]
identifier[page] . identifier[_offsetscounts] = identifier[dataoffsets] , identifier[page] . identifier[_offsetscounts] [ literal[int] ] | def _lsm_fix_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB.
Each series and position require separate unwrapping (undocumented).
"""
if self.filehandle.size < 2 ** 32:
return # depends on [control=['if'], data=[]]
pages = self.pages
npages = len(pages)
series = self.series[0]
axes = series.axes
# find positions
positions = 1
for i in (0, 1):
if series.axes[i] in 'PM':
positions *= series.shape[i] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# make time axis first
if positions > 1:
ntimes = 0
for i in (1, 2):
if axes[i] == 'T':
ntimes = series.shape[i]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if ntimes:
(div, mod) = divmod(npages, 2 * positions * ntimes)
assert mod == 0
shape = (positions, ntimes, div, 2)
indices = numpy.arange(product(shape)).reshape(shape)
indices = numpy.moveaxis(indices, 1, 0) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['positions']]
else:
indices = numpy.arange(npages).reshape(-1, 2)
# images of reduced page might be stored first
if pages[0]._offsetscounts[0][0] > pages[1]._offsetscounts[0][0]:
indices = indices[..., ::-1] # depends on [control=['if'], data=[]]
# unwrap offsets
wrap = 0
previousoffset = 0
for i in indices.flat:
page = pages[int(i)]
dataoffsets = []
for currentoffset in page._offsetscounts[0]:
if currentoffset < previousoffset:
wrap += 2 ** 32 # depends on [control=['if'], data=[]]
dataoffsets.append(currentoffset + wrap)
previousoffset = currentoffset # depends on [control=['for'], data=['currentoffset']]
page._offsetscounts = (dataoffsets, page._offsetscounts[1]) # depends on [control=['for'], data=['i']] |
def collect_global_best(self, best_chromosome, best_fitness_function):
    """!
    @brief Stores the best chromosome and its fitness function's value.
    @param[in] best_chromosome (list): The best chromosome that were observed.
    @param[in] best_fitness_function (float): Fitness function value of the best chromosome.
    """
    # Recording the global best is optional; skip when the observer was not
    # configured to track it.
    if not self._need_global_best:
        return
    self._global_best_result['chromosome'].append(best_chromosome)
    self._global_best_result['fitness_function'].append(best_fitness_function) | def function[collect_global_best, parameter[self, best_chromosome, best_fitness_function]]:
constant[!
@brief Stores the best chromosome and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome that were observed.
@param[in] best_fitness_function (float): Fitness function value of the best chromosome.
]
if <ast.UnaryOp object at 0x7da1b01e12d0> begin[:]
return[None]
call[call[name[self]._global_best_result][constant[chromosome]].append, parameter[name[best_chromosome]]]
call[call[name[self]._global_best_result][constant[fitness_function]].append, parameter[name[best_fitness_function]]] | keyword[def] identifier[collect_global_best] ( identifier[self] , identifier[best_chromosome] , identifier[best_fitness_function] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_need_global_best] :
keyword[return]
identifier[self] . identifier[_global_best_result] [ literal[string] ]. identifier[append] ( identifier[best_chromosome] )
identifier[self] . identifier[_global_best_result] [ literal[string] ]. identifier[append] ( identifier[best_fitness_function] ) | def collect_global_best(self, best_chromosome, best_fitness_function):
"""!
@brief Stores the best chromosome and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome that were observed.
@param[in] best_fitness_function (float): Fitness function value of the best chromosome.
"""
if not self._need_global_best:
return # depends on [control=['if'], data=[]]
self._global_best_result['chromosome'].append(best_chromosome)
self._global_best_result['fitness_function'].append(best_fitness_function) |
def from_json(data):
    """Decode an event that the processor encoded as JSON.

    :param data: JSON string produced by the processor.
    :return: A reconstructed ``Event``; the body is decoded according to the
        embedded content type.
    Keys accessed with ``[]`` (``id``, ``method``, ``path``, ...) raise
    ``KeyError`` when absent, whereas ``fields`` and ``headers`` are optional.
    """
    parsed_data = json.loads(data)
    trigger = TriggerInfo(
        parsed_data['trigger']['class'],
        parsed_data['trigger']['kind'],
    )
    # extract content type, needed to decode body
    content_type = parsed_data['content_type']
    # NOTE(review): utcfromtimestamp yields a naive datetime assumed to be
    # UTC — confirm downstream consumers expect naive timestamps.
    return Event(body=Event.decode_body(parsed_data['body'], content_type),
                 content_type=content_type,
                 trigger=trigger,
                 fields=parsed_data.get('fields'),
                 headers=parsed_data.get('headers'),
                 _id=parsed_data['id'],
                 method=parsed_data['method'],
                 path=parsed_data['path'],
                 size=parsed_data['size'],
                 timestamp=datetime.datetime.utcfromtimestamp(parsed_data['timestamp']),
                 url=parsed_data['url'],
                 _type=parsed_data['type'],
                 type_version=parsed_data['type_version'],
                 version=parsed_data['version']) | def function[from_json, parameter[data]]:
constant[Decode event encoded as JSON by processor]
variable[parsed_data] assign[=] call[name[json].loads, parameter[name[data]]]
variable[trigger] assign[=] call[name[TriggerInfo], parameter[call[call[name[parsed_data]][constant[trigger]]][constant[class]], call[call[name[parsed_data]][constant[trigger]]][constant[kind]]]]
variable[content_type] assign[=] call[name[parsed_data]][constant[content_type]]
return[call[name[Event], parameter[]]] | keyword[def] identifier[from_json] ( identifier[data] ):
literal[string]
identifier[parsed_data] = identifier[json] . identifier[loads] ( identifier[data] )
identifier[trigger] = identifier[TriggerInfo] (
identifier[parsed_data] [ literal[string] ][ literal[string] ],
identifier[parsed_data] [ literal[string] ][ literal[string] ],
)
identifier[content_type] = identifier[parsed_data] [ literal[string] ]
keyword[return] identifier[Event] ( identifier[body] = identifier[Event] . identifier[decode_body] ( identifier[parsed_data] [ literal[string] ], identifier[content_type] ),
identifier[content_type] = identifier[content_type] ,
identifier[trigger] = identifier[trigger] ,
identifier[fields] = identifier[parsed_data] . identifier[get] ( literal[string] ),
identifier[headers] = identifier[parsed_data] . identifier[get] ( literal[string] ),
identifier[_id] = identifier[parsed_data] [ literal[string] ],
identifier[method] = identifier[parsed_data] [ literal[string] ],
identifier[path] = identifier[parsed_data] [ literal[string] ],
identifier[size] = identifier[parsed_data] [ literal[string] ],
identifier[timestamp] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[parsed_data] [ literal[string] ]),
identifier[url] = identifier[parsed_data] [ literal[string] ],
identifier[_type] = identifier[parsed_data] [ literal[string] ],
identifier[type_version] = identifier[parsed_data] [ literal[string] ],
identifier[version] = identifier[parsed_data] [ literal[string] ]) | def from_json(data):
"""Decode event encoded as JSON by processor"""
parsed_data = json.loads(data)
trigger = TriggerInfo(parsed_data['trigger']['class'], parsed_data['trigger']['kind'])
# extract content type, needed to decode body
content_type = parsed_data['content_type']
return Event(body=Event.decode_body(parsed_data['body'], content_type), content_type=content_type, trigger=trigger, fields=parsed_data.get('fields'), headers=parsed_data.get('headers'), _id=parsed_data['id'], method=parsed_data['method'], path=parsed_data['path'], size=parsed_data['size'], timestamp=datetime.datetime.utcfromtimestamp(parsed_data['timestamp']), url=parsed_data['url'], _type=parsed_data['type'], type_version=parsed_data['type_version'], version=parsed_data['version']) |
def undo(config='root', files=None, num_pre=None, num_post=None):
    '''
    Undo all file changes that happened between num_pre and num_post, leaving
    the files into the state of num_pre.
    .. warning::
        If one of the files has changes after num_post, they will be overwritten
    The snapshots are used to determine the file list, but the current
    version of the files will be overwritten by the versions in num_pre.
    To undo changes between num_pre and the current version of the
    files use num_post=0.
    CLI Example:
    .. code-block:: bash
        salt '*' snapper.undo
    '''
    pre, post = _get_num_interval(config, num_pre, num_post)
    changes = status(config, pre, post)
    changed = set(changes.keys())
    # Default to undoing every changed file when no explicit list is given.
    requested = set(files or changed)
    if not requested.issubset(changed):
        # NOTE(review): these adjacent literals concatenate without a space
        # ("...not presentin the changed..."); fix the message separately.
        raise CommandExecutionError(
            'Given file list contains files that are not present'
            'in the changed filelist: {0}'.format(changed - requested))
    cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(
        config, pre, post, ' '.join(requested)))
    # snapper prints space-separated "key:value" pairs; parse them into a dict.
    try:
        components = cmdret.split(' ')
        ret = {}
        for comp in components:
            key, val = comp.split(':')
            ret[key] = val
        return ret
    except ValueError as exc:
        # NOTE(review): `exc` is unused and not chained (`from exc`), so the
        # original cause is dropped — consider chaining when fixing.
        raise CommandExecutionError(
            'Error while processing Snapper response: {0}'.format(cmdret))
constant[
Undo all file changes that happened between num_pre and num_post, leaving
the files into the state of num_pre.
.. warning::
If one of the files has changes after num_post, they will be overwritten
The snapshots are used to determine the file list, but the current
version of the files will be overwritten by the versions in num_pre.
You to undo changes between num_pre and the current version of the
files use num_post=0.
CLI Example:
.. code-block:: bash
salt '*' snapper.undo
]
<ast.Tuple object at 0x7da2045657e0> assign[=] call[name[_get_num_interval], parameter[name[config], name[num_pre], name[num_post]]]
variable[changes] assign[=] call[name[status], parameter[name[config], name[pre], name[post]]]
variable[changed] assign[=] call[name[set], parameter[call[name[changes].keys, parameter[]]]]
variable[requested] assign[=] call[name[set], parameter[<ast.BoolOp object at 0x7da204564070>]]
if <ast.UnaryOp object at 0x7da2045658d0> begin[:]
<ast.Raise object at 0x7da204567910>
variable[cmdret] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[call[constant[snapper -c {0} undochange {1}..{2} {3}].format, parameter[name[config], name[pre], name[post], call[constant[ ].join, parameter[name[requested]]]]]]]
<ast.Try object at 0x7da2044c2e30> | keyword[def] identifier[undo] ( identifier[config] = literal[string] , identifier[files] = keyword[None] , identifier[num_pre] = keyword[None] , identifier[num_post] = keyword[None] ):
literal[string]
identifier[pre] , identifier[post] = identifier[_get_num_interval] ( identifier[config] , identifier[num_pre] , identifier[num_post] )
identifier[changes] = identifier[status] ( identifier[config] , identifier[pre] , identifier[post] )
identifier[changed] = identifier[set] ( identifier[changes] . identifier[keys] ())
identifier[requested] = identifier[set] ( identifier[files] keyword[or] identifier[changed] )
keyword[if] keyword[not] identifier[requested] . identifier[issubset] ( identifier[changed] ):
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string] . identifier[format] ( identifier[changed] - identifier[requested] ))
identifier[cmdret] = identifier[__salt__] [ literal[string] ]( literal[string] . identifier[format] (
identifier[config] , identifier[pre] , identifier[post] , literal[string] . identifier[join] ( identifier[requested] )))
keyword[try] :
identifier[components] = identifier[cmdret] . identifier[split] ( literal[string] )
identifier[ret] ={}
keyword[for] identifier[comp] keyword[in] identifier[components] :
identifier[key] , identifier[val] = identifier[comp] . identifier[split] ( literal[string] )
identifier[ret] [ identifier[key] ]= identifier[val]
keyword[return] identifier[ret]
keyword[except] identifier[ValueError] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[cmdret] )) | def undo(config='root', files=None, num_pre=None, num_post=None):
"""
Undo all file changes that happened between num_pre and num_post, leaving
the files into the state of num_pre.
.. warning::
If one of the files has changes after num_post, they will be overwritten
The snapshots are used to determine the file list, but the current
version of the files will be overwritten by the versions in num_pre.
You to undo changes between num_pre and the current version of the
files use num_post=0.
CLI Example:
.. code-block:: bash
salt '*' snapper.undo
"""
(pre, post) = _get_num_interval(config, num_pre, num_post)
changes = status(config, pre, post)
changed = set(changes.keys())
requested = set(files or changed)
if not requested.issubset(changed):
raise CommandExecutionError('Given file list contains files that are not presentin the changed filelist: {0}'.format(changed - requested)) # depends on [control=['if'], data=[]]
cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(config, pre, post, ' '.join(requested)))
try:
components = cmdret.split(' ')
ret = {}
for comp in components:
(key, val) = comp.split(':')
ret[key] = val # depends on [control=['for'], data=['comp']]
return ret # depends on [control=['try'], data=[]]
except ValueError as exc:
raise CommandExecutionError('Error while processing Snapper response: {0}'.format(cmdret)) # depends on [control=['except'], data=[]] |
def _check_exists(database: Database, table: LdapObjectClass, key: str, value: str):
""" Check if a given LDAP object exists. """
try:
get_one(table, Q(**{key: value}), database=database)
return True
except ObjectDoesNotExist:
return False | def function[_check_exists, parameter[database, table, key, value]]:
constant[ Check if a given LDAP object exists. ]
<ast.Try object at 0x7da1b1083430> | keyword[def] identifier[_check_exists] ( identifier[database] : identifier[Database] , identifier[table] : identifier[LdapObjectClass] , identifier[key] : identifier[str] , identifier[value] : identifier[str] ):
literal[string]
keyword[try] :
identifier[get_one] ( identifier[table] , identifier[Q] (**{ identifier[key] : identifier[value] }), identifier[database] = identifier[database] )
keyword[return] keyword[True]
keyword[except] identifier[ObjectDoesNotExist] :
keyword[return] keyword[False] | def _check_exists(database: Database, table: LdapObjectClass, key: str, value: str):
""" Check if a given LDAP object exists. """
try:
get_one(table, Q(**{key: value}), database=database)
return True # depends on [control=['try'], data=[]]
except ObjectDoesNotExist:
return False # depends on [control=['except'], data=[]] |
def update_placeholder_formats(self, format_string, placeholder_formats):
"""
Update a format string adding formats if they are not already present.
"""
# Tokenize the format string and process them
output = []
for token in self.tokens(format_string):
if (
token.group("placeholder")
and (not token.group("format"))
and token.group("key") in placeholder_formats
):
output.append(
"{%s%s}"
% (token.group("key"), placeholder_formats[token.group("key")])
)
continue
value = token.group(0)
output.append(value)
return u"".join(output) | def function[update_placeholder_formats, parameter[self, format_string, placeholder_formats]]:
constant[
Update a format string adding formats if they are not already present.
]
variable[output] assign[=] list[[]]
for taget[name[token]] in starred[call[name[self].tokens, parameter[name[format_string]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1d8ccd0> begin[:]
call[name[output].append, parameter[binary_operation[constant[{%s%s}] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b1d8c940>, <ast.Subscript object at 0x7da1b1d8cd30>]]]]]
continue
variable[value] assign[=] call[name[token].group, parameter[constant[0]]]
call[name[output].append, parameter[name[value]]]
return[call[constant[].join, parameter[name[output]]]] | keyword[def] identifier[update_placeholder_formats] ( identifier[self] , identifier[format_string] , identifier[placeholder_formats] ):
literal[string]
identifier[output] =[]
keyword[for] identifier[token] keyword[in] identifier[self] . identifier[tokens] ( identifier[format_string] ):
keyword[if] (
identifier[token] . identifier[group] ( literal[string] )
keyword[and] ( keyword[not] identifier[token] . identifier[group] ( literal[string] ))
keyword[and] identifier[token] . identifier[group] ( literal[string] ) keyword[in] identifier[placeholder_formats]
):
identifier[output] . identifier[append] (
literal[string]
%( identifier[token] . identifier[group] ( literal[string] ), identifier[placeholder_formats] [ identifier[token] . identifier[group] ( literal[string] )])
)
keyword[continue]
identifier[value] = identifier[token] . identifier[group] ( literal[int] )
identifier[output] . identifier[append] ( identifier[value] )
keyword[return] literal[string] . identifier[join] ( identifier[output] ) | def update_placeholder_formats(self, format_string, placeholder_formats):
"""
Update a format string adding formats if they are not already present.
"""
# Tokenize the format string and process them
output = []
for token in self.tokens(format_string):
if token.group('placeholder') and (not token.group('format')) and (token.group('key') in placeholder_formats):
output.append('{%s%s}' % (token.group('key'), placeholder_formats[token.group('key')]))
continue # depends on [control=['if'], data=[]]
value = token.group(0)
output.append(value) # depends on [control=['for'], data=['token']]
return u''.join(output) |
async def on_raw_cap_list(self, params):
""" Update active capabilities. """
self._capabilities = { capab: False for capab in self._capabilities }
for capab in params[0].split():
capab, value = self._capability_normalize(capab)
self._capabilities[capab] = value if value else True | <ast.AsyncFunctionDef object at 0x7da207f00a30> | keyword[async] keyword[def] identifier[on_raw_cap_list] ( identifier[self] , identifier[params] ):
literal[string]
identifier[self] . identifier[_capabilities] ={ identifier[capab] : keyword[False] keyword[for] identifier[capab] keyword[in] identifier[self] . identifier[_capabilities] }
keyword[for] identifier[capab] keyword[in] identifier[params] [ literal[int] ]. identifier[split] ():
identifier[capab] , identifier[value] = identifier[self] . identifier[_capability_normalize] ( identifier[capab] )
identifier[self] . identifier[_capabilities] [ identifier[capab] ]= identifier[value] keyword[if] identifier[value] keyword[else] keyword[True] | async def on_raw_cap_list(self, params):
""" Update active capabilities. """
self._capabilities = {capab: False for capab in self._capabilities}
for capab in params[0].split():
(capab, value) = self._capability_normalize(capab)
self._capabilities[capab] = value if value else True # depends on [control=['for'], data=['capab']] |
def confirm_authorization_request(self):
"""When consumer confirm the authorization."""
server = self.server
scope = request.values.get('scope') or ''
scopes = scope.split()
credentials = dict(
client_id=request.values.get('client_id'),
redirect_uri=request.values.get('redirect_uri', None),
response_type=request.values.get('response_type', None),
state=request.values.get('state', None)
)
log.debug('Fetched credentials from request %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect_uri %s.', redirect_uri)
uri, http_method, body, headers = extract_params()
try:
ret = server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful.')
return create_response(*ret)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
# on auth error, we should preserve state if it's present according to RFC 6749
state = request.values.get('state')
if state and not e.state:
e.state = state # set e.state so e.in_uri() can add the state query parameter to redirect uri
return self._on_exception(e, e.in_uri(redirect_uri or self.error_uri))
except Exception as e:
log.exception(e)
return self._on_exception(e, add_params_to_uri(
self.error_uri, {'error': str(e)}
)) | def function[confirm_authorization_request, parameter[self]]:
constant[When consumer confirm the authorization.]
variable[server] assign[=] name[self].server
variable[scope] assign[=] <ast.BoolOp object at 0x7da1b03149d0>
variable[scopes] assign[=] call[name[scope].split, parameter[]]
variable[credentials] assign[=] call[name[dict], parameter[]]
call[name[log].debug, parameter[constant[Fetched credentials from request %r.], name[credentials]]]
variable[redirect_uri] assign[=] call[name[credentials].get, parameter[constant[redirect_uri]]]
call[name[log].debug, parameter[constant[Found redirect_uri %s.], name[redirect_uri]]]
<ast.Tuple object at 0x7da1b03146a0> assign[=] call[name[extract_params], parameter[]]
<ast.Try object at 0x7da1b0316890> | keyword[def] identifier[confirm_authorization_request] ( identifier[self] ):
literal[string]
identifier[server] = identifier[self] . identifier[server]
identifier[scope] = identifier[request] . identifier[values] . identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[scopes] = identifier[scope] . identifier[split] ()
identifier[credentials] = identifier[dict] (
identifier[client_id] = identifier[request] . identifier[values] . identifier[get] ( literal[string] ),
identifier[redirect_uri] = identifier[request] . identifier[values] . identifier[get] ( literal[string] , keyword[None] ),
identifier[response_type] = identifier[request] . identifier[values] . identifier[get] ( literal[string] , keyword[None] ),
identifier[state] = identifier[request] . identifier[values] . identifier[get] ( literal[string] , keyword[None] )
)
identifier[log] . identifier[debug] ( literal[string] , identifier[credentials] )
identifier[redirect_uri] = identifier[credentials] . identifier[get] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] , identifier[redirect_uri] )
identifier[uri] , identifier[http_method] , identifier[body] , identifier[headers] = identifier[extract_params] ()
keyword[try] :
identifier[ret] = identifier[server] . identifier[create_authorization_response] (
identifier[uri] , identifier[http_method] , identifier[body] , identifier[headers] , identifier[scopes] , identifier[credentials] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] identifier[create_response] (* identifier[ret] )
keyword[except] identifier[oauth2] . identifier[FatalClientError] keyword[as] identifier[e] :
identifier[log] . identifier[debug] ( literal[string] , identifier[e] , identifier[exc_info] = keyword[True] )
keyword[return] identifier[self] . identifier[_on_exception] ( identifier[e] , identifier[e] . identifier[in_uri] ( identifier[self] . identifier[error_uri] ))
keyword[except] identifier[oauth2] . identifier[OAuth2Error] keyword[as] identifier[e] :
identifier[log] . identifier[debug] ( literal[string] , identifier[e] , identifier[exc_info] = keyword[True] )
identifier[state] = identifier[request] . identifier[values] . identifier[get] ( literal[string] )
keyword[if] identifier[state] keyword[and] keyword[not] identifier[e] . identifier[state] :
identifier[e] . identifier[state] = identifier[state]
keyword[return] identifier[self] . identifier[_on_exception] ( identifier[e] , identifier[e] . identifier[in_uri] ( identifier[redirect_uri] keyword[or] identifier[self] . identifier[error_uri] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[exception] ( identifier[e] )
keyword[return] identifier[self] . identifier[_on_exception] ( identifier[e] , identifier[add_params_to_uri] (
identifier[self] . identifier[error_uri] ,{ literal[string] : identifier[str] ( identifier[e] )}
)) | def confirm_authorization_request(self):
"""When consumer confirm the authorization."""
server = self.server
scope = request.values.get('scope') or ''
scopes = scope.split()
credentials = dict(client_id=request.values.get('client_id'), redirect_uri=request.values.get('redirect_uri', None), response_type=request.values.get('response_type', None), state=request.values.get('state', None))
log.debug('Fetched credentials from request %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect_uri %s.', redirect_uri)
(uri, http_method, body, headers) = extract_params()
try:
ret = server.create_authorization_response(uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful.')
return create_response(*ret) # depends on [control=['try'], data=[]]
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri)) # depends on [control=['except'], data=['e']]
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
# on auth error, we should preserve state if it's present according to RFC 6749
state = request.values.get('state')
if state and (not e.state):
e.state = state # set e.state so e.in_uri() can add the state query parameter to redirect uri # depends on [control=['if'], data=[]]
return self._on_exception(e, e.in_uri(redirect_uri or self.error_uri)) # depends on [control=['except'], data=['e']]
except Exception as e:
log.exception(e)
return self._on_exception(e, add_params_to_uri(self.error_uri, {'error': str(e)})) # depends on [control=['except'], data=['e']] |
def b_cubed(clusters, mention_to_gold):
"""
Averaged per-mention precision and recall.
<https://pdfs.semanticscholar.org/cfe3/c24695f1c14b78a5b8e95bcbd1c666140fd1.pdf>
"""
numerator, denominator = 0, 0
for cluster in clusters:
if len(cluster) == 1:
continue
gold_counts = Counter()
correct = 0
for mention in cluster:
if mention in mention_to_gold:
gold_counts[tuple(mention_to_gold[mention])] += 1
for cluster2, count in gold_counts.items():
if len(cluster2) != 1:
correct += count * count
numerator += correct / float(len(cluster))
denominator += len(cluster)
return numerator, denominator | def function[b_cubed, parameter[clusters, mention_to_gold]]:
constant[
Averaged per-mention precision and recall.
<https://pdfs.semanticscholar.org/cfe3/c24695f1c14b78a5b8e95bcbd1c666140fd1.pdf>
]
<ast.Tuple object at 0x7da20c7c9960> assign[=] tuple[[<ast.Constant object at 0x7da20c7c84f0>, <ast.Constant object at 0x7da20c7ca6b0>]]
for taget[name[cluster]] in starred[name[clusters]] begin[:]
if compare[call[name[len], parameter[name[cluster]]] equal[==] constant[1]] begin[:]
continue
variable[gold_counts] assign[=] call[name[Counter], parameter[]]
variable[correct] assign[=] constant[0]
for taget[name[mention]] in starred[name[cluster]] begin[:]
if compare[name[mention] in name[mention_to_gold]] begin[:]
<ast.AugAssign object at 0x7da20c7c8e50>
for taget[tuple[[<ast.Name object at 0x7da20c7c8f70>, <ast.Name object at 0x7da20c7c8fd0>]]] in starred[call[name[gold_counts].items, parameter[]]] begin[:]
if compare[call[name[len], parameter[name[cluster2]]] not_equal[!=] constant[1]] begin[:]
<ast.AugAssign object at 0x7da20c7c90c0>
<ast.AugAssign object at 0x7da20c7ca110>
<ast.AugAssign object at 0x7da20c7c8970>
return[tuple[[<ast.Name object at 0x7da20c7cb370>, <ast.Name object at 0x7da20c7c8400>]]] | keyword[def] identifier[b_cubed] ( identifier[clusters] , identifier[mention_to_gold] ):
literal[string]
identifier[numerator] , identifier[denominator] = literal[int] , literal[int]
keyword[for] identifier[cluster] keyword[in] identifier[clusters] :
keyword[if] identifier[len] ( identifier[cluster] )== literal[int] :
keyword[continue]
identifier[gold_counts] = identifier[Counter] ()
identifier[correct] = literal[int]
keyword[for] identifier[mention] keyword[in] identifier[cluster] :
keyword[if] identifier[mention] keyword[in] identifier[mention_to_gold] :
identifier[gold_counts] [ identifier[tuple] ( identifier[mention_to_gold] [ identifier[mention] ])]+= literal[int]
keyword[for] identifier[cluster2] , identifier[count] keyword[in] identifier[gold_counts] . identifier[items] ():
keyword[if] identifier[len] ( identifier[cluster2] )!= literal[int] :
identifier[correct] += identifier[count] * identifier[count]
identifier[numerator] += identifier[correct] / identifier[float] ( identifier[len] ( identifier[cluster] ))
identifier[denominator] += identifier[len] ( identifier[cluster] )
keyword[return] identifier[numerator] , identifier[denominator] | def b_cubed(clusters, mention_to_gold):
"""
Averaged per-mention precision and recall.
<https://pdfs.semanticscholar.org/cfe3/c24695f1c14b78a5b8e95bcbd1c666140fd1.pdf>
"""
(numerator, denominator) = (0, 0)
for cluster in clusters:
if len(cluster) == 1:
continue # depends on [control=['if'], data=[]]
gold_counts = Counter()
correct = 0
for mention in cluster:
if mention in mention_to_gold:
gold_counts[tuple(mention_to_gold[mention])] += 1 # depends on [control=['if'], data=['mention', 'mention_to_gold']] # depends on [control=['for'], data=['mention']]
for (cluster2, count) in gold_counts.items():
if len(cluster2) != 1:
correct += count * count # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
numerator += correct / float(len(cluster))
denominator += len(cluster) # depends on [control=['for'], data=['cluster']]
return (numerator, denominator) |
def _get_block_publisher(self, state_hash):
"""Returns the block publisher based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
state_hash (str): The current state root hash for reading settings.
Raises:
InvalidGenesisStateError: if any errors occur getting the
BlockPublisher.
"""
state_view = self._state_view_factory.create_view(state_hash)
try:
class BatchPublisher:
def send(self, transactions):
# Consensus implementations are expected to have handling
# in place for genesis operation. This should includes
# adding any authorization and registrations required
# for the genesis node to the Genesis Batch list and
# detecting validation of the Genesis Block and handle it
# correctly. Batch publication is not allowed during
# genesis operation since there is no network to validate
# the batch yet.
raise InvalidGenesisConsensusError(
'Consensus cannot send transactions during genesis.')
consensus = ConsensusFactory.get_configured_consensus_module(
NULL_BLOCK_IDENTIFIER,
state_view)
return consensus.BlockPublisher(
BlockCache(self._block_store),
state_view_factory=self._state_view_factory,
batch_publisher=BatchPublisher(),
data_dir=self._data_dir,
config_dir=self._config_dir,
validator_id=self._identity_signer.get_public_key().as_hex())
except UnknownConsensusModuleError as e:
raise InvalidGenesisStateError(e) | def function[_get_block_publisher, parameter[self, state_hash]]:
constant[Returns the block publisher based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
state_hash (str): The current state root hash for reading settings.
Raises:
InvalidGenesisStateError: if any errors occur getting the
BlockPublisher.
]
variable[state_view] assign[=] call[name[self]._state_view_factory.create_view, parameter[name[state_hash]]]
<ast.Try object at 0x7da20c6c7d90> | keyword[def] identifier[_get_block_publisher] ( identifier[self] , identifier[state_hash] ):
literal[string]
identifier[state_view] = identifier[self] . identifier[_state_view_factory] . identifier[create_view] ( identifier[state_hash] )
keyword[try] :
keyword[class] identifier[BatchPublisher] :
keyword[def] identifier[send] ( identifier[self] , identifier[transactions] ):
keyword[raise] identifier[InvalidGenesisConsensusError] (
literal[string] )
identifier[consensus] = identifier[ConsensusFactory] . identifier[get_configured_consensus_module] (
identifier[NULL_BLOCK_IDENTIFIER] ,
identifier[state_view] )
keyword[return] identifier[consensus] . identifier[BlockPublisher] (
identifier[BlockCache] ( identifier[self] . identifier[_block_store] ),
identifier[state_view_factory] = identifier[self] . identifier[_state_view_factory] ,
identifier[batch_publisher] = identifier[BatchPublisher] (),
identifier[data_dir] = identifier[self] . identifier[_data_dir] ,
identifier[config_dir] = identifier[self] . identifier[_config_dir] ,
identifier[validator_id] = identifier[self] . identifier[_identity_signer] . identifier[get_public_key] (). identifier[as_hex] ())
keyword[except] identifier[UnknownConsensusModuleError] keyword[as] identifier[e] :
keyword[raise] identifier[InvalidGenesisStateError] ( identifier[e] ) | def _get_block_publisher(self, state_hash):
"""Returns the block publisher based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
state_hash (str): The current state root hash for reading settings.
Raises:
InvalidGenesisStateError: if any errors occur getting the
BlockPublisher.
"""
state_view = self._state_view_factory.create_view(state_hash)
try:
class BatchPublisher:
def send(self, transactions):
# Consensus implementations are expected to have handling
# in place for genesis operation. This should includes
# adding any authorization and registrations required
# for the genesis node to the Genesis Batch list and
# detecting validation of the Genesis Block and handle it
# correctly. Batch publication is not allowed during
# genesis operation since there is no network to validate
# the batch yet.
raise InvalidGenesisConsensusError('Consensus cannot send transactions during genesis.')
consensus = ConsensusFactory.get_configured_consensus_module(NULL_BLOCK_IDENTIFIER, state_view)
return consensus.BlockPublisher(BlockCache(self._block_store), state_view_factory=self._state_view_factory, batch_publisher=BatchPublisher(), data_dir=self._data_dir, config_dir=self._config_dir, validator_id=self._identity_signer.get_public_key().as_hex()) # depends on [control=['try'], data=[]]
except UnknownConsensusModuleError as e:
raise InvalidGenesisStateError(e) # depends on [control=['except'], data=['e']] |
def _build_authorization_request_url(
self,
response_type,
state=None
):
"""Form URL to request an auth code or access token.
Parameters
response_type (str)
Only 'code' (Authorization Code Grant) supported at this time
state (str)
Optional CSRF State token to send to server.
Returns
(str)
The fully constructed authorization request URL.
Raises
LyftIllegalState (ApiError)
Raised if response_type parameter is invalid.
"""
if response_type not in auth.VALID_RESPONSE_TYPES:
message = '{} is not a valid response type.'
raise LyftIllegalState(message.format(response_type))
args = OrderedDict([
('scope', ' '.join(self.scopes)),
('state', state),
('response_type', response_type),
('client_id', self.client_id),
])
return build_url(auth.SERVER_HOST, auth.AUTHORIZE_PATH, args) | def function[_build_authorization_request_url, parameter[self, response_type, state]]:
constant[Form URL to request an auth code or access token.
Parameters
response_type (str)
Only 'code' (Authorization Code Grant) supported at this time
state (str)
Optional CSRF State token to send to server.
Returns
(str)
The fully constructed authorization request URL.
Raises
LyftIllegalState (ApiError)
Raised if response_type parameter is invalid.
]
if compare[name[response_type] <ast.NotIn object at 0x7da2590d7190> name[auth].VALID_RESPONSE_TYPES] begin[:]
variable[message] assign[=] constant[{} is not a valid response type.]
<ast.Raise object at 0x7da207f9b8b0>
variable[args] assign[=] call[name[OrderedDict], parameter[list[[<ast.Tuple object at 0x7da207f9af80>, <ast.Tuple object at 0x7da207f99390>, <ast.Tuple object at 0x7da207f9a800>, <ast.Tuple object at 0x7da207f9b250>]]]]
return[call[name[build_url], parameter[name[auth].SERVER_HOST, name[auth].AUTHORIZE_PATH, name[args]]]] | keyword[def] identifier[_build_authorization_request_url] (
identifier[self] ,
identifier[response_type] ,
identifier[state] = keyword[None]
):
literal[string]
keyword[if] identifier[response_type] keyword[not] keyword[in] identifier[auth] . identifier[VALID_RESPONSE_TYPES] :
identifier[message] = literal[string]
keyword[raise] identifier[LyftIllegalState] ( identifier[message] . identifier[format] ( identifier[response_type] ))
identifier[args] = identifier[OrderedDict] ([
( literal[string] , literal[string] . identifier[join] ( identifier[self] . identifier[scopes] )),
( literal[string] , identifier[state] ),
( literal[string] , identifier[response_type] ),
( literal[string] , identifier[self] . identifier[client_id] ),
])
keyword[return] identifier[build_url] ( identifier[auth] . identifier[SERVER_HOST] , identifier[auth] . identifier[AUTHORIZE_PATH] , identifier[args] ) | def _build_authorization_request_url(self, response_type, state=None):
"""Form URL to request an auth code or access token.
Parameters
response_type (str)
Only 'code' (Authorization Code Grant) supported at this time
state (str)
Optional CSRF State token to send to server.
Returns
(str)
The fully constructed authorization request URL.
Raises
LyftIllegalState (ApiError)
Raised if response_type parameter is invalid.
"""
if response_type not in auth.VALID_RESPONSE_TYPES:
message = '{} is not a valid response type.'
raise LyftIllegalState(message.format(response_type)) # depends on [control=['if'], data=['response_type']]
args = OrderedDict([('scope', ' '.join(self.scopes)), ('state', state), ('response_type', response_type), ('client_id', self.client_id)])
return build_url(auth.SERVER_HOST, auth.AUTHORIZE_PATH, args) |
def total_num_lines(self):
"""
Return the total number of lines in the diff for
which we have coverage info.
"""
return sum([len(summary.measured_lines) for summary
in self._diff_violations().values()]) | def function[total_num_lines, parameter[self]]:
constant[
Return the total number of lines in the diff for
which we have coverage info.
]
return[call[name[sum], parameter[<ast.ListComp object at 0x7da1b26aed70>]]] | keyword[def] identifier[total_num_lines] ( identifier[self] ):
literal[string]
keyword[return] identifier[sum] ([ identifier[len] ( identifier[summary] . identifier[measured_lines] ) keyword[for] identifier[summary]
keyword[in] identifier[self] . identifier[_diff_violations] (). identifier[values] ()]) | def total_num_lines(self):
"""
Return the total number of lines in the diff for
which we have coverage info.
"""
return sum([len(summary.measured_lines) for summary in self._diff_violations().values()]) |
def fix_argument_only(self):
'''
fix_argument_only() -> Either or Unit(Argument)
`<arg> | ARG | <arg3>` ->
`Required(Argument('<arg>', 'ARG', '<arg3>'))`
`[<arg>] | [ARG] | [<arg3>]` ->
`Optional(Argument('<arg>', 'ARG', '<arg3>'))`
`(<arg>) | [ARG]` -> not change, return self
`-a | --better` -> not change
'''
# for idx, branch in enumerate(self):
# if isinstance(branch[0], Either):
# self[idx] = branch.fix()
first_type = type(self[0])
if first_type not in (Required, Optional):
return self
for branch in self:
if not (len(branch) == 1 and
isinstance(branch, first_type) and
isinstance(branch[0], Argument)):
logger.debug('fix %r not change', self)
return self
else:
first = self[0][0]
for each in self:
first.names.update(each[0].names)
result = first_type(first)
logger.debug('fix %r -> %r', self, result)
return result | def function[fix_argument_only, parameter[self]]:
constant[
fix_argument_only() -> Either or Unit(Argument)
`<arg> | ARG | <arg3>` ->
`Required(Argument('<arg>', 'ARG', '<arg3>'))`
`[<arg>] | [ARG] | [<arg3>]` ->
`Optional(Argument('<arg>', 'ARG', '<arg3>'))`
`(<arg>) | [ARG]` -> not change, return self
`-a | --better` -> not change
]
variable[first_type] assign[=] call[name[type], parameter[call[name[self]][constant[0]]]]
if compare[name[first_type] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Name object at 0x7da1b2380760>, <ast.Name object at 0x7da1b2381510>]]] begin[:]
return[name[self]]
for taget[name[branch]] in starred[name[self]] begin[:]
if <ast.UnaryOp object at 0x7da1b2380190> begin[:]
call[name[logger].debug, parameter[constant[fix %r not change], name[self]]]
return[name[self]] | keyword[def] identifier[fix_argument_only] ( identifier[self] ):
literal[string]
identifier[first_type] = identifier[type] ( identifier[self] [ literal[int] ])
keyword[if] identifier[first_type] keyword[not] keyword[in] ( identifier[Required] , identifier[Optional] ):
keyword[return] identifier[self]
keyword[for] identifier[branch] keyword[in] identifier[self] :
keyword[if] keyword[not] ( identifier[len] ( identifier[branch] )== literal[int] keyword[and]
identifier[isinstance] ( identifier[branch] , identifier[first_type] ) keyword[and]
identifier[isinstance] ( identifier[branch] [ literal[int] ], identifier[Argument] )):
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] )
keyword[return] identifier[self]
keyword[else] :
identifier[first] = identifier[self] [ literal[int] ][ literal[int] ]
keyword[for] identifier[each] keyword[in] identifier[self] :
identifier[first] . identifier[names] . identifier[update] ( identifier[each] [ literal[int] ]. identifier[names] )
identifier[result] = identifier[first_type] ( identifier[first] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] , identifier[result] )
keyword[return] identifier[result] | def fix_argument_only(self):
"""
fix_argument_only() -> Either or Unit(Argument)
`<arg> | ARG | <arg3>` ->
`Required(Argument('<arg>', 'ARG', '<arg3>'))`
`[<arg>] | [ARG] | [<arg3>]` ->
`Optional(Argument('<arg>', 'ARG', '<arg3>'))`
`(<arg>) | [ARG]` -> not change, return self
`-a | --better` -> not change
""" # for idx, branch in enumerate(self):
# if isinstance(branch[0], Either):
# self[idx] = branch.fix()
first_type = type(self[0])
if first_type not in (Required, Optional):
return self # depends on [control=['if'], data=[]]
for branch in self:
if not (len(branch) == 1 and isinstance(branch, first_type) and isinstance(branch[0], Argument)):
logger.debug('fix %r not change', self)
return self # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['branch']]
else:
first = self[0][0]
for each in self:
first.names.update(each[0].names) # depends on [control=['for'], data=['each']]
result = first_type(first)
logger.debug('fix %r -> %r', self, result)
return result |
def get_concepts_to_recalculate(self, users, lang, concepts=None):
    """
    Get concepts which have seen changes and have to be recalculated.

    Args:
        users (list of users or user): users whose user stats we are interested in
        lang (str): language of used concepts
        concepts (Optional[list of concepts]): list of primary keys of concepts or concepts
            Defaults to None meaning all concepts.

    Returns:
        dict: user -> set of concepts (int) - in case of list of users
        set of concepts (int) - in case of one user
    """
    only_one_user = False
    if not isinstance(users, list):
        only_one_user = True
        users = [users]
    mapping = self.get_item_concept_mapping(lang)
    # user_id -> {concept_id -> UserStat}; one stat type is enough because we
    # only need the cache timestamp per (user, concept).
    current_user_stats = defaultdict(dict)
    user_stats_qs = UserStat.objects.filter(user__in=users, stat="answer_count")
    if concepts is not None:
        user_stats_qs = user_stats_qs.filter(concept__in=concepts)
    for user_stat in user_stats_qs:
        current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat
    # Hoisted out of the answer loop: these config values are loop-invariant.
    time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4)
    time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2)
    # Normalize `concepts` to a set of primary keys once. The original rebuilt
    # the pk list per answer and, when pks were passed, mistakenly tested
    # membership against the Concept class itself (`... else Concept`).
    if concepts is None:
        concept_pks = None
    else:
        concept_pks = {c.pk if isinstance(c, Concept) else c for c in concepts}
    concepts_to_recalculate = defaultdict(set)
    for user, item, time in Answer.objects.filter(user__in=users)\
            .values_list("user_id", "item").annotate(Max("time")):
        if item not in mapping:
            # in reality this should be a corner case, so it is efficient to not filter Answers
            continue  # item is not in any concept
        for concept in mapping[item]:
            if user in current_user_stats and concept in current_user_stats[user] \
                    and current_user_stats[user][concept].time > time:
                if not self.has_time_expired(current_user_stats[user][concept].time, time,
                                             time_expiration_lower_bound, time_expiration_factor):
                    continue  # cache is up to date
            if concept_pks is None or concept in concept_pks:
                concepts_to_recalculate[user].add(concept)
    if only_one_user:
        return concepts_to_recalculate[users[0]]
    return concepts_to_recalculate
constant[
Get concept which have same changes and have to be recalculated
Args:
users (list of users or user): users whose user stats we are interesting in
lang (str): language of used concepts
concepts (Optional[list of concepts]): list of primary keys of concepts or concepts
Defaults to None meaning all concepts.
Returns:
dict: user -> set of concepts (int) - in case of list of users
list of stats (str) - in case of one user
]
variable[only_one_user] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da18f58ebc0> begin[:]
variable[only_one_user] assign[=] constant[True]
variable[users] assign[=] list[[<ast.Name object at 0x7da18ede4f40>]]
variable[mapping] assign[=] call[name[self].get_item_concept_mapping, parameter[name[lang]]]
variable[current_user_stats] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da18ede6320>]]
variable[user_stats_qs] assign[=] call[name[UserStat].objects.filter, parameter[]]
if compare[name[concepts] is_not constant[None]] begin[:]
variable[user_stats_qs] assign[=] call[name[user_stats_qs].filter, parameter[]]
for taget[name[user_stat]] in starred[name[user_stats_qs]] begin[:]
call[call[name[current_user_stats]][name[user_stat].user_id]][name[user_stat].concept_id] assign[=] name[user_stat]
variable[concepts_to_recalculate] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da20e74a260>]]
for taget[tuple[[<ast.Name object at 0x7da20e74afe0>, <ast.Name object at 0x7da20c6a90f0>, <ast.Name object at 0x7da20c6a8e50>]]] in starred[call[call[call[name[Answer].objects.filter, parameter[]].values_list, parameter[constant[user_id], constant[item]]].annotate, parameter[call[name[Max], parameter[constant[time]]]]]] begin[:]
if compare[name[item] <ast.NotIn object at 0x7da2590d7190> name[mapping]] begin[:]
continue
variable[time_expiration_lower_bound] assign[=] call[name[get_config], parameter[constant[proso_models], constant[knowledge_overview.time_shift_hours]]]
variable[time_expiration_factor] assign[=] call[name[get_config], parameter[constant[proso_models], constant[knowledge_overview.time_expiration_factor]]]
for taget[name[concept]] in starred[call[name[mapping]][name[item]]] begin[:]
if <ast.BoolOp object at 0x7da20c6a9a50> begin[:]
if <ast.UnaryOp object at 0x7da20c6abb20> begin[:]
continue
if <ast.BoolOp object at 0x7da20c6a8d90> begin[:]
call[call[name[concepts_to_recalculate]][name[user]].add, parameter[name[concept]]]
if name[only_one_user] begin[:]
return[call[name[concepts_to_recalculate]][call[name[users]][constant[0]]]]
return[name[concepts_to_recalculate]] | keyword[def] identifier[get_concepts_to_recalculate] ( identifier[self] , identifier[users] , identifier[lang] , identifier[concepts] = keyword[None] ):
literal[string]
identifier[only_one_user] = keyword[False]
keyword[if] keyword[not] identifier[isinstance] ( identifier[users] , identifier[list] ):
identifier[only_one_user] = keyword[True]
identifier[users] =[ identifier[users] ]
identifier[mapping] = identifier[self] . identifier[get_item_concept_mapping] ( identifier[lang] )
identifier[current_user_stats] = identifier[defaultdict] ( keyword[lambda] :{})
identifier[user_stats_qs] = identifier[UserStat] . identifier[objects] . identifier[filter] ( identifier[user__in] = identifier[users] , identifier[stat] = literal[string] )
keyword[if] identifier[concepts] keyword[is] keyword[not] keyword[None] :
identifier[user_stats_qs] = identifier[user_stats_qs] . identifier[filter] ( identifier[concept__in] = identifier[concepts] )
keyword[for] identifier[user_stat] keyword[in] identifier[user_stats_qs] :
identifier[current_user_stats] [ identifier[user_stat] . identifier[user_id] ][ identifier[user_stat] . identifier[concept_id] ]= identifier[user_stat]
identifier[concepts_to_recalculate] = identifier[defaultdict] ( keyword[lambda] : identifier[set] ())
keyword[for] identifier[user] , identifier[item] , identifier[time] keyword[in] identifier[Answer] . identifier[objects] . identifier[filter] ( identifier[user__in] = identifier[users] ). identifier[values_list] ( literal[string] , literal[string] ). identifier[annotate] ( identifier[Max] ( literal[string] )):
keyword[if] identifier[item] keyword[not] keyword[in] identifier[mapping] :
keyword[continue]
identifier[time_expiration_lower_bound] = identifier[get_config] ( literal[string] , literal[string] , identifier[default] = literal[int] )
identifier[time_expiration_factor] = identifier[get_config] ( literal[string] , literal[string] , identifier[default] = literal[int] )
keyword[for] identifier[concept] keyword[in] identifier[mapping] [ identifier[item] ]:
keyword[if] identifier[user] keyword[in] identifier[current_user_stats] keyword[and] identifier[concept] keyword[in] identifier[current_user_stats] [ identifier[user] ] keyword[and] identifier[current_user_stats] [ identifier[user] ][ identifier[concept] ]. identifier[time] > identifier[time] :
keyword[if] keyword[not] identifier[self] . identifier[has_time_expired] ( identifier[current_user_stats] [ identifier[user] ][ identifier[concept] ]. identifier[time] , identifier[time] , identifier[time_expiration_lower_bound] , identifier[time_expiration_factor] ):
keyword[continue]
keyword[if] identifier[concepts] keyword[is] keyword[None] keyword[or] identifier[concept] keyword[in] ([ identifier[c] . identifier[pk] keyword[for] identifier[c] keyword[in] identifier[concepts] ] keyword[if] identifier[type] ( identifier[concepts] [ literal[int] ])== identifier[Concept] keyword[else] identifier[Concept] ):
identifier[concepts_to_recalculate] [ identifier[user] ]. identifier[add] ( identifier[concept] )
keyword[if] identifier[only_one_user] :
keyword[return] identifier[concepts_to_recalculate] [ identifier[users] [ literal[int] ]]
keyword[return] identifier[concepts_to_recalculate] | def get_concepts_to_recalculate(self, users, lang, concepts=None):
"""
Get concept which have same changes and have to be recalculated
Args:
users (list of users or user): users whose user stats we are interesting in
lang (str): language of used concepts
concepts (Optional[list of concepts]): list of primary keys of concepts or concepts
Defaults to None meaning all concepts.
Returns:
dict: user -> set of concepts (int) - in case of list of users
list of stats (str) - in case of one user
"""
only_one_user = False
if not isinstance(users, list):
only_one_user = True
users = [users] # depends on [control=['if'], data=[]]
mapping = self.get_item_concept_mapping(lang)
current_user_stats = defaultdict(lambda : {})
user_stats_qs = UserStat.objects.filter(user__in=users, stat='answer_count') # we need only one type
if concepts is not None:
user_stats_qs = user_stats_qs.filter(concept__in=concepts) # depends on [control=['if'], data=['concepts']]
for user_stat in user_stats_qs:
current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat # depends on [control=['for'], data=['user_stat']]
concepts_to_recalculate = defaultdict(lambda : set())
for (user, item, time) in Answer.objects.filter(user__in=users).values_list('user_id', 'item').annotate(Max('time')):
if item not in mapping:
# in reality this should by corner case, so it is efficient to not filter Answers
continue # item is not in concept # depends on [control=['if'], data=[]]
time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4)
time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2)
for concept in mapping[item]:
if user in current_user_stats and concept in current_user_stats[user] and (current_user_stats[user][concept].time > time):
if not self.has_time_expired(current_user_stats[user][concept].time, time, time_expiration_lower_bound, time_expiration_factor):
continue # cache is up to date # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if concepts is None or concept in ([c.pk for c in concepts] if type(concepts[0]) == Concept else Concept):
concepts_to_recalculate[user].add(concept) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['concept']] # depends on [control=['for'], data=[]]
if only_one_user:
return concepts_to_recalculate[users[0]] # depends on [control=['if'], data=[]]
return concepts_to_recalculate |
def GetPartitionResolver(self, database_link):
    """Gets the partition resolver associated with the database link

    :param str database_link:
        Database self link or ID based link.

    :return:
        An instance of PartitionResolver, or None if no resolver is
        registered for the link.
    :rtype: object

    """
    if database_link:
        # Normalize the link before the lookup so that both "/dbs/x/" and
        # "dbs/x" resolve to the same registered resolver.
        trimmed_link = base.TrimBeginningAndEndingSlashes(database_link)
        return self.partition_resolvers.get(trimmed_link)
    raise ValueError("database_link is None or empty.")
constant[Gets the partition resolver associated with the database link
:param str database_link:
Database self link or ID based link.
:return:
An instance of PartitionResolver.
:rtype: object
]
if <ast.UnaryOp object at 0x7da20c6a90c0> begin[:]
<ast.Raise object at 0x7da20c6a99c0>
return[call[name[self].partition_resolvers.get, parameter[call[name[base].TrimBeginningAndEndingSlashes, parameter[name[database_link]]]]]] | keyword[def] identifier[GetPartitionResolver] ( identifier[self] , identifier[database_link] ):
literal[string]
keyword[if] keyword[not] identifier[database_link] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[partition_resolvers] . identifier[get] ( identifier[base] . identifier[TrimBeginningAndEndingSlashes] ( identifier[database_link] )) | def GetPartitionResolver(self, database_link):
"""Gets the partition resolver associated with the database link
:param str database_link:
Database self link or ID based link.
:return:
An instance of PartitionResolver.
:rtype: object
"""
if not database_link:
raise ValueError('database_link is None or empty.') # depends on [control=['if'], data=[]]
return self.partition_resolvers.get(base.TrimBeginningAndEndingSlashes(database_link)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.