body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def scope_name():
'Returns the name of current scope as a string, e.g. deepq/q_func'
return tf.compat.v1.get_variable_scope().name
| 4,613,101,357,403,974,000
|
Returns the name of current scope as a string, e.g. deepq/q_func
|
baselines/deepq/build_graph.py
|
scope_name
|
rwill128/baselines
|
python
|
def scope_name():
return tf.compat.v1.get_variable_scope().name
|
def absolute_scope_name(relative_scope_name):
'Appends parent scope name to `relative_scope_name`'
return ((scope_name() + '/') + relative_scope_name)
| 7,051,420,257,098,793,000
|
Appends parent scope name to `relative_scope_name`
|
baselines/deepq/build_graph.py
|
absolute_scope_name
|
rwill128/baselines
|
python
|
def absolute_scope_name(relative_scope_name):
return ((scope_name() + '/') + relative_scope_name)
|
def build_act(make_obs_ph, q_func, num_actions, scope='deepq', reuse=None):
'Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n '
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph('observation')
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name='stochastic')
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name='update_eps')
eps = tf.compat.v1.get_variable('eps', (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope='q_func')
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = (tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, (lambda : stochastic_actions), (lambda : deterministic_actions))
update_eps_expr = eps.assign(tf.cond((update_eps_ph >= 0), (lambda : update_eps_ph), (lambda : eps)))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: (- 1.0), stochastic_ph: True}, updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=(- 1)):
return _act(ob, stochastic, update_eps)
return act
| 2,112,412,584,453,471,000
|
Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
|
baselines/deepq/build_graph.py
|
build_act
|
rwill128/baselines
|
python
|
def build_act(make_obs_ph, q_func, num_actions, scope='deepq', reuse=None):
'Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n '
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph('observation')
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name='stochastic')
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name='update_eps')
eps = tf.compat.v1.get_variable('eps', (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope='q_func')
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = (tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, (lambda : stochastic_actions), (lambda : deterministic_actions))
update_eps_expr = eps.assign(tf.cond((update_eps_ph >= 0), (lambda : update_eps_ph), (lambda : eps)))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: (- 1.0), stochastic_ph: True}, updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=(- 1)):
return _act(ob, stochastic, update_eps)
return act
|
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope='deepq', reuse=None, param_noise_filter_func=None):
'Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n '
if (param_noise_filter_func is None):
param_noise_filter_func = default_param_noise_filter
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph('observation')
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name='stochastic')
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name='update_eps')
update_param_noise_threshold_ph = tf.compat.v1.placeholder(tf.float32, (), name='update_param_noise_threshold')
update_param_noise_scale_ph = tf.compat.v1.placeholder(tf.bool, (), name='update_param_noise_scale')
reset_ph = tf.compat.v1.placeholder(tf.bool, (), name='reset')
eps = tf.compat.v1.get_variable('eps', (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.compat.v1.get_variable('param_noise_scale', (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.compat.v1.get_variable('param_noise_threshold', (), initializer=tf.constant_initializer(0.05), trainable=False)
q_values = q_func(observations_ph.get(), num_actions, scope='q_func')
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope='perturbed_q_func')
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert (len(all_vars) == len(all_perturbed_vars))
perturb_ops = []
for (var, perturbed_var) in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
op = tf.compat.v1.assign(perturbed_var, (var + tf.compat.v1.random_normal(shape=tf.shape(var), mean=0.0, stddev=param_noise_scale)))
else:
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert (len(perturb_ops) == len(all_vars))
return tf.group(*perturb_ops)
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope='adaptive_q_func')
perturb_for_adaption = perturb_vars(original_scope='q_func', perturbed_scope='adaptive_q_func')
kl = tf.reduce_sum((tf.nn.softmax(q_values) * (tf.compat.v1.log(tf.nn.softmax(q_values)) - tf.compat.v1.log(tf.nn.softmax(q_values_adaptive)))), axis=(- 1))
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond((mean_kl < param_noise_threshold), (lambda : param_noise_scale.assign((param_noise_scale * 1.01))), (lambda : param_noise_scale.assign((param_noise_scale / 1.01))))
return update_scale_expr
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond((update_param_noise_threshold_ph >= 0), (lambda : update_param_noise_threshold_ph), (lambda : param_noise_threshold)))
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = (tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, (lambda : stochastic_actions), (lambda : deterministic_actions))
update_eps_expr = eps.assign(tf.cond((update_eps_ph >= 0), (lambda : update_eps_ph), (lambda : eps)))
updates = [update_eps_expr, tf.cond(reset_ph, (lambda : perturb_vars(original_scope='q_func', perturbed_scope='perturbed_q_func')), (lambda : tf.group(*[]))), tf.cond(update_param_noise_scale_ph, (lambda : update_scale()), (lambda : tf.Variable(0.0, trainable=False))), update_param_noise_threshold_expr]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: (- 1.0), stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=(- 1)):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
| -6,425,649,312,369,713,000
|
Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
|
baselines/deepq/build_graph.py
|
build_act_with_param_noise
|
rwill128/baselines
|
python
|
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope='deepq', reuse=None, param_noise_filter_func=None):
'Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n '
if (param_noise_filter_func is None):
param_noise_filter_func = default_param_noise_filter
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph('observation')
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name='stochastic')
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name='update_eps')
update_param_noise_threshold_ph = tf.compat.v1.placeholder(tf.float32, (), name='update_param_noise_threshold')
update_param_noise_scale_ph = tf.compat.v1.placeholder(tf.bool, (), name='update_param_noise_scale')
reset_ph = tf.compat.v1.placeholder(tf.bool, (), name='reset')
eps = tf.compat.v1.get_variable('eps', (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.compat.v1.get_variable('param_noise_scale', (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.compat.v1.get_variable('param_noise_threshold', (), initializer=tf.constant_initializer(0.05), trainable=False)
q_values = q_func(observations_ph.get(), num_actions, scope='q_func')
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope='perturbed_q_func')
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert (len(all_vars) == len(all_perturbed_vars))
perturb_ops = []
for (var, perturbed_var) in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
op = tf.compat.v1.assign(perturbed_var, (var + tf.compat.v1.random_normal(shape=tf.shape(var), mean=0.0, stddev=param_noise_scale)))
else:
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert (len(perturb_ops) == len(all_vars))
return tf.group(*perturb_ops)
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope='adaptive_q_func')
perturb_for_adaption = perturb_vars(original_scope='q_func', perturbed_scope='adaptive_q_func')
kl = tf.reduce_sum((tf.nn.softmax(q_values) * (tf.compat.v1.log(tf.nn.softmax(q_values)) - tf.compat.v1.log(tf.nn.softmax(q_values_adaptive)))), axis=(- 1))
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond((mean_kl < param_noise_threshold), (lambda : param_noise_scale.assign((param_noise_scale * 1.01))), (lambda : param_noise_scale.assign((param_noise_scale / 1.01))))
return update_scale_expr
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond((update_param_noise_threshold_ph >= 0), (lambda : update_param_noise_threshold_ph), (lambda : param_noise_threshold)))
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = (tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, (lambda : stochastic_actions), (lambda : deterministic_actions))
update_eps_expr = eps.assign(tf.cond((update_eps_ph >= 0), (lambda : update_eps_ph), (lambda : eps)))
updates = [update_eps_expr, tf.cond(reset_ph, (lambda : perturb_vars(original_scope='q_func', perturbed_scope='perturbed_q_func')), (lambda : tf.group(*[]))), tf.cond(update_param_noise_scale_ph, (lambda : update_scale()), (lambda : tf.Variable(0.0, trainable=False))), update_param_noise_threshold_expr]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: (- 1.0), stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=(- 1)):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
|
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope='deepq', reuse=None, param_noise=False, param_noise_filter_func=None):
"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. 
If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n "
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.compat.v1.variable_scope(scope, reuse=reuse):
obs_t_input = make_obs_ph('obs_t')
act_t_ph = tf.compat.v1.placeholder(tf.int32, [None], name='action')
rew_t_ph = tf.compat.v1.placeholder(tf.float32, [None], name='reward')
obs_tp1_input = make_obs_ph('obs_tp1')
done_mask_ph = tf.compat.v1.placeholder(tf.float32, [None], name='done')
importance_weights_ph = tf.compat.v1.placeholder(tf.float32, [None], name='weight')
q_t = q_func(obs_t_input.get(), num_actions, scope='q_func', reuse=True)
q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=(tf.compat.v1.get_variable_scope().name + '/q_func'))
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope='target_q_func')
target_q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=(tf.compat.v1.get_variable_scope().name + '/target_q_func'))
q_t_selected = tf.reduce_sum((q_t * tf.one_hot(act_t_ph, num_actions)), 1)
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope='q_func', reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum((q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions)), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = ((1.0 - done_mask_ph) * q_tp1_best)
q_t_selected_target = (rew_t_ph + (gamma * q_tp1_best_masked))
td_error = (q_t_selected - tf.stop_gradient(q_t_selected_target))
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean((importance_weights_ph * errors))
if (grad_norm_clipping is not None):
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for (i, (grad, var)) in enumerate(gradients):
if (grad is not None):
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
update_target_expr = []
for (var, var_target) in zip(sorted(q_func_vars, key=(lambda v: v.name)), sorted(target_q_func_vars, key=(lambda v: v.name))):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
train = U.function(inputs=[obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph], outputs=td_error, updates=[optimize_expr])
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return (act_f, train, update_target, {'q_values': q_values})
| 7,110,669,786,864,565,000
|
Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
` See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
|
baselines/deepq/build_graph.py
|
build_train
|
rwill128/baselines
|
python
|
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope='deepq', reuse=None, param_noise=False, param_noise_filter_func=None):
"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. 
If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n "
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.compat.v1.variable_scope(scope, reuse=reuse):
obs_t_input = make_obs_ph('obs_t')
act_t_ph = tf.compat.v1.placeholder(tf.int32, [None], name='action')
rew_t_ph = tf.compat.v1.placeholder(tf.float32, [None], name='reward')
obs_tp1_input = make_obs_ph('obs_tp1')
done_mask_ph = tf.compat.v1.placeholder(tf.float32, [None], name='done')
importance_weights_ph = tf.compat.v1.placeholder(tf.float32, [None], name='weight')
q_t = q_func(obs_t_input.get(), num_actions, scope='q_func', reuse=True)
q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=(tf.compat.v1.get_variable_scope().name + '/q_func'))
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope='target_q_func')
target_q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=(tf.compat.v1.get_variable_scope().name + '/target_q_func'))
q_t_selected = tf.reduce_sum((q_t * tf.one_hot(act_t_ph, num_actions)), 1)
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope='q_func', reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum((q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions)), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = ((1.0 - done_mask_ph) * q_tp1_best)
q_t_selected_target = (rew_t_ph + (gamma * q_tp1_best_masked))
td_error = (q_t_selected - tf.stop_gradient(q_t_selected_target))
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean((importance_weights_ph * errors))
if (grad_norm_clipping is not None):
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for (i, (grad, var)) in enumerate(gradients):
if (grad is not None):
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
update_target_expr = []
for (var, var_target) in zip(sorted(q_func_vars, key=(lambda v: v.name)), sorted(target_q_func_vars, key=(lambda v: v.name))):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
train = U.function(inputs=[obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph], outputs=td_error, updates=[optimize_expr])
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return (act_f, train, update_target, {'q_values': q_values})
|
def hash_password(mapper, connect, target):
'\n Helper function that is a listener and hashes passwords before\n insertion into the database.\n\n :param mapper:\n :param connect:\n :param target:\n '
target.hash_password()
| -4,729,170,507,548,841,000
|
Helper function that is a listener and hashes passwords before
insertion into the database.
:param mapper:
:param connect:
:param target:
|
lemur/users/models.py
|
hash_password
|
Brett-Wood/lemur
|
python
|
def hash_password(mapper, connect, target):
'\n Helper function that is a listener and hashes passwords before\n insertion into the database.\n\n :param mapper:\n :param connect:\n :param target:\n '
target.hash_password()
|
def check_password(self, password):
    """
    Hash a given password and check it against the stored value
    to determine its validity.

    :param password: plaintext candidate password
    :return: True if the password matches the stored hash, False otherwise
    """
    # Guard clause: accounts with no stored hash (e.g. externally
    # authenticated users) can never match. Previously this path fell
    # through and implicitly returned None; an explicit False keeps the
    # return type a proper boolean while remaining falsy for old callers.
    if not self.password:
        return False
    return bcrypt.check_password_hash(self.password, password)
| -8,769,349,069,280,813,000
|
Hash a given password and check it against the stored value
to determine it's validity.
:param password:
:return:
|
lemur/users/models.py
|
check_password
|
Brett-Wood/lemur
|
python
|
def check_password(self, password):
"\n Hash a given password and check it against the stored value\n to determine it's validity.\n\n :param password:\n :return:\n "
if self.password:
return bcrypt.check_password_hash(self.password, password)
|
def hash_password(self):
    """
    Generate the secure hash for the password and store it back on the
    instance.

    :return: None
    """
    # Nothing to do for accounts without a password set.
    if not self.password:
        return
    self.password = bcrypt.generate_password_hash(self.password).decode('utf-8')
| -2,520,262,000,120,174,600
|
Generate the secure hash for the password.
:return:
|
lemur/users/models.py
|
hash_password
|
Brett-Wood/lemur
|
python
|
def hash_password(self):
'\n Generate the secure hash for the password.\n\n :return:\n '
if self.password:
self.password = bcrypt.generate_password_hash(self.password).decode('utf-8')
|
@property
def is_admin(self):
    """
    Determine if the current user has the 'admin' role associated
    with it.

    :return: True if any associated role is named 'admin', else False
    """
    # Previously the loop implicitly returned None for non-admins; any()
    # returns an explicit boolean while staying falsy for existing callers.
    return any(role.name == 'admin' for role in self.roles)
| 6,724,741,202,377,436,000
|
Determine if the current user has the 'admin' role associated
with it.
:return:
|
lemur/users/models.py
|
is_admin
|
Brett-Wood/lemur
|
python
|
@property
def is_admin(self):
"\n Determine if the current user has the 'admin' role associated\n with it.\n\n :return:\n "
for role in self.roles:
if (role.name == 'admin'):
return True
|
def ceiling_thresh(progress, maximum):
    """Create a Progress object with the value clamped so that
    0 <= progress <= maximum; the upper clamp applies only when
    maximum is positive."""
    clamped = max(0, progress)
    if maximum > 0:
        clamped = min(clamped, maximum)
    return Progress(clamped, maximum)
| -8,003,298,595,079,599,000
|
Creates a progress object
Ensures that 0 < progress < maximum
|
requirements/progress.py
|
ceiling_thresh
|
georgiashay/fireroad-server2
|
python
|
def ceiling_thresh(progress, maximum):
'Creates a progress object\n Ensures that 0 < progress < maximum'
effective_progress = max(0, progress)
if (maximum > 0):
return Progress(min(effective_progress, maximum), maximum)
else:
return Progress(effective_progress, maximum)
|
def total_units(courses):
    """Return the sum of total_units across a list of Course objects."""
    return sum(course.total_units for course in courses)
| 4,904,410,876,397,925,000
|
Finds the total units in a list of Course objects
|
requirements/progress.py
|
total_units
|
georgiashay/fireroad-server2
|
python
|
def total_units(courses):
total = 0
for course in courses:
total += course.total_units
return total
|
def sum_progresses(progresses, criterion_type, maxFunc):
    """Fold a list of Progress objects into a single Progress by combining
    them pairwise.

    criterion_type: CRITERION_SUBJECTS or CRITERION_UNITS, selecting which
        fulfillment attribute each progress contributes.
    maxFunc: describes how to combine the maximums of two Progress objects.
    """
    if criterion_type == CRITERION_SUBJECTS:
        extract = lambda prog: prog.subject_fulfillment
    elif criterion_type == CRITERION_UNITS:
        extract = lambda prog: prog.unit_fulfillment
    return reduce(lambda acc, nxt: acc.combine(nxt, maxFunc), map(extract, progresses))
| -5,932,178,459,262,459,000
|
Adds together a list of Progress objects by combining them one by one
criterion_type: either subjects or units
maxFunc: describes how to combine the maximums of the Progress objects
|
requirements/progress.py
|
sum_progresses
|
georgiashay/fireroad-server2
|
python
|
def sum_progresses(progresses, criterion_type, maxFunc):
'Adds together a list of Progress objects by combining them one by one\n criterion_type: either subjects or units\n maxFunc: describes how to combine the maximums of the Progress objects'
if (criterion_type == CRITERION_SUBJECTS):
mapfunc = (lambda p: p.subject_fulfillment)
elif (criterion_type == CRITERION_UNITS):
mapfunc = (lambda p: p.unit_fulfillment)
sum_progress = reduce((lambda p1, p2: p1.combine(p2, maxFunc)), map(mapfunc, progresses))
return sum_progress
|
def force_unfill_progresses(satisfied_by_category, current_distinct_threshold, current_threshold):
    """Adjusts the fulfillment and progress of a RequirementsProgress object
    that has both a distinct threshold and a regular threshold.
    These requirements follow the form "X subjects/units from at least N
    categories".

    satisfied_by_category: list of lists of Courses satisfying each category
    current_distinct_threshold: threshold object for the distinct (category)
        threshold
    current_threshold: threshold object for the regular threshold
    Returns a (subject_progress, unit_progress) tuple of Progress objects.
    """
    subject_cutoff = current_threshold.cutoff_for_criterion(CRITERION_SUBJECTS)
    unit_cutoff = current_threshold.cutoff_for_criterion(CRITERION_UNITS)
    # Sort each category ascending by units so .pop() takes the highest-unit
    # course from each category first.
    # NOTE(review): map() must yield lists here (Python 2 semantics) because
    # the result is both .pop()'ed and re-iterated below -- confirm before
    # porting to Python 3.
    max_unit_subjects = map((lambda sat_cat: sorted(sat_cat, key=(lambda s: s.total_units))), satisfied_by_category)
    fixed_subject_progress = 0
    fixed_subject_max = current_distinct_threshold.get_actual_cutoff()
    fixed_unit_progress = 0
    fixed_unit_max = 0
    # One course is "fixed" per category to satisfy the distinct threshold;
    # empty categories still contribute the default unit count to the maximum.
    for category_subjects in max_unit_subjects:
        if (len(category_subjects) > 0):
            subject_to_count = category_subjects.pop()
            fixed_subject_progress += 1
            fixed_unit_progress += subject_to_count.total_units
            fixed_unit_max += subject_to_count.total_units
        else:
            fixed_unit_max += DEFAULT_UNIT_COUNT
    # The remaining (unpopped) courses compete for the leftover cutoff space.
    # (Removed two unused locals -- remaining_subject_progress and
    # remaining_unit_progress -- that duplicated the free_* computations.)
    free_courses = sorted([course for category in max_unit_subjects for course in category], key=(lambda s: s.total_units), reverse=True)
    free_subject_max = (subject_cutoff - fixed_subject_max)
    free_unit_max = (unit_cutoff - fixed_unit_max)
    free_subject_progress = min(len(free_courses), free_subject_max)
    free_unit_progress = min(total_units(free_courses), free_unit_max)
    subject_progress = Progress((fixed_subject_progress + free_subject_progress), subject_cutoff)
    unit_progress = Progress((fixed_unit_progress + free_unit_progress), unit_cutoff)
    return (subject_progress, unit_progress)
| 3,881,115,969,647,407,600
|
Adjusts the fulfillment and progress of RequirementsProgress object with both distinct thresholds and thresholds
These requirements follow the form "X subjects/units from at least N categories"
satisfied_by_category: list of lists of Courses for each category
current_distinct_threshold: threshold object for distinct threshold
current_threshold: threshold object for regular threshold
|
requirements/progress.py
|
force_unfill_progresses
|
georgiashay/fireroad-server2
|
python
|
def force_unfill_progresses(satisfied_by_category, current_distinct_threshold, current_threshold):
'Adjusts the fulfillment and progress of RequirementsProgress object with both distinct thresholds and thresholds\n These requirements follow the form "X subjects/units from at least N categories"\n satisfied_by_category: list of lists of Courses for each category\n current_distinct_threshold: threshold object for distinct threshold\n current_threshold: threshold object for regular threshold'
subject_cutoff = current_threshold.cutoff_for_criterion(CRITERION_SUBJECTS)
unit_cutoff = current_threshold.cutoff_for_criterion(CRITERION_UNITS)
max_unit_subjects = map((lambda sat_cat: sorted(sat_cat, key=(lambda s: s.total_units))), satisfied_by_category)
fixed_subject_progress = 0
fixed_subject_max = current_distinct_threshold.get_actual_cutoff()
fixed_unit_progress = 0
fixed_unit_max = 0
for category_subjects in max_unit_subjects:
if (len(category_subjects) > 0):
subject_to_count = category_subjects.pop()
fixed_subject_progress += 1
fixed_unit_progress += subject_to_count.total_units
fixed_unit_max += subject_to_count.total_units
else:
fixed_unit_max += DEFAULT_UNIT_COUNT
remaining_subject_progress = (subject_cutoff - fixed_subject_max)
remaining_unit_progress = (unit_cutoff - fixed_unit_max)
free_courses = sorted([course for category in max_unit_subjects for course in category], key=(lambda s: s.total_units), reverse=True)
free_subject_max = (subject_cutoff - fixed_subject_max)
free_unit_max = (unit_cutoff - fixed_unit_max)
free_subject_progress = min(len(free_courses), free_subject_max)
free_unit_progress = min(total_units(free_courses), free_unit_max)
subject_progress = Progress((fixed_subject_progress + free_subject_progress), subject_cutoff)
unit_progress = Progress((fixed_unit_progress + free_unit_progress), unit_cutoff)
return (subject_progress, unit_progress)
|
def __init__(self, statement, list_path):
    """Initialize a progress object for the given requirements statement,
    recursively building child progress objects for compound statements."""
    self.statement = statement
    self.threshold = statement.get_threshold()
    self.distinct_threshold = statement.get_distinct_threshold()
    self.list_path = list_path
    if statement.requirement is None:
        # Compound statement: one child progress per sub-requirement, with a
        # dotted path extension identifying its position.
        self.children = [
            RequirementsProgress(child, '{}.{}'.format(list_path, index))
            for (index, child) in enumerate(statement.requirements.iterator())
        ]
    else:
        self.children = []
| 7,988,293,559,431,877,000
|
Initializes a progress object with the given requirements statement.
|
requirements/progress.py
|
__init__
|
georgiashay/fireroad-server2
|
python
|
def __init__(self, statement, list_path):
self.statement = statement
self.threshold = self.statement.get_threshold()
self.distinct_threshold = self.statement.get_distinct_threshold()
self.list_path = list_path
self.children = []
if (self.statement.requirement is None):
for (index, child) in enumerate(self.statement.requirements.iterator()):
self.children.append(RequirementsProgress(child, ((list_path + '.') + str(index))))
|
def courses_satisfying_req(self, courses):
    """
    Return the whole courses and the half courses satisfying this
    requirement as a (whole, half) pair of lists.
    """
    req = self.statement.requirement
    if req is None:
        return ([], [])
    # GIR/HASS/CI requirements distinguish half-classes; everything else
    # treats all satisfying courses as whole.
    if ('GIR:' in req) or ('HASS' in req) or ('CI-' in req):
        matching = [c for c in courses if c.satisfies(req, courses)]
        whole = [c for c in matching if not c.is_half_class]
        half = [c for c in matching if c.is_half_class]
        return (whole, half)
    return ([c for c in courses if c.satisfies(req, courses)], [])
| -7,514,238,843,185,547,000
|
Returns the whole courses and the half courses satisfying this requirement
separately.
|
requirements/progress.py
|
courses_satisfying_req
|
georgiashay/fireroad-server2
|
python
|
def courses_satisfying_req(self, courses):
'\n Returns the whole courses and the half courses satisfying this requirement\n separately.\n '
if (self.statement.requirement is not None):
req = self.statement.requirement
if (('GIR:' in req) or ('HASS' in req) or ('CI-' in req)):
whole_courses = []
half_courses = []
for c in courses:
if (not c.satisfies(req, courses)):
continue
if c.is_half_class:
half_courses.append(c)
else:
whole_courses.append(c)
return (whole_courses, half_courses)
else:
return ([c for c in courses if c.satisfies(req, courses)], [])
return ([], [])
|
def override_requirement(self, manual_progress):
    """
    Sets the progress fulfillment variables based on a manual progress value,
    which is expressed in either units or subjects depending on the
    requirement's threshold criterion.

    Generates placeholder Course objects so that the satisfied-courses list
    has the right cardinality for the asserted progress.
    """
    self.is_fulfilled = (manual_progress >= self.threshold.get_actual_cutoff())
    subjects = 0
    units = 0
    satisfied_courses = set()
    # Convert the single manual value into both units and subjects using the
    # default per-subject unit count.
    # NOTE(review): '/' truncates only under Python 2; confirm the intended
    # integer division before porting to Python 3.
    if (self.threshold.criterion == CRITERION_UNITS):
        units = manual_progress
        subjects = (manual_progress / DEFAULT_UNIT_COUNT)
    else:
        units = (manual_progress * DEFAULT_UNIT_COUNT)
        subjects = manual_progress
    subject_progress = ceiling_thresh(subjects, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
    unit_progress = ceiling_thresh(units, self.threshold.cutoff_for_criterion(CRITERION_UNITS))
    # Unique random IDs make each placeholder course hash distinctly in the
    # satisfied_courses set.
    random_ids = random.sample(range(1000, max(10000, (subject_progress.progress + 1000))), subject_progress.progress)
    for rand_id in random_ids:
        dummy_course = Course(id=((self.list_path + '_') + str(rand_id)), subject_id=((('gen_course_' + self.list_path) + '_') + str(rand_id)), title=((('Generated Course ' + self.list_path) + ' ') + str(rand_id)))
        satisfied_courses.add(dummy_course)
    self.subject_fulfillment = subject_progress
    self.subject_progress = subject_progress.progress
    self.subject_max = subject_progress.max
    self.unit_fulfillment = unit_progress
    self.unit_progress = unit_progress.progress
    self.unit_max = unit_progress.max
    # The headline progress follows the threshold's criterion (units vs subjects).
    progress = (unit_progress if ((self.threshold is not None) and (self.threshold.criterion == CRITERION_UNITS)) else subject_progress)
    self.progress = progress.progress
    self.progress_max = progress.max
    self.percent_fulfilled = progress.get_percent()
    self.fraction_fulfilled = progress.get_fraction()
    self.satisfied_courses = list(satisfied_courses)
| -7,453,064,142,810,164,000
|
Sets the progress fulfillment variables based on a manual progress value, which is
expressed in either units or subjects depending on the requirement's threshold.
|
requirements/progress.py
|
override_requirement
|
georgiashay/fireroad-server2
|
python
|
def override_requirement(self, manual_progress):
"\n Sets the progress fulfillment variables based on a manual progress value, which is\n expressed in either units or subjects depending on the requirement's threshold.\n "
self.is_fulfilled = (manual_progress >= self.threshold.get_actual_cutoff())
subjects = 0
units = 0
satisfied_courses = set()
if (self.threshold.criterion == CRITERION_UNITS):
units = manual_progress
subjects = (manual_progress / DEFAULT_UNIT_COUNT)
else:
units = (manual_progress * DEFAULT_UNIT_COUNT)
subjects = manual_progress
subject_progress = ceiling_thresh(subjects, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
unit_progress = ceiling_thresh(units, self.threshold.cutoff_for_criterion(CRITERION_UNITS))
random_ids = random.sample(range(1000, max(10000, (subject_progress.progress + 1000))), subject_progress.progress)
for rand_id in random_ids:
dummy_course = Course(id=((self.list_path + '_') + str(rand_id)), subject_id=((('gen_course_' + self.list_path) + '_') + str(rand_id)), title=((('Generated Course ' + self.list_path) + ' ') + str(rand_id)))
satisfied_courses.add(dummy_course)
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
progress = (unit_progress if ((self.threshold is not None) and (self.threshold.criterion == CRITERION_UNITS)) else subject_progress)
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.satisfied_courses = list(satisfied_courses)
|
def compute_assertions(self, courses, progress_assertions):
    """
    Computes the fulfillment of this requirement based on progress assertions,
    and returns True if the requirement has an assertion available or False
    otherwise.

    Assertions are in the format of a dictionary keyed by requirements list
    paths, where the values are dictionaries containing three possible keys:
    "substitutions", which should be a list of course IDs that combine to
    substitute for the requirement, "ignore", which indicates that the
    requirement is not to be used when satisfying later requirements, and
    "override", which is equivalent to the old manual progress value and
    indicates a progress toward the requirement in the unit specified by the
    requirement's threshold type (only used if the requirement is a plain
    string requirement and has a threshold). The order of precedence is
    override, ignore, substitutions.
    """
    self.assertion = progress_assertions.get(self.list_path, None)
    self.is_bypassed = False
    if (self.assertion is not None):
        substitutions = self.assertion.get('substitutions', None)
        ignore = self.assertion.get('ignore', False)
        override = self.assertion.get('override', 0)
    else:
        substitutions = None
        ignore = False
        override = 0
    # Precedence: override > ignore > substitutions.
    if (self.statement.is_plain_string and (self.threshold is not None) and override):
        self.override_requirement(override)
        return True
    if ignore:
        # Ignored requirements are zeroed out entirely and count toward nothing.
        self.is_fulfilled = False
        subject_progress = Progress(0, 0)
        self.subject_fulfillment = subject_progress
        self.subject_progress = subject_progress.progress
        self.subject_max = subject_progress.max
        unit_progress = Progress(0, 0)
        self.unit_fulfillment = unit_progress
        self.unit_progress = unit_progress.progress
        self.unit_max = unit_progress.max
        progress = Progress(0, 0)
        self.progress = progress.progress
        self.progress_max = progress.max
        self.percent_fulfilled = progress.get_percent()
        self.fraction_fulfilled = progress.get_fraction()
        self.satisfied_courses = []
        return True
    if (substitutions is not None):
        satisfied_courses = set()
        subs_satisfied = 0
        units_satisfied = 0
        # Each substitution is matched by at most one course (first match wins).
        for sub in substitutions:
            for course in courses:
                if course.satisfies(sub, courses):
                    subs_satisfied += 1
                    units_satisfied += course.total_units
                    satisfied_courses.add(course)
                    break
        if (self.statement.is_plain_string and (self.threshold is not None)):
            # Thresholded plain-string requirement: measure against cutoffs.
            subject_progress = Progress(subs_satisfied, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
            unit_progress = Progress(units_satisfied, self.threshold.cutoff_for_criterion(CRITERION_UNITS))
            progress = (subject_progress if (self.threshold.criterion == CRITERION_SUBJECTS) else unit_progress)
            self.is_fulfilled = (progress.progress == progress.max)
        else:
            # Without a threshold, every listed substitution must be satisfied.
            subject_progress = Progress(subs_satisfied, len(substitutions))
            self.is_fulfilled = (subs_satisfied == len(substitutions))
            unit_progress = Progress((subs_satisfied * DEFAULT_UNIT_COUNT), (len(substitutions) * DEFAULT_UNIT_COUNT))
            progress = subject_progress
        self.subject_fulfillment = subject_progress
        self.subject_progress = subject_progress.progress
        self.subject_max = subject_progress.max
        self.unit_fulfillment = unit_progress
        self.unit_progress = unit_progress.progress
        self.unit_max = unit_progress.max
        self.progress = progress.progress
        self.progress_max = progress.max
        self.percent_fulfilled = progress.get_percent()
        self.fraction_fulfilled = progress.get_fraction()
        self.satisfied_courses = list(satisfied_courses)
        return True
    return False
| 998,024,915,000,721,000
|
Computes the fulfillment of this requirement based on progress assertions, and returns
True if the requirement has an assertion available or False otherwise.
Assertions are in the format of a dictionary keyed by requirements list paths, where the
values are dictionaries containing three possible keys: "substitutions", which should be a
list of course IDs that combine to substitute for the requirement, "ignore", which
indicates that the requirement is not to be used when satisfying later requirements, and
"override", which is equivalent to the old manual progress value and indicates a progress
toward the requirement in the unit specified by the requirement's threshold type (only
used if the requirement is a plain string requirement and has a threshold). The order of
precedence is override, ignore, substitutions.
|
requirements/progress.py
|
compute_assertions
|
georgiashay/fireroad-server2
|
python
|
def compute_assertions(self, courses, progress_assertions):
'\n Computes the fulfillment of this requirement based on progress assertions, and returns\n True if the requirement has an assertion available or False otherwise.\n\n Assertions are in the format of a dictionary keyed by requirements list paths, where the\n values are dictionaries containing three possible keys: "substitutions", which should be a\n list of course IDs that combine to substitute for the requirement, "ignore", which\n indicates that the requirement is not to be used when satisfying later requirements, and\n "override", which is equivalent to the old manual progress value and indicates a progress\n toward the requirement in the unit specified by the requirement\'s threshold type (only\n used if the requirement is a plain string requirement and has a threshold). The order of\n precedence is override, ignore, substitutions.\n '
self.assertion = progress_assertions.get(self.list_path, None)
self.is_bypassed = False
if (self.assertion is not None):
substitutions = self.assertion.get('substitutions', None)
ignore = self.assertion.get('ignore', False)
override = self.assertion.get('override', 0)
else:
substitutions = None
ignore = False
override = 0
if (self.statement.is_plain_string and (self.threshold is not None) and override):
self.override_requirement(override)
return True
if ignore:
self.is_fulfilled = False
subject_progress = Progress(0, 0)
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
unit_progress = Progress(0, 0)
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
progress = Progress(0, 0)
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.satisfied_courses = []
return True
if (substitutions is not None):
satisfied_courses = set()
subs_satisfied = 0
units_satisfied = 0
for sub in substitutions:
for course in courses:
if course.satisfies(sub, courses):
subs_satisfied += 1
units_satisfied += course.total_units
satisfied_courses.add(course)
break
if (self.statement.is_plain_string and (self.threshold is not None)):
subject_progress = Progress(subs_satisfied, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
unit_progress = Progress(units_satisfied, self.threshold.cutoff_for_criterion(CRITERION_UNITS))
progress = (subject_progress if (self.threshold.criterion == CRITERION_SUBJECTS) else unit_progress)
self.is_fulfilled = (progress.progress == progress.max)
else:
subject_progress = Progress(subs_satisfied, len(substitutions))
self.is_fulfilled = (subs_satisfied == len(substitutions))
unit_progress = Progress((subs_satisfied * DEFAULT_UNIT_COUNT), (len(substitutions) * DEFAULT_UNIT_COUNT))
progress = subject_progress
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.satisfied_courses = list(satisfied_courses)
return True
return False
|
def bypass_children(self):
    """Recursively mark every descendant progress object as bypassed, zeroing
    out all of its fulfillment state."""
    for child in self.children:
        child.is_bypassed = True
        child.is_fulfilled = False
        # Fresh Progress instances per child -- these may be mutated later,
        # so they must not be shared.
        child.subject_fulfillment = Progress(0, 0)
        child.unit_fulfillment = Progress(0, 0)
        for attr in ('subject_progress', 'subject_max', 'unit_progress',
                     'unit_max', 'progress', 'progress_max',
                     'percent_fulfilled', 'fraction_fulfilled'):
            setattr(child, attr, 0)
        child.satisfied_courses = []
        child.assertion = None
        child.bypass_children()
| -4,900,640,971,778,047,000
|
Sets the is_bypassed flag of the recursive children of this progress object to True.
|
requirements/progress.py
|
bypass_children
|
georgiashay/fireroad-server2
|
python
|
def bypass_children(self):
for child in self.children:
child.is_bypassed = True
child.is_fulfilled = False
child.subject_fulfillment = Progress(0, 0)
child.subject_progress = 0
child.subject_max = 0
child.unit_fulfillment = Progress(0, 0)
child.unit_progress = 0
child.unit_max = 0
child.progress = 0
child.progress_max = 0
child.percent_fulfilled = 0
child.fraction_fulfilled = 0
child.satisfied_courses = []
child.assertion = None
child.bypass_children()
|
def compute(self, courses, progress_overrides, progress_assertions):
    """
    Computes and stores the status of the requirements statement using the
    given list of Course objects, honoring progress assertions and manual
    progress overrides.

    Sets is_fulfilled, the subject/unit progress fields, percent/fraction
    fulfilled, and the list of satisfied courses on this object, and
    recursively computes all child progress objects.
    """
    satisfied_courses = set()
    # Assertions (override/ignore/substitutions) take precedence over any
    # computed progress; when one applies, children are bypassed entirely.
    if self.compute_assertions(courses, progress_assertions):
        self.bypass_children()
        return
    if (self.list_path in progress_overrides):
        manual_progress = progress_overrides[self.list_path]
    else:
        manual_progress = 0
    self.is_bypassed = False
    self.assertion = None
    # Leaf case: this statement names a single requirement string.
    if (self.statement.requirement is not None):
        if (self.statement.is_plain_string and (manual_progress != 0) and (self.threshold is not None)):
            # Legacy manual-progress override for thresholded plain strings.
            self.override_requirement(manual_progress)
            return
        else:
            (whole_courses, half_courses) = self.courses_satisfying_req(courses)
            satisfied_courses = (whole_courses + half_courses)
            if (not (self.threshold is None)):
                # Two half-classes count as one subject toward the cutoff.
                subject_progress = ceiling_thresh((len(whole_courses) + (len(half_courses) // 2)), self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                unit_progress = ceiling_thresh(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
            else:
                # No threshold: a single satisfying course fulfills the leaf.
                progress_subjects = min(len(satisfied_courses), 1)
                is_fulfilled = (len(satisfied_courses) > 0)
                subject_progress = ceiling_thresh(progress_subjects, 1)
                if (len(satisfied_courses) > 0):
                    unit_progress = ceiling_thresh(list(satisfied_courses)[0].total_units, DEFAULT_UNIT_COUNT)
                else:
                    unit_progress = ceiling_thresh(0, DEFAULT_UNIT_COUNT)
            progress = (unit_progress if ((self.threshold is not None) and (self.threshold.criterion == CRITERION_UNITS)) else subject_progress)
    # Compound case: aggregate the children's progress.
    if (len(self.children) > 0):
        num_reqs_satisfied = 0
        satisfied_by_category = []
        satisfied_courses = set()
        num_courses_satisfied = 0
        open_children = []
        for req_progress in self.children:
            req_progress.compute(courses, progress_overrides, progress_assertions)
            req_satisfied_courses = req_progress.satisfied_courses
            # Children asserted as "ignore" are excluded from aggregation.
            if (req_progress.assertion and req_progress.assertion.get('ignore', False)):
                continue
            open_children.append(req_progress)
            if (req_progress.is_fulfilled and (len(req_progress.satisfied_courses) > 0)):
                num_reqs_satisfied += 1
            satisfied_courses.update(req_satisfied_courses)
            satisfied_by_category.append(list(req_satisfied_courses))
            # An all-of child with sub-requirements counts as at most one "course".
            if ((req_progress.statement.connection_type == CONNECTION_TYPE_ALL) and req_progress.children):
                num_courses_satisfied += (req_progress.is_fulfilled and (len(req_progress.satisfied_courses) > 0))
            else:
                num_courses_satisfied += len(req_satisfied_courses)
        # Rank children (and their satisfying-course lists, in lockstep) by
        # raw fulfillment fraction, best first.
        satisfied_by_category = [sat for (prog, sat) in sorted(zip(open_children, satisfied_by_category), key=(lambda z: z[0].raw_fraction_fulfilled), reverse=True)]
        sorted_progresses = sorted(open_children, key=(lambda req: req.raw_fraction_fulfilled), reverse=True)
        if ((self.threshold is None) and (self.distinct_threshold is None)):
            is_fulfilled = (num_reqs_satisfied > 0)
            if (self.statement.connection_type == CONNECTION_TYPE_ANY):
                # any-of: progress follows the single best child.
                if (len(sorted_progresses) > 0):
                    subject_progress = sorted_progresses[0].subject_fulfillment
                    unit_progress = sorted_progresses[0].unit_fulfillment
                else:
                    subject_progress = Progress(0, 0)
                    unit_progress = Progress(0, 0)
            else:
                # all-of: sum the children's progress.
                subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, None)
                unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, None)
        else:
            if (self.distinct_threshold is not None):
                # Only the top N distinct categories count toward fulfillment.
                num_progresses_to_count = min(self.distinct_threshold.get_actual_cutoff(), len(sorted_progresses))
                sorted_progresses = sorted_progresses[:num_progresses_to_count]
                satisfied_by_category = satisfied_by_category[:num_progresses_to_count]
                satisfied_courses = set()
                num_courses_satisfied = 0
                for (i, child) in zip(range(num_progresses_to_count), open_children):
                    satisfied_courses.update(satisfied_by_category[i])
                    if (child.statement.connection_type == CONNECTION_TYPE_ALL):
                        num_courses_satisfied += (child.is_fulfilled and (len(child.satisfied_courses) > 0))
                    else:
                        num_courses_satisfied += len(satisfied_by_category[i])
            if ((self.threshold is None) and (self.distinct_threshold is not None)):
                # NOTE(review): 'self.distinct_threshold == THRESHOLD_TYPE_GTE'
                # compares a threshold OBJECT to a type constant; the sibling
                # test uses '.type', so this was likely meant to be
                # 'self.distinct_threshold.type == THRESHOLD_TYPE_GTE' -- confirm.
                if ((self.distinct_threshold == THRESHOLD_TYPE_GTE) or (self.distinct_threshold.type == THRESHOLD_TYPE_GT)):
                    is_fulfilled = (num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff())
                else:
                    is_fulfilled = True
                subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, (lambda x: max(x, 1)))
                unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, (lambda x: (x, DEFAULT_UNIT_COUNT)[(x == 0)]))
            elif (self.threshold is not None):
                subject_progress = Progress(num_courses_satisfied, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                unit_progress = Progress(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                if ((self.distinct_threshold is not None) and ((self.distinct_threshold.type == THRESHOLD_TYPE_GT) or (self.distinct_threshold.type == THRESHOLD_TYPE_GTE))):
                    is_fulfilled = (self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress) and (num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff()))
                    if (num_reqs_satisfied < self.distinct_threshold.get_actual_cutoff()):
                        # Not enough distinct categories: cap the reported progress.
                        (subject_progress, unit_progress) = force_unfill_progresses(satisfied_by_category, self.distinct_threshold, self.threshold)
                else:
                    is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
        if (self.statement.connection_type == CONNECTION_TYPE_ALL):
            # all-of: every non-ignored child must be fulfilled; widen the max
            # so an incomplete all-of never reads as 100% complete.
            is_fulfilled = (is_fulfilled and (num_reqs_satisfied == len(open_children)))
            if ((subject_progress.progress == subject_progress.max) and (len(open_children) > num_reqs_satisfied)):
                subject_progress.max += (len(open_children) - num_reqs_satisfied)
                unit_progress.max += ((len(open_children) - num_reqs_satisfied) * DEFAULT_UNIT_COUNT)
        subject_progress = ceiling_thresh(subject_progress.progress, subject_progress.max)
        unit_progress = ceiling_thresh(unit_progress.progress, unit_progress.max)
        progress = (unit_progress if ((self.threshold is not None) and (self.threshold.criterion == CRITERION_UNITS)) else subject_progress)
    progress_units = (CRITERION_SUBJECTS if (self.threshold is None) else self.threshold.criterion)
    self.is_fulfilled = is_fulfilled
    self.subject_fulfillment = subject_progress
    self.subject_progress = subject_progress.progress
    self.subject_max = subject_progress.max
    self.unit_fulfillment = unit_progress
    self.unit_progress = unit_progress.progress
    self.unit_max = unit_progress.max
    self.progress = progress.progress
    self.progress_max = progress.max
    self.percent_fulfilled = progress.get_percent()
    self.fraction_fulfilled = progress.get_fraction()
    self.raw_fraction_fulfilled = progress.get_raw_fraction(progress_units)
    self.satisfied_courses = list(satisfied_courses)
| 8,909,662,200,190,154,000
|
Computes and stores the status of the requirements statement using the
given list of Course objects.
|
requirements/progress.py
|
compute
|
georgiashay/fireroad-server2
|
python
|
def compute(self, courses, progress_overrides, progress_assertions):
    """Compute and store the fulfillment status of this requirements
    statement using the given list of Course objects.

    Args:
        courses: list of Course objects the user has selected.
        progress_overrides: dict mapping list paths to manual progress
            values (used for plain-string requirements).
        progress_assertions: dict of user-specified assertions
            (substitutions/ignores) keyed by list path.

    Side effects: sets is_fulfilled, subject/unit fulfillment and
    progress fields, percent/fraction fulfilled, and satisfied_courses
    on self (and recursively on all children).
    """
    satisfied_courses = set()
    # User assertions take precedence over any computed progress.
    if self.compute_assertions(courses, progress_assertions):
        self.bypass_children()
        return
    if (self.list_path in progress_overrides):
        manual_progress = progress_overrides[self.list_path]
    else:
        manual_progress = 0
    self.is_bypassed = False
    self.assertion = None
    if (self.statement.requirement is not None):
        # Leaf requirement (a single course/string requirement).
        if (self.statement.is_plain_string and (manual_progress != 0) and (self.threshold is not None)):
            # Plain-string requirement with a manual progress override.
            self.override_requirement(manual_progress)
            return
        else:
            (whole_courses, half_courses) = self.courses_satisfying_req(courses)
            satisfied_courses = (whole_courses + half_courses)
            if (not (self.threshold is None)):
                # Two half-courses together count as one whole subject.
                subject_progress = ceiling_thresh((len(whole_courses) + (len(half_courses) // 2)), self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                unit_progress = ceiling_thresh(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
            else:
                # No threshold: any single satisfying course fulfills it.
                progress_subjects = min(len(satisfied_courses), 1)
                is_fulfilled = (len(satisfied_courses) > 0)
                subject_progress = ceiling_thresh(progress_subjects, 1)
                if (len(satisfied_courses) > 0):
                    unit_progress = ceiling_thresh(list(satisfied_courses)[0].total_units, DEFAULT_UNIT_COUNT)
                else:
                    unit_progress = ceiling_thresh(0, DEFAULT_UNIT_COUNT)
            progress = (unit_progress if ((self.threshold is not None) and (self.threshold.criterion == CRITERION_UNITS)) else subject_progress)
    if (len(self.children) > 0):
        # Compound requirement: aggregate progress over child statements.
        num_reqs_satisfied = 0
        satisfied_by_category = []
        satisfied_courses = set()
        num_courses_satisfied = 0
        open_children = []
        for req_progress in self.children:
            req_progress.compute(courses, progress_overrides, progress_assertions)
            req_satisfied_courses = req_progress.satisfied_courses
            # Children explicitly ignored by a user assertion don't count
            # toward or against this statement.
            if (req_progress.assertion and req_progress.assertion.get('ignore', False)):
                continue
            open_children.append(req_progress)
            if (req_progress.is_fulfilled and (len(req_progress.satisfied_courses) > 0)):
                num_reqs_satisfied += 1
            satisfied_courses.update(req_satisfied_courses)
            satisfied_by_category.append(list(req_satisfied_courses))
            if ((req_progress.statement.connection_type == CONNECTION_TYPE_ALL) and req_progress.children):
                # A nested all-statement counts as a single "course".
                num_courses_satisfied += (req_progress.is_fulfilled and (len(req_progress.satisfied_courses) > 0))
            else:
                num_courses_satisfied += len(req_satisfied_courses)
        # Order children (and their satisfying courses) by completion.
        satisfied_by_category = [sat for (prog, sat) in sorted(zip(open_children, satisfied_by_category), key=(lambda z: z[0].raw_fraction_fulfilled), reverse=True)]
        sorted_progresses = sorted(open_children, key=(lambda req: req.raw_fraction_fulfilled), reverse=True)
        if ((self.threshold is None) and (self.distinct_threshold is None)):
            is_fulfilled = (num_reqs_satisfied > 0)
            if (self.statement.connection_type == CONNECTION_TYPE_ANY):
                # "any" with no threshold: report the best child's progress.
                if (len(sorted_progresses) > 0):
                    subject_progress = sorted_progresses[0].subject_fulfillment
                    unit_progress = sorted_progresses[0].unit_fulfillment
                else:
                    subject_progress = Progress(0, 0)
                    unit_progress = Progress(0, 0)
            else:
                subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, None)
                unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, None)
        else:
            if (self.distinct_threshold is not None):
                # Only the top N distinct categories count toward fulfillment.
                num_progresses_to_count = min(self.distinct_threshold.get_actual_cutoff(), len(sorted_progresses))
                sorted_progresses = sorted_progresses[:num_progresses_to_count]
                satisfied_by_category = satisfied_by_category[:num_progresses_to_count]
                satisfied_courses = set()
                num_courses_satisfied = 0
                for (i, child) in zip(range(num_progresses_to_count), open_children):
                    satisfied_courses.update(satisfied_by_category[i])
                    if (child.statement.connection_type == CONNECTION_TYPE_ALL):
                        num_courses_satisfied += (child.is_fulfilled and (len(child.satisfied_courses) > 0))
                    else:
                        num_courses_satisfied += len(satisfied_by_category[i])
            if ((self.threshold is None) and (self.distinct_threshold is not None)):
                # BUGFIX: the original compared the threshold *object* itself
                # to THRESHOLD_TYPE_GTE (always False); compare its .type,
                # mirroring the GT comparison and the branch below.
                if ((self.distinct_threshold.type == THRESHOLD_TYPE_GTE) or (self.distinct_threshold.type == THRESHOLD_TYPE_GT)):
                    is_fulfilled = (num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff())
                else:
                    is_fulfilled = True
                subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, (lambda x: max(x, 1)))
                unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, (lambda x: (x, DEFAULT_UNIT_COUNT)[(x == 0)]))
            elif (self.threshold is not None):
                subject_progress = Progress(num_courses_satisfied, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                unit_progress = Progress(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                if ((self.distinct_threshold is not None) and ((self.distinct_threshold.type == THRESHOLD_TYPE_GT) or (self.distinct_threshold.type == THRESHOLD_TYPE_GTE))):
                    is_fulfilled = (self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress) and (num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff()))
                    if (num_reqs_satisfied < self.distinct_threshold.get_actual_cutoff()):
                        # Not enough distinct categories: cap displayed progress.
                        (subject_progress, unit_progress) = force_unfill_progresses(satisfied_by_category, self.distinct_threshold, self.threshold)
                else:
                    is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
        if (self.statement.connection_type == CONNECTION_TYPE_ALL):
            # An "all" statement requires every open child to be satisfied.
            is_fulfilled = (is_fulfilled and (num_reqs_satisfied == len(open_children)))
            if ((subject_progress.progress == subject_progress.max) and (len(open_children) > num_reqs_satisfied)):
                # Don't show 100% while some children remain unsatisfied.
                subject_progress.max += (len(open_children) - num_reqs_satisfied)
                unit_progress.max += ((len(open_children) - num_reqs_satisfied) * DEFAULT_UNIT_COUNT)
        subject_progress = ceiling_thresh(subject_progress.progress, subject_progress.max)
        unit_progress = ceiling_thresh(unit_progress.progress, unit_progress.max)
        progress = (unit_progress if ((self.threshold is not None) and (self.threshold.criterion == CRITERION_UNITS)) else subject_progress)
    progress_units = (CRITERION_SUBJECTS if (self.threshold is None) else self.threshold.criterion)
    self.is_fulfilled = is_fulfilled
    self.subject_fulfillment = subject_progress
    self.subject_progress = subject_progress.progress
    self.subject_max = subject_progress.max
    self.unit_fulfillment = unit_progress
    self.unit_progress = unit_progress.progress
    self.unit_max = unit_progress.max
    self.progress = progress.progress
    self.progress_max = progress.max
    self.percent_fulfilled = progress.get_percent()
    self.fraction_fulfilled = progress.get_fraction()
    self.raw_fraction_fulfilled = progress.get_raw_fraction(progress_units)
    self.satisfied_courses = list(satisfied_courses)
|
def to_json_object(self, full=True, child_fn=None):
    """Return a JSON-serializable dict for the enclosed requirements
    statement, augmented with progress information.

    Args:
        full: if True, recursively include the children's JSON objects.
        child_fn: optional callable applied to each child to produce its
            JSON object (defaults to the child's own to_json_object).
    """
    stmt_json = self.statement.to_json_object(full=False)
    stmt_json[JSONProgressConstants.is_fulfilled] = self.is_fulfilled
    stmt_json[JSONProgressConstants.progress] = self.progress
    stmt_json[JSONProgressConstants.progress_max] = self.progress_max
    stmt_json[JSONProgressConstants.percent_fulfilled] = self.percent_fulfilled
    # Use a list comprehension rather than map(): under Python 3, map()
    # returns a lazy, single-use iterator that json serialization rejects.
    stmt_json[JSONProgressConstants.satisfied_courses] = [c.subject_id for c in self.satisfied_courses]
    # Only include the optional flags when they carry information.
    if self.is_bypassed:
        stmt_json[JSONProgressConstants.is_bypassed] = self.is_bypassed
    if self.assertion:
        stmt_json[JSONProgressConstants.assertion] = self.assertion
    if full:
        if self.children:
            if (child_fn is None):
                child_fn = (lambda c: c.to_json_object(full=full))
            stmt_json[JSONConstants.requirements] = [child_fn(child) for child in self.children]
    return stmt_json
| -2,686,563,129,011,772,000
|
Returns a JSON dictionary containing the dictionary representation of
the enclosed requirements statement, as well as progress information.
|
requirements/progress.py
|
to_json_object
|
georgiashay/fireroad-server2
|
python
|
def to_json_object(self, full=True, child_fn=None):
'Returns a JSON dictionary containing the dictionary representation of\n the enclosed requirements statement, as well as progress information.'
stmt_json = self.statement.to_json_object(full=False)
stmt_json[JSONProgressConstants.is_fulfilled] = self.is_fulfilled
stmt_json[JSONProgressConstants.progress] = self.progress
stmt_json[JSONProgressConstants.progress_max] = self.progress_max
stmt_json[JSONProgressConstants.percent_fulfilled] = self.percent_fulfilled
stmt_json[JSONProgressConstants.satisfied_courses] = map((lambda c: c.subject_id), self.satisfied_courses)
if self.is_bypassed:
stmt_json[JSONProgressConstants.is_bypassed] = self.is_bypassed
if self.assertion:
stmt_json[JSONProgressConstants.assertion] = self.assertion
if full:
if self.children:
if (child_fn is None):
child_fn = (lambda c: c.to_json_object(full=full))
stmt_json[JSONConstants.requirements] = [child_fn(child) for child in self.children]
return stmt_json
|
def test_create_user_with_lowercase_email(self):
    """Creating a user should store the email address in lowercase."""
    email = 'example@example.com'
    password = '1111qqqq='
    user = get_user_model().objects.create_user(email=email, password=password)
    self.assertEqual(user.email, email.lower())
|
Test creating a new user with a lowercase email address
|
app/core/tests/test_models.py
|
test_create_user_with_lowercase_email
|
pudka/recipe-app-api
|
python
|
def test_create_user_with_lowercase_email(self):
' '
payload = {'email': 'example@example.com', 'password': '1111qqqq='}
user = get_user_model().objects.create_user(email=payload['email'], password=payload['password'])
self.assertEqual(user.email, payload['email'].lower())
|
def test_create_user_with_invalid_email(self):
    """Creating a user with no email address should raise ValueError."""
    user_manager = get_user_model().objects
    with self.assertRaises(ValueError):
        user_manager.create_user(None, '1234325')
|
Test creating a new user with an invalid email address
|
app/core/tests/test_models.py
|
test_create_user_with_invalid_email
|
pudka/recipe-app-api
|
python
|
def test_create_user_with_invalid_email(self):
' '
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, '1234325')
|
def test_create_superuser_is_successful(self):
    """create_superuser should mark the new user as staff and superuser."""
    superuser = get_user_model().objects.create_superuser('example@example.com', '1234')
    for flag in (superuser.is_superuser, superuser.is_staff):
        self.assertTrue(flag)
|
Test that create a new superuser
|
app/core/tests/test_models.py
|
test_create_superuser_is_successful
|
pudka/recipe-app-api
|
python
|
def test_create_superuser_is_successful(self):
' '
user = get_user_model().objects.create_superuser('example@example.com', '1234')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
@staticmethod
def getSubtableClass(format):
    """Return the subtable class registered for *format*, falling back
    to the unknown-format class."""
    try:
        return cmap_classes[format]
    except KeyError:
        return cmap_format_unknown
| 6,981,419,533,710,048,000
|
Return the subtable class for a format.
|
FontTools/fontTools/ttLib/tables/_c_m_a_p.py
|
getSubtableClass
|
johanoren/IncrementalNumbers
|
python
|
@staticmethod
def getSubtableClass(format):
return cmap_classes.get(format, cmap_format_unknown)
|
@staticmethod
def newSubtable(format):
    """Instantiate a fresh cmap subtable appropriate for *format*."""
    # Look up the class for this format and construct it in one step.
    return CmapSubtable.getSubtableClass(format)(format)
| -8,252,059,341,019,018,000
|
Return a new instance of a subtable for format.
|
FontTools/fontTools/ttLib/tables/_c_m_a_p.py
|
newSubtable
|
johanoren/IncrementalNumbers
|
python
|
@staticmethod
def newSubtable(format):
subtableClass = CmapSubtable.getSubtableClass(format)
return subtableClass(format)
|
def getEncoding(self, default=None):
    'Returns the Python encoding name for this cmap subtable based on its platformID,\n\t\tplatEncID, and language. If encoding for these values is not known, by default\n\t\tNone is returned. That can be overriden by passing a value to the default\n\t\targument.\n\n\t\tNote that if you want to choose a "preferred" cmap subtable, most of the time\n\t\tself.isUnicode() is what you want as that one only returns true for the modern,\n\t\tcommonly used, Unicode-compatible triplets, not the legacy ones.\n\t\t'
    # Delegates to the module-level getEncoding() helper, passing this
    # subtable's identifying (platformID, platEncID, language) triplet.
    return getEncoding(self.platformID, self.platEncID, self.language, default)
| -8,219,492,945,941,369,000
|
Returns the Python encoding name for this cmap subtable based on its platformID,
platEncID, and language. If encoding for these values is not known, by default
None is returned. That can be overridden by passing a value to the default
argument.
Note that if you want to choose a "preferred" cmap subtable, most of the time
self.isUnicode() is what you want as that one only returns true for the modern,
commonly used, Unicode-compatible triplets, not the legacy ones.
|
FontTools/fontTools/ttLib/tables/_c_m_a_p.py
|
getEncoding
|
johanoren/IncrementalNumbers
|
python
|
def getEncoding(self, default=None):
'Returns the Python encoding name for this cmap subtable based on its platformID,\n\t\tplatEncID, and language. If encoding for these values is not known, by default\n\t\tNone is returned. That can be overriden by passing a value to the default\n\t\targument.\n\n\t\tNote that if you want to choose a "preferred" cmap subtable, most of the time\n\t\tself.isUnicode() is what you want as that one only returns true for the modern,\n\t\tcommonly used, Unicode-compatible triplets, not the legacy ones.\n\t\t'
return getEncoding(self.platformID, self.platEncID, self.language, default)
|
def __init__(self, rate, validate_args=False, allow_nan_stats=True, name='Exponential'):
    'Construct Exponential distribution with parameter `rate`.\n\n    Args:\n      rate: Floating point tensor, equivalent to `1 / mean`. Must contain only\n        positive values.\n      validate_args: Python `bool`, default `False`. When `True` distribution\n        parameters are checked for validity despite possibly degrading runtime\n        performance. When `False` invalid inputs may silently render incorrect\n        outputs.\n      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the\n        result is undefined. When `False`, an exception is raised if one or\n        more of the statistic\'s batch members are undefined.\n      name: Python `str` name prefixed to Ops created by this class.\n    '
    # Capture constructor args for `self.parameters` before creating ops.
    parameters = locals()
    with ops.name_scope(name, values=[rate]):
        self._rate = ops.convert_to_tensor(rate, name='rate')
    # Exponential(rate) is Gamma(concentration=1., rate): delegate to Gamma.
    super(Exponential, self).__init__(concentration=array_ops.ones([], dtype=self._rate.dtype), rate=self._rate, allow_nan_stats=allow_nan_stats, validate_args=validate_args, name=name)
    # NOTE(review): upstream TF uses a `distributions.ReparameterizationType`
    # instance here; the bare boolean `True` looks suspicious — confirm.
    self._reparameterization_type = True
    self._parameters = parameters
    self._graph_parents += [self._rate]
| 6,270,380,275,110,105,000
|
Construct Exponential distribution with parameter `rate`.
Args:
rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
|
venv1/Lib/site-packages/tensorflow/python/ops/distributions/exponential.py
|
__init__
|
Soum-Soum/Tensorflow_Face_Finder
|
python
|
def __init__(self, rate, validate_args=False, allow_nan_stats=True, name='Exponential'):
'Construct Exponential distribution with parameter `rate`.\n\n Args:\n rate: Floating point tensor, equivalent to `1 / mean`. Must contain only\n positive values.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value "`NaN`" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic\'s batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n '
parameters = locals()
with ops.name_scope(name, values=[rate]):
self._rate = ops.convert_to_tensor(rate, name='rate')
super(Exponential, self).__init__(concentration=array_ops.ones([], dtype=self._rate.dtype), rate=self._rate, allow_nan_stats=allow_nan_stats, validate_args=validate_args, name=name)
self._reparameterization_type = True
self._parameters = parameters
self._graph_parents += [self._rate]
|
def set_model_constants(xx=50000.0, nx=100, va=10.0, tmax=(((60 * 360) * 24) * 3600.0), avep=(24 * 3600.0), dt=3600.0, period=(((3600 * 24) * 360) * 1), B=2.0, T0=(273.15 + 6), dT=2.0, Cs=0.001, Cp=1030.0, ra=1.5, ro=1030.0, ri=900.0, Cpo=4000.0, Cpi=2900.0, H=200.0, vo=0.2, Hb=1000.0, Li=3300000.0, Tf=(273.15 - 1.8), SW0=50.0, SW_anom=100.0, emissivity=0.99, Da=1000000.0, Do=500.0, tau_entrainment=((30 * 24) * 3600.0), **args):
    """Build and return the model-constants dictionary.

    Every constant has a fixed default, but callers may override any of
    them, and may also inject entirely arbitrary extra entries, through
    keyword arguments (**args).
    """
    # Collect all named constants into the dictionary in one literal.
    C = {
        'xx': xx, 'nx': nx, 'va': va, 'tmax': tmax, 'dt': dt,
        'avep': avep, 'period': period, 'Cs': Cs, 'Cp': Cp,
        'ra': ra, 'ro': ro, 'ri': ri, 'Cpo': Cpo, 'T0': T0,
        'dT': dT, 'H': H, 'vo': vo, 'Hb': Hb, 'Cpi': Cpi,
        'Li': Li, 'Tf': Tf, 'B': B, 'emissivity': emissivity,
        'SW0': SW0, 'SW_anom': SW_anom, 'Da': Da, 'Do': Do,
        'tau_entrainment': tau_entrainment,
    }
    # Fold in any extra user-supplied values.
    C.update(args)
    return C
| 2,590,239,401,571,567,600
|
Setup model constants. All of the constants have fixed values, but one can pass in own values or even some arbitrary values via **args.
|
coupled_channel/cutils.py
|
set_model_constants
|
AleksiNummelin/coupled_channel
|
python
|
def set_model_constants(xx=50000.0, nx=100, va=10.0, tmax=(((60 * 360) * 24) * 3600.0), avep=(24 * 3600.0), dt=3600.0, period=(((3600 * 24) * 360) * 1), B=2.0, T0=(273.15 + 6), dT=2.0, Cs=0.001, Cp=1030.0, ra=1.5, ro=1030.0, ri=900.0, Cpo=4000.0, Cpi=2900.0, H=200.0, vo=0.2, Hb=1000.0, Li=3300000.0, Tf=(273.15 - 1.8), SW0=50.0, SW_anom=100.0, emissivity=0.99, Da=1000000.0, Do=500.0, tau_entrainment=((30 * 24) * 3600.0), **args):
C = {}
C['xx'] = xx
C['nx'] = nx
C['va'] = va
C['tmax'] = tmax
C['dt'] = dt
C['avep'] = avep
C['period'] = period
C['Cs'] = Cs
C['Cp'] = Cp
C['ra'] = ra
C['ro'] = ro
C['ri'] = ri
C['Cpo'] = Cpo
C['T0'] = T0
C['dT'] = dT
C['H'] = H
C['vo'] = vo
C['Hb'] = Hb
C['Cpi'] = Cpi
C['Li'] = Li
C['Tf'] = Tf
C['B'] = B
C['emissivity'] = emissivity
C['SW0'] = SW0
C['SW_anom'] = SW_anom
C['Da'] = Da
C['Do'] = Do
C['tau_entrainment'] = tau_entrainment
for var in args.keys():
C[var] = args[var]
return C
|
def CoupledChannel(C, forcing, T_boundary=None, dt_f=((30 * 24) * 3600), restoring=False, ice_model=True, atm_adv=True, spatial_pattern=None, atm_DA_tendencies=None, ocn_DA_tendencies=None, return_coupled_fluxes=False, random_amp=0.1):
    """Main driver for the coupled ocean--atmosphere channel model.

    Args:
        C: dict of model constants (see set_model_constants). C['SW0'] must
            be an array of length C['nx'] here (it is tiled over time).
        forcing: heat-flux forcing time series sampled every dt_f seconds,
            or an all-None value for no forcing.
        T_boundary: boundary temperature time series; defaults to C['T0'].
        dt_f: timestep of the forcing [s].
        restoring: unused here -- kept for interface compatibility.
        ice_model: if True, run the thermodynamic sea-ice component.
        atm_adv: if True, include atmospheric advection.
        spatial_pattern: spatial weighting of the forcing (default uniform).
        atm_DA_tendencies, ocn_DA_tendencies: prescribed per-step dynamical
            tendencies; when given they replace the model's own
            advection+diffusion terms.
        return_coupled_fluxes: if True, also record and return the dynamical
            tendency arrays of both components.
        random_amp: unused here -- kept for interface compatibility.

    Returns:
        (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt), plus
        (atm_DA_tendencies, ocn_DA_tendencies) if return_coupled_fluxes.
    """
    nt = int((C['tmax'] / C['dt']))    # number of model timesteps
    nt1 = int((C['tmax'] / C['avep']))  # number of averaged output steps
    # Two time levels (previous, current) for each prognostic field.
    sst = (C['T0'] * np.ones((2, C['nx'])))
    tas = (C['T0'] * np.ones((2, C['nx'])))
    hice = np.zeros((2, C['nx']))
    SW0 = np.tile(C['SW0'][:, np.newaxis], (1, nt))
    naxis = np.tile(np.arange(nt)[(np.newaxis,)], (C['nx'], 1))
    # Seasonal shortwave cycle, clipped at zero.
    SW_warming = np.max(np.concatenate([(SW0 - (C['SW_anom'] * np.cos((((2 * np.pi) * (naxis * C['dt'])) / ((360 * 24) * 3600)))))[(np.newaxis,)], np.zeros((C['nx'], nt))[(np.newaxis,)]], axis=0), 0)
    # '== None' (not 'is None') is deliberate: it also works elementwise
    # when an array is passed in.
    if np.all((T_boundary == None)):
        T_boundary = (C['T0'] * np.ones(nt))
    sst_boundary = (T_boundary[0] * np.ones(2))
    if np.all((forcing != None)):
        # Interpolate the forcing from its own timestep to the model timestep.
        forcing = np.interp(np.arange(0, (len(forcing) * dt_f), C['dt']), np.arange(0, (len(forcing) * dt_f), dt_f), forcing)
    else:
        forcing = np.zeros((nt + 1))
    # Time-averaged output arrays.
    sst_out = np.zeros((nt1, C['nx']))
    tas_out = np.zeros((nt1, C['nx']))
    hice_out = np.zeros((nt1, C['nx']))
    sflx_f_out = np.zeros((nt1, C['nx']))
    sflx_out = np.zeros((nt1, C['nx']))
    if np.all((spatial_pattern == None)):
        spatial_pattern = np.ones(C['nx'])
    if np.all((atm_DA_tendencies != None)):
        use_atm_tendencies = True
    else:
        use_atm_tendencies = False
    if np.all((ocn_DA_tendencies != None)):
        use_ocn_tendencies = True
    else:
        use_ocn_tendencies = False
    if return_coupled_fluxes:
        atm_DA_tendencies = np.zeros((nt, C['nx']))
        ocn_DA_tendencies = np.zeros((nt, C['nx']))
    c = 0   # output (averaging window) index
    c2 = 0  # samples accumulated in the current window
    c3 = 0  # within-year step index for prescribed tendencies
    n = 1   # current time level
    for nn in range(nt):
        # --- surface heat flux: prescribed forcing + sensible exchange ---
        sflx = (forcing[nn] * spatial_pattern)
        sflx_f_out[c, :] = (sflx_f_out[c, :] + sflx)
        sflx = (sflx + ((((C['ra'] * C['Cp']) * C['va']) * C['Cs']) * (sst[(n - 1), :] - tas[(n - 1), :])))
        LW_cooling = ((C['emissivity'] * 5.67e-08) * (tas[(n - 1), :] ** 4))
        sst_boundary_tendency = ((((SW_warming[(0, nn)] * C['dt']) / ((C['H'] * C['Cpo']) * C['ro'])) - ((((C['emissivity'] * 5.67e-08) * (sst_boundary[(n - 1)] ** 4)) * C['dt']) / ((C['H'] * C['Cpo']) * C['ro']))) + (((T_boundary[nn] - sst_boundary[(n - 1)]) * C['dt']) / C['period']))
        # --- atmosphere: advection, diffusion, net surface flux ---
        if atm_adv:
            a_adv = (np.concatenate([(sst_boundary[(n - 1)] - tas[(n - 1), :1]), (tas[(n - 1), :(- 1)] - tas[(n - 1), 1:])], axis=0) * ((C['va'] * C['dt']) / C['xx']))
        else:
            a_adv = 0
        a_diff = (((tas[(n - 1), 2:] + tas[(n - 1), :(- 2)]) - (2 * tas[(n - 1), 1:(- 1)])) * ((C['Da'] * C['dt']) / (C['xx'] ** 2)))
        a_diff0 = (((tas[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * tas[((n - 1), 0)])) * ((C['Da'] * C['dt']) / (C['xx'] ** 2)))
        a_diff = np.concatenate([np.array([a_diff0]), a_diff, a_diff[(- 1):]], axis=0)
        a_netsflx = (((sflx * C['dt']) / ((C['Hb'] * C['Cp']) * C['ra'])) - ((LW_cooling * C['dt']) / ((C['Hb'] * C['Cp']) * C['ra'])))
        if return_coupled_fluxes:
            atm_DA_tendencies[nn, :] = (a_adv + a_diff)
        if use_atm_tendencies:
            tas[n, :] = ((tas[(n - 1), :] + a_netsflx) + atm_DA_tendencies[c3, :])
        else:
            tas[n, :] = (((tas[(n - 1), :] + a_netsflx) + a_adv) + a_diff)
        # --- ocean: advection, diffusion, entrainment, net surface flux ---
        o_adv = (np.concatenate([(sst_boundary[(n - 1)] - sst[(n - 1), :1]), (sst[(n - 1), :(- 1)] - sst[(n - 1), 1:])], axis=0) * ((C['vo'] * C['dt']) / C['xx']))
        o_diff = (((sst[(n - 1), 2:] + sst[(n - 1), :(- 2)]) - (2 * sst[(n - 1), 1:(- 1)])) * ((C['Do'] * C['dt']) / (C['xx'] ** 2)))
        o_diff0 = (((sst[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * sst[((n - 1), 0)])) * ((C['Do'] * C['dt']) / (C['xx'] ** 2)))
        o_diff = np.concatenate([np.array([o_diff0]), o_diff, o_diff[(- 1):]], axis=0)
        if (C['tau_entrainment'] > 0):
            o_entrain = (((C['T0'] - sst[(n - 1), :]) * C['dt']) / C['tau_entrainment'])
        else:
            o_entrain = 0
        o_netsflx = ((((- sflx) * C['dt']) / ((C['H'] * C['Cpo']) * C['ro'])) + ((SW_warming[:, nn] * C['dt']) / ((C['H'] * C['Cpo']) * C['ro'])))
        if return_coupled_fluxes:
            ocn_DA_tendencies[nn, :] = ((o_adv + o_diff) + o_entrain)
        if use_ocn_tendencies:
            sst[n, :] = ((sst[(n - 1), :] + o_netsflx) + ocn_DA_tendencies[c3, :])
        else:
            sst[n, :] = ((((sst[(n - 1), :] + o_netsflx) + o_adv) + o_diff) + o_entrain)
        if ice_model:
            # Thermodynamic sea ice: freeze when below Tf, melt where ice exists.
            # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; use the builtin float (same semantics: float64 masks).
            ice_mask = (hice[(n - 1), :] > 0).astype(float)
            freezing_mask = (sst[n, :] < C['Tf']).astype(float)
            dEdt = ((((C['H'] * C['ro']) * C['Cpo']) * (sst[n, :] - sst[(n - 1), :])) / C['dt'])
            excess_freeze = (freezing_mask * np.max([(- dEdt), np.zeros(C['nx'])], axis=0))
            excess_melt = (ice_mask * np.max([dEdt, np.zeros(C['nx'])], axis=0))
            dhice_freeze = ((C['dt'] * excess_freeze) / (C['Li'] * C['ri']))
            dhice_melt = ((C['dt'] * excess_melt) / (C['Li'] * C['ri']))
            hice[n, :] = ((hice[(n - 1), :] + dhice_freeze) - dhice_melt)
            # Can't melt more ice than exists.
            hice_melt = ((dhice_melt > 0).astype(float) * np.min([dhice_melt, hice[(n - 1), :]], axis=0))
            hice[n, :] = np.max([hice[n, :], np.zeros(C['nx'])], axis=0)
            # Latent-heat feedback on SST from freezing/melting.
            sst[n, :] = (sst[n, :] + ((C['dt'] * excess_freeze) / ((C['H'] * C['Cpo']) * C['ro'])))
            sst[n, :] = (sst[n, :] - ((hice_melt * (C['Li'] * C['ri'])) / ((C['ro'] * C['Cpo']) * C['H'])))
        # --- accumulate output averages ---
        tas_out[c, :] = (tas_out[c, :] + tas[n, :])
        sst_out[c, :] = (sst_out[c, :] + sst[n, :])
        hice_out[c, :] = (hice_out[c, :] + hice[n, :])
        sflx_out[c, :] = (sflx_out[c, :] + sflx)
        c2 = (c2 + 1)
        c3 = (c3 + 1)
        if ((((nn + 1) * C['dt']) % ((360 * 24) * 3600)) == 0):
            # Restart the prescribed-tendency index each model year.
            c3 = 0
        if (((((nn + 1) * C['dt']) % C['avep']) == 0) and (nn > 0)):
            # Close the current averaging window.
            tas_out[c, :] = (tas_out[c, :] / c2)
            sst_out[c, :] = (sst_out[c, :] / c2)
            sflx_out[c, :] = (sflx_out[c, :] / c2)
            sflx_f_out[c, :] = (sflx_f_out[c, :] / c2)
            hice_out[c, :] = (hice_out[c, :] / c2)
            c = (c + 1)
            c2 = 0
        if ((((nn + 1) * C['dt']) % ((360 * 24) * 3600)) == 0):
            print('Year ', (((nn + 1) * C['dt']) / ((360 * 24) * 3600)), sst[(1, int((C['nx'] / 4)))], sst[(1, int(((3 * C['nx']) / 4)))])
        # Rotate time levels for the next step.
        tas[0, :] = tas[1, :].copy()
        sst[0, :] = sst[1, :].copy()
        hice[0, :] = hice[1, :].copy()
        sst_boundary[(n - 1)] = (sst_boundary[(n - 1)] + sst_boundary_tendency)
    # Mask ice-free points so plots skip them.
    hice_out[np.where((hice_out == 0))] = np.nan
    if return_coupled_fluxes:
        return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies)
    else:
        return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt)
| 6,091,658,717,478,800,000
|
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
avep: averaging period for the output
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
|
coupled_channel/cutils.py
|
CoupledChannel
|
AleksiNummelin/coupled_channel
|
python
|
def CoupledChannel(C, forcing, T_boundary=None, dt_f=((30 * 24) * 3600), restoring=False, ice_model=True, atm_adv=True, spatial_pattern=None, atm_DA_tendencies=None, ocn_DA_tendencies=None, return_coupled_fluxes=False, random_amp=0.1):
'\n This is the main function for the coupled ocean--atm channel model.\n \n ## INPUT VARIABLES ##\n \n tmax: running time in seconds\n avep: averaging period for the ouput\n T0: initial temperature\n forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2\n dt_f: timestep of the forcing\n atm_adv: boolean, advective atmosphere\n atm_ocn: boolean, advective ocean\n '
nt = int((C['tmax'] / C['dt']))
nt1 = int((C['tmax'] / C['avep']))
sst = (C['T0'] * np.ones((2, C['nx'])))
tas = (C['T0'] * np.ones((2, C['nx'])))
hice = np.zeros((2, C['nx']))
SW0 = np.tile(C['SW0'][:, np.newaxis], (1, nt))
naxis = np.tile(np.arange(nt)[(np.newaxis,)], (C['nx'], 1))
SW_warming = np.max(np.concatenate([(SW0 - (C['SW_anom'] * np.cos((((2 * np.pi) * (naxis * C['dt'])) / ((360 * 24) * 3600)))))[(np.newaxis,)], np.zeros((C['nx'], nt))[(np.newaxis,)]], axis=0), 0)
if np.all((T_boundary == None)):
T_boundary = (C['T0'] * np.ones(nt))
sst_boundary = (T_boundary[0] * np.ones(2))
if np.all((forcing != None)):
forcing = np.interp(np.arange(0, (len(forcing) * dt_f), C['dt']), np.arange(0, (len(forcing) * dt_f), dt_f), forcing)
else:
forcing = np.zeros((nt + 1))
sst_out = np.zeros((nt1, C['nx']))
tas_out = np.zeros((nt1, C['nx']))
hice_out = np.zeros((nt1, C['nx']))
sflx_f_out = np.zeros((nt1, C['nx']))
sflx_out = np.zeros((nt1, C['nx']))
if np.all((spatial_pattern == None)):
spatial_pattern = np.ones(C['nx'])
if np.all((atm_DA_tendencies != None)):
use_atm_tendencies = True
else:
use_atm_tendencies = False
if np.all((ocn_DA_tendencies != None)):
use_ocn_tendencies = True
else:
use_ocn_tendencies = False
if return_coupled_fluxes:
atm_DA_tendencies = np.zeros((nt, C['nx']))
ocn_DA_tendencies = np.zeros((nt, C['nx']))
c = 0
c2 = 0
c3 = 0
n = 1
for nn in range(nt):
sflx = (forcing[nn] * spatial_pattern)
sflx_f_out[c, :] = (sflx_f_out[c, :] + sflx)
sflx = (sflx + ((((C['ra'] * C['Cp']) * C['va']) * C['Cs']) * (sst[(n - 1), :] - tas[(n - 1), :])))
LW_cooling = ((C['emissivity'] * 5.67e-08) * (tas[(n - 1), :] ** 4))
sst_boundary_tendency = ((((SW_warming[(0, nn)] * C['dt']) / ((C['H'] * C['Cpo']) * C['ro'])) - ((((C['emissivity'] * 5.67e-08) * (sst_boundary[(n - 1)] ** 4)) * C['dt']) / ((C['H'] * C['Cpo']) * C['ro']))) + (((T_boundary[nn] - sst_boundary[(n - 1)]) * C['dt']) / C['period']))
if atm_adv:
a_adv = (np.concatenate([(sst_boundary[(n - 1)] - tas[(n - 1), :1]), (tas[(n - 1), :(- 1)] - tas[(n - 1), 1:])], axis=0) * ((C['va'] * C['dt']) / C['xx']))
else:
a_adv = 0
a_diff = (((tas[(n - 1), 2:] + tas[(n - 1), :(- 2)]) - (2 * tas[(n - 1), 1:(- 1)])) * ((C['Da'] * C['dt']) / (C['xx'] ** 2)))
a_diff0 = (((tas[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * tas[((n - 1), 0)])) * ((C['Da'] * C['dt']) / (C['xx'] ** 2)))
a_diff = np.concatenate([np.array([a_diff0]), a_diff, a_diff[(- 1):]], axis=0)
a_netsflx = (((sflx * C['dt']) / ((C['Hb'] * C['Cp']) * C['ra'])) - ((LW_cooling * C['dt']) / ((C['Hb'] * C['Cp']) * C['ra'])))
if return_coupled_fluxes:
atm_DA_tendencies[nn, :] = (a_adv + a_diff)
if use_atm_tendencies:
tas[n, :] = ((tas[(n - 1), :] + a_netsflx) + atm_DA_tendencies[c3, :])
else:
tas[n, :] = (((tas[(n - 1), :] + a_netsflx) + a_adv) + a_diff)
o_adv = (np.concatenate([(sst_boundary[(n - 1)] - sst[(n - 1), :1]), (sst[(n - 1), :(- 1)] - sst[(n - 1), 1:])], axis=0) * ((C['vo'] * C['dt']) / C['xx']))
o_diff = (((sst[(n - 1), 2:] + sst[(n - 1), :(- 2)]) - (2 * sst[(n - 1), 1:(- 1)])) * ((C['Do'] * C['dt']) / (C['xx'] ** 2)))
o_diff0 = (((sst[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * sst[((n - 1), 0)])) * ((C['Do'] * C['dt']) / (C['xx'] ** 2)))
o_diff = np.concatenate([np.array([o_diff0]), o_diff, o_diff[(- 1):]], axis=0)
if (C['tau_entrainment'] > 0):
o_entrain = (((C['T0'] - sst[(n - 1), :]) * C['dt']) / C['tau_entrainment'])
else:
o_entrain = 0
o_netsflx = ((((- sflx) * C['dt']) / ((C['H'] * C['Cpo']) * C['ro'])) + ((SW_warming[:, nn] * C['dt']) / ((C['H'] * C['Cpo']) * C['ro'])))
if return_coupled_fluxes:
ocn_DA_tendencies[nn, :] = ((o_adv + o_diff) + o_entrain)
if use_ocn_tendencies:
sst[n, :] = ((sst[(n - 1), :] + o_netsflx) + ocn_DA_tendencies[c3, :])
else:
sst[n, :] = ((((sst[(n - 1), :] + o_netsflx) + o_adv) + o_diff) + o_entrain)
if ice_model:
ice_mask = (hice[(n - 1), :] > 0).astype(np.float)
freezing_mask = (sst[n, :] < C['Tf']).astype(np.float)
dEdt = ((((C['H'] * C['ro']) * C['Cpo']) * (sst[n, :] - sst[(n - 1), :])) / C['dt'])
excess_freeze = (freezing_mask * np.max([(- dEdt), np.zeros(C['nx'])], axis=0))
excess_melt = (ice_mask * np.max([dEdt, np.zeros(C['nx'])], axis=0))
dhice_freeze = ((C['dt'] * excess_freeze) / (C['Li'] * C['ri']))
dhice_melt = ((C['dt'] * excess_melt) / (C['Li'] * C['ri']))
hice[n, :] = ((hice[(n - 1), :] + dhice_freeze) - dhice_melt)
hice_melt = ((dhice_melt > 0).astype(np.float) * np.min([dhice_melt, hice[(n - 1), :]], axis=0))
hice[n, :] = np.max([hice[n, :], np.zeros(C['nx'])], axis=0)
sst[n, :] = (sst[n, :] + ((C['dt'] * excess_freeze) / ((C['H'] * C['Cpo']) * C['ro'])))
sst[n, :] = (sst[n, :] - ((hice_melt * (C['Li'] * C['ri'])) / ((C['ro'] * C['Cpo']) * C['H'])))
tas_out[c, :] = (tas_out[c, :] + tas[n, :])
sst_out[c, :] = (sst_out[c, :] + sst[n, :])
hice_out[c, :] = (hice_out[c, :] + hice[n, :])
sflx_out[c, :] = (sflx_out[c, :] + sflx)
c2 = (c2 + 1)
c3 = (c3 + 1)
if ((((nn + 1) * C['dt']) % ((360 * 24) * 3600)) == 0):
c3 = 0
if (((((nn + 1) * C['dt']) % C['avep']) == 0) and (nn > 0)):
tas_out[c, :] = (tas_out[c, :] / c2)
sst_out[c, :] = (sst_out[c, :] / c2)
sflx_out[c, :] = (sflx_out[c, :] / c2)
sflx_f_out[c, :] = (sflx_f_out[c, :] / c2)
hice_out[c, :] = (hice_out[c, :] / c2)
c = (c + 1)
c2 = 0
if ((((nn + 1) * C['dt']) % ((360 * 24) * 3600)) == 0):
print('Year ', (((nn + 1) * C['dt']) / ((360 * 24) * 3600)), sst[(1, int((C['nx'] / 4)))], sst[(1, int(((3 * C['nx']) / 4)))])
tas[0, :] = tas[1, :].copy()
sst[0, :] = sst[1, :].copy()
hice[0, :] = hice[1, :].copy()
sst_boundary[(n - 1)] = (sst_boundary[(n - 1)] + sst_boundary_tendency)
hice_out[np.where((hice_out == 0))] = np.nan
if return_coupled_fluxes:
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies)
else:
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt)
|
def CoupledChannel_time(nt, nx, xx, dt, avep, sst, tas, hice, sst_boundary, sst_out, tas_out, hice_out, sflx_f_out, sflx_out, forcing, spatial_pattern, ra, Cp, va, vo, Da, Do, Cs, T0, Tf, emissivity, SW0, SW_anom, H, Hb, Cpo, ro, tau_entrainment, Li, ri, use_ocn_tendencies, use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies, ice_model, atm_adv, return_coupled_fluxes):
'\n Separate time loop to enable numba\n '
c = 0
c2 = 0
c3 = 0
n = 1
for nn in range(nt):
sflx = (forcing[nn] * spatial_pattern)
sflx_f_out[c, :] = (sflx_f_out[c, :] + sflx)
sflx = (sflx + ((((ra * Cp) * va) * Cs) * (sst[(n - 1), :] - tas[(n - 1), :])))
LW_cooling = ((emissivity * 5.67e-08) * (tas[(n - 1), :] ** 4))
SW_warming = (SW0 + max((SW_anom * np.sin(((((2 * float(nn)) * dt) * np.pi) / ((360 * 24) * 3600)))), 0.0))
net_radiation = (- LW_cooling)
sst_boundary[n] = (((sst_boundary[(n - 1)] + ((SW_warming[0] * dt) / ((H * Cpo) * ro))) - ((((emissivity * 5.67e-08) * (sst_boundary[(n - 1)] ** 4)) * dt) / ((H * Cpo) * ro))) + (((T0 - sst_boundary[(n - 1)]) * dt) / ((360 * 24) * 3600)))
if atm_adv:
a_adv = (np.concatenate(((sst_boundary[(n - 1)] - tas[(n - 1), :1]), (tas[(n - 1), :(- 1)] - tas[(n - 1), 1:])), axis=0) * ((va * dt) / xx))
else:
a_adv = np.zeros(nx)
a_diff = (((tas[(n - 1), 2:] + tas[(n - 1), :(- 2)]) - (2 * tas[(n - 1), 1:(- 1)])) * ((Da * dt) / (xx ** 2)))
a_diff0 = (((tas[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * tas[((n - 1), 0)])) * ((Da * dt) / (xx ** 2)))
a_diff = np.concatenate((np.array([a_diff0]), a_diff, a_diff[(- 1):]), axis=0)
a_netsflx = (((sflx * dt) / ((Hb * Cp) * ra)) + ((net_radiation * dt) / ((Hb * Cp) * ra)))
if return_coupled_fluxes:
atm_DA_tendencies[nn, :] = np.sum((a_adv, a_diff), axis=0)
if use_atm_tendencies:
tas[n, :] = ((tas[(n - 1), :] + a_netsflx) + atm_DA_tendencies[c3, :])
else:
tas[n, :] = (((tas[(n - 1), :] + a_netsflx) + a_adv) + a_diff)
o_adv = (np.concatenate(((sst_boundary[(n - 1)] - sst[(n - 1), :1]), (sst[(n - 1), :(- 1)] - sst[(n - 1), 1:])), axis=0) * ((vo * dt) / xx))
o_diff = (((sst[(n - 1), 2:] + sst[(n - 1), :(- 2)]) - (2 * sst[(n - 1), 1:(- 1)])) * ((Do * dt) / (xx ** 2)))
o_diff0 = (((sst[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * sst[((n - 1), 0)])) * ((Do * dt) / (xx ** 2)))
o_diff = np.concatenate((np.array([o_diff0]), o_diff, o_diff[(- 1):]), axis=0)
o_entrain = (((T0 - sst[(n - 1), :]) * dt) / tau_entrainment)
o_netsflx = ((((- sflx) * dt) / ((H * Cpo) * ro)) + ((SW_warming * dt) / ((H * Cpo) * ro)))
if return_coupled_fluxes:
ocn_DA_tendencies[nn, :] = ((o_adv + o_diff) + o_entrain)
if use_ocn_tendencies:
sst[n, :] = ((sst[(n - 1), :] + o_netsflx) + ocn_DA_tendencies[c3, :])
else:
sst[n, :] = ((((sst[(n - 1), :] + o_netsflx) + o_adv) + o_diff) + o_entrain)
if ice_model:
ice_mask = (hice[(n - 1), :] > 0).astype(np.float)
freezing_mask = (sst[n, :] < Tf).astype(np.float)
dEdt = ((((H * ro) * Cpo) * (sst[n, :] - sst[(n - 1), :])) / dt)
excess_freeze = (freezing_mask * np.max([(- dEdt), np.zeros(nx)], axis=0))
excess_melt = (ice_mask * np.max([dEdt, np.zeros(nx)], axis=0))
dhice_freeze = ((dt * excess_freeze) / (Li * ri))
dhice_melt = ((dt * excess_melt) / (Li * ri))
hice[n, :] = ((hice[(n - 1), :] + dhice_freeze) - dhice_melt)
hice_melt = ((dhice_melt > 0).astype(np.float) * np.min([dhice_melt, hice[(n - 1), :]], axis=0))
hice[n, :] = np.max([hice[n, :], np.zeros(nx)], axis=0)
sst[n, :] = (sst[n, :] + ((dt * excess_freeze) / ((H * Cpo) * ro)))
sst[n, :] = (sst[n, :] - ((hice_melt * (Li * ri)) / ((ro * Cpo) * H)))
tas_out[c, :] = (tas_out[c, :] + tas[n, :])
sst_out[c, :] = (sst_out[c, :] + sst[n, :])
hice_out[c, :] = (hice_out[c, :] + hice[n, :])
sflx_out[c, :] = (sflx_out[c, :] + sflx)
c2 = (c2 + 1)
c3 = (c3 + 1)
if ((((nn + 1) * dt) % ((360 * 24) * 3600)) == 0):
c3 = 0
if (((((nn + 1) * dt) % avep) == 0) and (nn > 0)):
tas_out[c, :] = (tas_out[c, :] / c2)
sst_out[c, :] = (sst_out[c, :] / c2)
sflx_out[c, :] = (sflx_out[c, :] / c2)
sflx_f_out[c, :] = (sflx_f_out[c, :] / c2)
hice_out[c, :] = (hice_out[c, :] / c2)
c = (c + 1)
c2 = 0
tas[0, :] = tas[1, :].copy()
sst[0, :] = sst[1, :].copy()
hice[0, :] = hice[1, :].copy()
sst_boundary[0] = sst_boundary[1].copy()
hice_out[np.where((hice_out == 0))] = np.nan
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies)
| -8,831,610,648,961,246,000
|
Separate time loop to enable numba
|
coupled_channel/cutils.py
|
CoupledChannel_time
|
AleksiNummelin/coupled_channel
|
python
|
def CoupledChannel_time(nt, nx, xx, dt, avep, sst, tas, hice, sst_boundary, sst_out, tas_out, hice_out, sflx_f_out, sflx_out, forcing, spatial_pattern, ra, Cp, va, vo, Da, Do, Cs, T0, Tf, emissivity, SW0, SW_anom, H, Hb, Cpo, ro, tau_entrainment, Li, ri, use_ocn_tendencies, use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies, ice_model, atm_adv, return_coupled_fluxes):
'\n \n '
c = 0
c2 = 0
c3 = 0
n = 1
for nn in range(nt):
sflx = (forcing[nn] * spatial_pattern)
sflx_f_out[c, :] = (sflx_f_out[c, :] + sflx)
sflx = (sflx + ((((ra * Cp) * va) * Cs) * (sst[(n - 1), :] - tas[(n - 1), :])))
LW_cooling = ((emissivity * 5.67e-08) * (tas[(n - 1), :] ** 4))
SW_warming = (SW0 + max((SW_anom * np.sin(((((2 * float(nn)) * dt) * np.pi) / ((360 * 24) * 3600)))), 0.0))
net_radiation = (- LW_cooling)
sst_boundary[n] = (((sst_boundary[(n - 1)] + ((SW_warming[0] * dt) / ((H * Cpo) * ro))) - ((((emissivity * 5.67e-08) * (sst_boundary[(n - 1)] ** 4)) * dt) / ((H * Cpo) * ro))) + (((T0 - sst_boundary[(n - 1)]) * dt) / ((360 * 24) * 3600)))
if atm_adv:
a_adv = (np.concatenate(((sst_boundary[(n - 1)] - tas[(n - 1), :1]), (tas[(n - 1), :(- 1)] - tas[(n - 1), 1:])), axis=0) * ((va * dt) / xx))
else:
a_adv = np.zeros(nx)
a_diff = (((tas[(n - 1), 2:] + tas[(n - 1), :(- 2)]) - (2 * tas[(n - 1), 1:(- 1)])) * ((Da * dt) / (xx ** 2)))
a_diff0 = (((tas[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * tas[((n - 1), 0)])) * ((Da * dt) / (xx ** 2)))
a_diff = np.concatenate((np.array([a_diff0]), a_diff, a_diff[(- 1):]), axis=0)
a_netsflx = (((sflx * dt) / ((Hb * Cp) * ra)) + ((net_radiation * dt) / ((Hb * Cp) * ra)))
if return_coupled_fluxes:
atm_DA_tendencies[nn, :] = np.sum((a_adv, a_diff), axis=0)
if use_atm_tendencies:
tas[n, :] = ((tas[(n - 1), :] + a_netsflx) + atm_DA_tendencies[c3, :])
else:
tas[n, :] = (((tas[(n - 1), :] + a_netsflx) + a_adv) + a_diff)
o_adv = (np.concatenate(((sst_boundary[(n - 1)] - sst[(n - 1), :1]), (sst[(n - 1), :(- 1)] - sst[(n - 1), 1:])), axis=0) * ((vo * dt) / xx))
o_diff = (((sst[(n - 1), 2:] + sst[(n - 1), :(- 2)]) - (2 * sst[(n - 1), 1:(- 1)])) * ((Do * dt) / (xx ** 2)))
o_diff0 = (((sst[((n - 1), 1)] + sst_boundary[(n - 1)]) - (2 * sst[((n - 1), 0)])) * ((Do * dt) / (xx ** 2)))
o_diff = np.concatenate((np.array([o_diff0]), o_diff, o_diff[(- 1):]), axis=0)
o_entrain = (((T0 - sst[(n - 1), :]) * dt) / tau_entrainment)
o_netsflx = ((((- sflx) * dt) / ((H * Cpo) * ro)) + ((SW_warming * dt) / ((H * Cpo) * ro)))
if return_coupled_fluxes:
ocn_DA_tendencies[nn, :] = ((o_adv + o_diff) + o_entrain)
if use_ocn_tendencies:
sst[n, :] = ((sst[(n - 1), :] + o_netsflx) + ocn_DA_tendencies[c3, :])
else:
sst[n, :] = ((((sst[(n - 1), :] + o_netsflx) + o_adv) + o_diff) + o_entrain)
if ice_model:
ice_mask = (hice[(n - 1), :] > 0).astype(np.float)
freezing_mask = (sst[n, :] < Tf).astype(np.float)
dEdt = ((((H * ro) * Cpo) * (sst[n, :] - sst[(n - 1), :])) / dt)
excess_freeze = (freezing_mask * np.max([(- dEdt), np.zeros(nx)], axis=0))
excess_melt = (ice_mask * np.max([dEdt, np.zeros(nx)], axis=0))
dhice_freeze = ((dt * excess_freeze) / (Li * ri))
dhice_melt = ((dt * excess_melt) / (Li * ri))
hice[n, :] = ((hice[(n - 1), :] + dhice_freeze) - dhice_melt)
hice_melt = ((dhice_melt > 0).astype(np.float) * np.min([dhice_melt, hice[(n - 1), :]], axis=0))
hice[n, :] = np.max([hice[n, :], np.zeros(nx)], axis=0)
sst[n, :] = (sst[n, :] + ((dt * excess_freeze) / ((H * Cpo) * ro)))
sst[n, :] = (sst[n, :] - ((hice_melt * (Li * ri)) / ((ro * Cpo) * H)))
tas_out[c, :] = (tas_out[c, :] + tas[n, :])
sst_out[c, :] = (sst_out[c, :] + sst[n, :])
hice_out[c, :] = (hice_out[c, :] + hice[n, :])
sflx_out[c, :] = (sflx_out[c, :] + sflx)
c2 = (c2 + 1)
c3 = (c3 + 1)
if ((((nn + 1) * dt) % ((360 * 24) * 3600)) == 0):
c3 = 0
if (((((nn + 1) * dt) % avep) == 0) and (nn > 0)):
tas_out[c, :] = (tas_out[c, :] / c2)
sst_out[c, :] = (sst_out[c, :] / c2)
sflx_out[c, :] = (sflx_out[c, :] / c2)
sflx_f_out[c, :] = (sflx_f_out[c, :] / c2)
hice_out[c, :] = (hice_out[c, :] / c2)
c = (c + 1)
c2 = 0
tas[0, :] = tas[1, :].copy()
sst[0, :] = sst[1, :].copy()
hice[0, :] = hice[1, :].copy()
sst_boundary[0] = sst_boundary[1].copy()
hice_out[np.where((hice_out == 0))] = np.nan
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies)
|
def CoupledChannel2(C, forcing, dt_f=((30 * 24) * 3600), ocn_mixing_ratio=0, restoring=False, ice_model=True, atm_adv=True, spatial_pattern=None, atm_DA_tendencies=None, ocn_DA_tendencies=None, return_coupled_fluxes=False, random_amp=0.1):
'\n This is the main function for the coupled ocean--atm channel model.\n \n ## INPUT VARIABLES ##\n \n tmax: running time in seconds\n avep: averaging period for the ouput\n T0: initial temperature\n forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2\n dt_f: timestep of the forcing\n atm_adv: boolean, advective atmosphere\n atm_ocn: boolean, advective ocean\n ocn_mixing: add non-local mixing to ocean\n ocn_mixing_ratio: 0-1 ratio between advection and mixing (0 only advection; 1 only mixing)\n \n '
nt = int((C['tmax'] / C['dt']))
nt1 = int((C['tmax'] / C['avep']))
tau = (float(C['period']) / float(C['dt']))
rtas = np.random.rand(C['nx'])
sst = (C['T0'] * np.ones((2, C['nx'])))
tas = ((C['T0'] * np.ones((2, C['nx']))) + rtas)
hice = np.zeros((2, C['nx']))
sst_boundary = (C['T0'] * np.ones(2))
if np.all((forcing != None)):
forcing = np.interp(np.arange(0, (len(forcing) * dt_f), C['dt']), np.arange(0, (len(forcing) * dt_f), dt_f), forcing)
else:
forcing = np.zeros((nt + 1))
sst_out = np.zeros((nt1, C['nx']))
tas_out = np.zeros((nt1, C['nx']))
hice_out = np.zeros((nt1, C['nx']))
sflx_f_out = np.zeros((nt1, C['nx']))
sflx_out = np.zeros((nt1, C['nx']))
if np.all((spatial_pattern == None)):
spatial_pattern = np.ones(C['nx'])
if np.all((atm_DA_tendencies != None)):
use_atm_tendencies = True
else:
use_atm_tendencies = False
if np.all((ocn_DA_tendencies != None)):
use_ocn_tendencies = True
else:
use_ocn_tendencies = False
atm_DA_tendencies = np.zeros((nt, C['nx']))
ocn_DA_tendencies = np.zeros((nt, C['nx']))
(tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies) = CoupledChannel_time(nt, C['nx'], C['xx'], C['dt'], C['avep'], sst, tas, hice, sst_boundary, sst_out, tas_out, hice_out, sflx_f_out, sflx_out, forcing, spatial_pattern, C['ra'], C['Cp'], C['va'], C['vo'], C['Da'], C['Do'], C['Cs'], C['T0'], C['Tf'], C['emissivity'], C['SW0'], C['SW_anom'], C['H'], C['Hb'], C['Cpo'], C['ro'], C['tau_entrainment'], C['Li'], C['ri'], use_ocn_tendencies, use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies, ice_model, atm_adv, return_coupled_fluxes)
if return_coupled_fluxes:
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies)
else:
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt)
| 149,027,019,511,530,000
|
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
avep: averaging period for the output
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
ocn_mixing: add non-local mixing to ocean
ocn_mixing_ratio: 0-1 ratio between advection and mixing (0 only advection; 1 only mixing)
|
coupled_channel/cutils.py
|
CoupledChannel2
|
AleksiNummelin/coupled_channel
|
python
|
def CoupledChannel2(C, forcing, dt_f=((30 * 24) * 3600), ocn_mixing_ratio=0, restoring=False, ice_model=True, atm_adv=True, spatial_pattern=None, atm_DA_tendencies=None, ocn_DA_tendencies=None, return_coupled_fluxes=False, random_amp=0.1):
'\n This is the main function for the coupled ocean--atm channel model.\n \n ## INPUT VARIABLES ##\n \n tmax: running time in seconds\n avep: averaging period for the ouput\n T0: initial temperature\n forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2\n dt_f: timestep of the forcing\n atm_adv: boolean, advective atmosphere\n atm_ocn: boolean, advective ocean\n ocn_mixing: add non-local mixing to ocean\n ocn_mixing_ratio: 0-1 ratio between advection and mixing (0 only advection; 1 only mixing)\n \n '
nt = int((C['tmax'] / C['dt']))
nt1 = int((C['tmax'] / C['avep']))
tau = (float(C['period']) / float(C['dt']))
rtas = np.random.rand(C['nx'])
sst = (C['T0'] * np.ones((2, C['nx'])))
tas = ((C['T0'] * np.ones((2, C['nx']))) + rtas)
hice = np.zeros((2, C['nx']))
sst_boundary = (C['T0'] * np.ones(2))
if np.all((forcing != None)):
forcing = np.interp(np.arange(0, (len(forcing) * dt_f), C['dt']), np.arange(0, (len(forcing) * dt_f), dt_f), forcing)
else:
forcing = np.zeros((nt + 1))
sst_out = np.zeros((nt1, C['nx']))
tas_out = np.zeros((nt1, C['nx']))
hice_out = np.zeros((nt1, C['nx']))
sflx_f_out = np.zeros((nt1, C['nx']))
sflx_out = np.zeros((nt1, C['nx']))
if np.all((spatial_pattern == None)):
spatial_pattern = np.ones(C['nx'])
if np.all((atm_DA_tendencies != None)):
use_atm_tendencies = True
else:
use_atm_tendencies = False
if np.all((ocn_DA_tendencies != None)):
use_ocn_tendencies = True
else:
use_ocn_tendencies = False
atm_DA_tendencies = np.zeros((nt, C['nx']))
ocn_DA_tendencies = np.zeros((nt, C['nx']))
(tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies) = CoupledChannel_time(nt, C['nx'], C['xx'], C['dt'], C['avep'], sst, tas, hice, sst_boundary, sst_out, tas_out, hice_out, sflx_f_out, sflx_out, forcing, spatial_pattern, C['ra'], C['Cp'], C['va'], C['vo'], C['Da'], C['Do'], C['Cs'], C['T0'], C['Tf'], C['emissivity'], C['SW0'], C['SW_anom'], C['H'], C['Hb'], C['Cpo'], C['ro'], C['tau_entrainment'], C['Li'], C['ri'], use_ocn_tendencies, use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies, ice_model, atm_adv, return_coupled_fluxes)
if return_coupled_fluxes:
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies)
else:
return (tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt)
|
def RaisedCosinePulse(t, Freq, Amplitude):
'\n\tRaised-Cosine Pulse\n\t\n\t@param t time vector\n\t@param Freq Frequency in Hz\n\t@param Amplitude Real Value of Amplitude\n\t\t\n\t@return Output signal vector\n\t@retval P vector of length equals to the time vector t\n\t\n\t'
N = np.size(t, 0)
P = np.zeros((N,), dtype=np.float32)
for m in range(0, N):
if (t[m] <= (2.0 / Freq)):
P[m] = ((Amplitude * (1 - cos(((pi * Freq) * t[m])))) * cos((((2 * pi) * Freq) * t[m])))
return P
| 3,912,961,718,534,332,000
|
Raised-Cosine Pulse
@param t time vector
@param Freq Frequency in Hz
@param Amplitude Real Value of Amplitude
@return Output signal vector
@retval P vector of length equals to the time vector t
|
EFIT2D_Classes.py
|
RaisedCosinePulse
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def RaisedCosinePulse(t, Freq, Amplitude):
'\n\tRaised-Cosine Pulse\n\t\n\t@param t time vector\n\t@param Freq Frequency in Hz\n\t@param Amplitude Real Value of Amplitude\n\t\t\n\t@return Output signal vector\n\t@retval P vector of length equals to the time vector t\n\t\n\t'
N = np.size(t, 0)
P = np.zeros((N,), dtype=np.float32)
for m in range(0, N):
if (t[m] <= (2.0 / Freq)):
P[m] = ((Amplitude * (1 - cos(((pi * Freq) * t[m])))) * cos((((2 * pi) * Freq) * t[m])))
return P
|
def ricker(t, ts, fsavg):
'\n\tRicker Pulse\n\n\t@param t time vector\n\t@param ts temporal delay\n\t@param fsavg pulse width parameter\n\n\t@return Output signal vector\n\t'
a = ((fsavg * pi) * (t - ts))
a2 = (a * a)
return ((1.0 - (2.0 * a2)) * np.exp((- a2)))
| 2,047,324,342,537,119,700
|
Ricker Pulse
@param t time vector
@param ts temporal delay
@param fsavg pulse width parameter
@return Output signal vector
|
EFIT2D_Classes.py
|
ricker
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def ricker(t, ts, fsavg):
'\n\tRicker Pulse\n\n\t@param t time vector\n\t@param ts temporal delay\n\t@param fsavg pulse width parameter\n\n\t@return Output signal vector\n\t'
a = ((fsavg * pi) * (t - ts))
a2 = (a * a)
return ((1.0 - (2.0 * a2)) * np.exp((- a2)))
|
def __init__(self, Width=40, Height=40, Pixel_mm=10, label=0, SPML=False):
'\n\t\tConstructor of the Class NewImage\n\n\t\t@param Width Width of the Scenario\n\t\t@param Height Height of the Scenario\n\t\t@param Pixel_mm Ratio Pixel per mm\n\t\t@param label Label\n\t\t@param SPML Flag used to indicate the boundary conditions\n\n\n\t\t'
self.Width = Width
self.Height = Height
self.Pixel_mm = Pixel_mm
self.Label = label
self.SPML = SPML
self.M = int((self.Height * self.Pixel_mm))
self.N = int((self.Width * self.Pixel_mm))
self.I = (np.ones((self.M, self.N), dtype=np.uint8) * label)
self.Itemp = 0
self.Tap = 0
self.AirBoundary = False
| 1,345,557,265,963,583,500
|
Constructor of the Class NewImage
@param Width Width of the Scenario
@param Height Height of the Scenario
@param Pixel_mm Ratio Pixel per mm
@param label Label
@param SPML Flag used to indicate the boundary conditions
|
EFIT2D_Classes.py
|
__init__
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def __init__(self, Width=40, Height=40, Pixel_mm=10, label=0, SPML=False):
'\n\t\tConstructor of the Class NewImage\n\n\t\t@param Width Width of the Scenario\n\t\t@param Height Height of the Scenario\n\t\t@param Pixel_mm Ratio Pixel per mm\n\t\t@param label Label\n\t\t@param SPML Flag used to indicate the boundary conditions\n\n\n\t\t'
self.Width = Width
self.Height = Height
self.Pixel_mm = Pixel_mm
self.Label = label
self.SPML = SPML
self.M = int((self.Height * self.Pixel_mm))
self.N = int((self.Width * self.Pixel_mm))
self.I = (np.ones((self.M, self.N), dtype=np.uint8) * label)
self.Itemp = 0
self.Tap = 0
self.AirBoundary = False
|
def createLayer(self, centerW, centerH, Width, Height, label, Theta=0):
'\n\t\tCreate a Layer\n\n\t\t@param centerW center in width-axis of the Layer\n\t\t@param centerH center in height-axis of the Layer\n\t\t@param Width Width of the Layer\n\t\t@param Height Height of the Layer\n\t\t@param label Label of the layer\n\t\t@param Theta Rotation Angle\n\t\t'
a = int(((Height * self.Pixel_mm) / 2.0))
b = int(((Width * self.Pixel_mm) / 2.0))
for x in range((- a), a):
for y in range((- b), b):
tempX = round((x + (centerH * self.Pixel_mm)))
tempY = round((y + (centerW * self.Pixel_mm)))
self.I[(tempX, tempY)] = label
if (Theta != 0):
self.I = imrotate(self.I, Theta, interp='nearest')
| -2,616,156,985,373,886,500
|
Create a Layer
@param centerW center in width-axis of the Layer
@param centerH center in height-axis of the Layer
@param Width Width of the Layer
@param Height Height of the Layer
@param label Label of the layer
@param Theta Rotation Angle
|
EFIT2D_Classes.py
|
createLayer
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def createLayer(self, centerW, centerH, Width, Height, label, Theta=0):
'\n\t\tCreate a Layer\n\n\t\t@param centerW center in width-axis of the Layer\n\t\t@param centerH center in height-axis of the Layer\n\t\t@param Width Width of the Layer\n\t\t@param Height Height of the Layer\n\t\t@param label Label of the layer\n\t\t@param Theta Rotation Angle\n\t\t'
a = int(((Height * self.Pixel_mm) / 2.0))
b = int(((Width * self.Pixel_mm) / 2.0))
for x in range((- a), a):
for y in range((- b), b):
tempX = round((x + (centerH * self.Pixel_mm)))
tempY = round((y + (centerW * self.Pixel_mm)))
self.I[(tempX, tempY)] = label
if (Theta != 0):
self.I = imrotate(self.I, Theta, interp='nearest')
|
def createABS(self, Tap):
'\n\t\tCreate the boundary layers depending on the boundary conditions required\n\n\t\t@param Tap Layer Size\n\n\n\t\t'
self.Tap = Tap
self.SPML = True
self.AirBoundary = False
(self.M, self.N) = np.shape(self.I)
TP = round((Tap * self.Pixel_mm))
M_pml = int((self.M + (2 * TP)))
N_pml = int((self.N + (2 * TP)))
self.Itemp = (255.0 * np.ones((M_pml, N_pml), dtype=np.uint8))
self.Itemp[TP:(M_pml - TP), TP:(N_pml - TP)] = np.copy(self.I)
| 548,004,113,053,188,300
|
Create the boundary layers depending on the boundary conditions required
@param Tap Layer Size
|
EFIT2D_Classes.py
|
createABS
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def createABS(self, Tap):
'\n\t\tCreate the boundary layers depending on the boundary conditions required\n\n\t\t@param Tap Layer Size\n\n\n\t\t'
self.Tap = Tap
self.SPML = True
self.AirBoundary = False
(self.M, self.N) = np.shape(self.I)
TP = round((Tap * self.Pixel_mm))
M_pml = int((self.M + (2 * TP)))
N_pml = int((self.N + (2 * TP)))
self.Itemp = (255.0 * np.ones((M_pml, N_pml), dtype=np.uint8))
self.Itemp[TP:(M_pml - TP), TP:(N_pml - TP)] = np.copy(self.I)
|
def __init__(self, name='Water', rho=1000, c11=2190000000.0, c12=0.0, c22=0.0, c44=0.0, eta_v=0, eta_s=0, label=0):
'\n\t\tConstructor of the Material object\n\t\t'
self.name = name
self.rho = rho
self.c11 = c11
self.c12 = c12
self.c22 = c22
self.c44 = c44
self.VL = sqrt((c11 / rho))
self.VT = sqrt((c44 / rho))
self.eta_v = eta_v
self.eta_s = eta_s
self.Label = label
| -3,500,076,890,244,242,400
|
Constructor of the Material object
|
EFIT2D_Classes.py
|
__init__
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def __init__(self, name='Water', rho=1000, c11=2190000000.0, c12=0.0, c22=0.0, c44=0.0, eta_v=0, eta_s=0, label=0):
'\n\t\t\n\t\t'
self.name = name
self.rho = rho
self.c11 = c11
self.c12 = c12
self.c22 = c22
self.c44 = c44
self.VL = sqrt((c11 / rho))
self.VT = sqrt((c44 / rho))
self.eta_v = eta_v
self.eta_s = eta_s
self.Label = label
|
def pulseEcho(self):
'\n\t\tDefine Theta for PulseEcho Inspection. PulseEcho Inspection uses the same transducer acting as emitter and as receiver\n\t\t'
self.Theta = [((270 * pi) / 180), ((270 * pi) / 180)]
| 7,562,510,623,405,474,000
|
Define Theta for PulseEcho Inspection. PulseEcho Inspection uses the same transducer acting as emitter and as receiver
|
EFIT2D_Classes.py
|
pulseEcho
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def pulseEcho(self):
'\n\t\t\n\t\t'
self.Theta = [((270 * pi) / 180), ((270 * pi) / 180)]
|
def transmission(self):
    '\n\t\tDefine Theta for Transmission Inspection. Transmission uses two transducers, one used as emitter and another as receiver\n\t\t'
self.Theta = [((270 * pi) / 180), ((90 * pi) / 180)]
| 1,775,700,811,088,977,200
|
Define Theta for Transmission Inspection. Transmission uses two transducers, one used as emitter and another as receiver
|
EFIT2D_Classes.py
|
transmission
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def transmission(self):
'\n\t\t\n\t\t'
self.Theta = [((270 * pi) / 180), ((90 * pi) / 180)]
|
def __init__(self, Size=10, Offset=0, BorderOffset=0, Location=0, name='emisor'):
'\n\t\tConstructor of the Class Transducer\n\t\t'
self.Size = Size
self.Offset = Offset
self.BorderOffset = BorderOffset
self.SizePixel = 0
self.Location = Location
self.name = name
| 3,218,654,689,680,988,700
|
Constructor of the Class Transducer
|
EFIT2D_Classes.py
|
__init__
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def __init__(self, Size=10, Offset=0, BorderOffset=0, Location=0, name='emisor'):
'\n\t\t\n\t\t'
self.Size = Size
self.Offset = Offset
self.BorderOffset = BorderOffset
self.SizePixel = 0
self.Location = Location
self.name = name
|
def generate(self, t):
'\n\t\tGenerate the signal waveform\n\n\t\t@param t vector time\n\t\t@return signal vector with the same length as the vector time\n\n\t\t'
if (self.name == 'RaisedCosinePulse'):
return RaisedCosinePulse(t, self.Frequency, self.Amplitude)
elif (self.name == 'RickerPulse'):
return ricker(t, self.ts, self.Frequency)
| 7,699,083,209,038,406,000
|
Generate the signal waveform
@param t vector time
@return signal vector with the same length as the vector time
|
EFIT2D_Classes.py
|
generate
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def generate(self, t):
'\n\t\tGenerate the signal waveform\n\n\t\t@param t vector time\n\t\t@return signal vector with the same length as the vector time\n\n\t\t'
if (self.name == 'RaisedCosinePulse'):
return RaisedCosinePulse(t, self.Frequency, self.Amplitude)
elif (self.name == 'RickerPulse'):
return ricker(t, self.ts, self.Frequency)
|
def saveSignal(self, t):
'\n\t\tSave the signal waveform into the object\n\t\t@param t vector time\n\n\t\t'
self.time_signal = self.generate(t)
| 5,772,529,140,745,431,000
|
Save the signal waveform into the object
@param t vector time
|
EFIT2D_Classes.py
|
saveSignal
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def saveSignal(self, t):
'\n\t\tSave the signal waveform into the object\n\t\t@param t vector time\n\n\t\t'
self.time_signal = self.generate(t)
|
def __init__(self):
'\n\t\tConstructor of the Class Inspection\n\t\t'
self.Theta = 0
self.XL = 0
self.YL = 0
self.IR = 0
| -5,232,118,787,041,142,000
|
Constructor of the Class Inspection
|
EFIT2D_Classes.py
|
__init__
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def __init__(self):
'\n\t\t\n\t\t'
self.Theta = 0
self.XL = 0
self.YL = 0
self.IR = 0
|
def addOffset(self, image, transducer, NRI):
'\n\t\tHandle Offset\n\n\t\t'
NXL = np.size(self.XL, 0)
Ntheta = np.size(self.Theta, 0)
(M_pml, N_pml) = np.shape(image.Itemp)
self.YL += np.around((((transducer.Offset * image.Pixel_mm) * NRI) / float(N_pml)))
self.IR = np.zeros((Ntheta, Ntheta), dtype=np.float32)
B = list(range(0, Ntheta))
self.IR[:, 0] = np.int32(B[:])
for i in range(1, Ntheta):
B = np.roll(B, (- 1))
self.IR[:, i] = np.int32(B)
| 8,526,326,461,510,447,000
|
Handle Offset
|
EFIT2D_Classes.py
|
addOffset
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def addOffset(self, image, transducer, NRI):
'\n\t\t\n\n\t\t'
NXL = np.size(self.XL, 0)
Ntheta = np.size(self.Theta, 0)
(M_pml, N_pml) = np.shape(image.Itemp)
self.YL += np.around((((transducer.Offset * image.Pixel_mm) * NRI) / float(N_pml)))
self.IR = np.zeros((Ntheta, Ntheta), dtype=np.float32)
B = list(range(0, Ntheta))
self.IR[:, 0] = np.int32(B[:])
for i in range(1, Ntheta):
B = np.roll(B, (- 1))
self.IR[:, i] = np.int32(B)
|
def addBorderOffset(self, image, transducer, MRI):
'\n\t\tHandle Border Offset\n\n\t\t'
(M_pml, N_pml) = np.shape(image.Itemp)
ratio = (float(MRI) / float(M_pml))
self.XL[:, 0] += np.around(((transducer.BorderOffset * image.Pixel_mm) * ratio))
self.XL[:, 1] -= np.around(((transducer.BorderOffset * image.Pixel_mm) * ratio))
| 4,878,553,628,047,036,000
|
Handle Border Offset
|
EFIT2D_Classes.py
|
addBorderOffset
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def addBorderOffset(self, image, transducer, MRI):
'\n\t\t\n\n\t\t'
(M_pml, N_pml) = np.shape(image.Itemp)
ratio = (float(MRI) / float(M_pml))
self.XL[:, 0] += np.around(((transducer.BorderOffset * image.Pixel_mm) * ratio))
self.XL[:, 1] -= np.around(((transducer.BorderOffset * image.Pixel_mm) * ratio))
|
def jobParameters(self, materiales):
    '\n\t\tDefine Main Simulation Parameters\n\n\t\t@param materiales Materials List\n\n\t\t\n\t\t'
indVL = [mat.VL for mat in materiales if (mat.VL > 400)]
indVT = [mat.VT for mat in materiales if (mat.VT > 400)]
VL = np.array(indVL)
VT = np.array(indVT)
V = np.hstack((VL, VT))
self.dx = np.float32((np.min([V]) / (self.PointCycle * self.MaxFreq)))
self.dt = (self.TimeScale * np.float32(((0.7071 * self.dx) / np.max([V]))))
self.Ntiempo = int(round((self.SimTime / self.dt)))
self.t = (self.dt * np.arange(0, self.Ntiempo))
| -244,765,449,709,255,680
|
Define Main Simulation Parameters
@param materiales Materials List
|
EFIT2D_Classes.py
|
jobParameters
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def jobParameters(self, materiales):
    '\n\t\tDefine Main Simulation Parameters\n\n\t\t@param materiales Materials List\n\n\t\t\n\t\t'
indVL = [mat.VL for mat in materiales if (mat.VL > 400)]
indVT = [mat.VT for mat in materiales if (mat.VT > 400)]
VL = np.array(indVL)
VT = np.array(indVT)
V = np.hstack((VL, VT))
self.dx = np.float32((np.min([V]) / (self.PointCycle * self.MaxFreq)))
self.dt = (self.TimeScale * np.float32(((0.7071 * self.dx) / np.max([V]))))
self.Ntiempo = int(round((self.SimTime / self.dt)))
self.t = (self.dt * np.arange(0, self.Ntiempo))
|
def createNumericalModel(self, image):
'\n\t\tCreate the Numerical Model\n\n\t\t@param image The Scenario Object\n\t\t'
Mp = (((np.shape(image.Itemp)[0] * self.SpatialScale) / image.Pixel_mm) / self.dx)
self.Rgrid = (Mp / np.shape(image.Itemp)[0])
self.TapG = np.around(((image.Tap * self.Rgrid) * image.Pixel_mm))
self.Im = imresize(image.Itemp, self.Rgrid, interp='nearest')
(self.MRI, self.NRI) = np.shape(self.Im)
print(((((((('dt: ' + str(self.dt)) + ' dx: ') + str(self.dx)) + ' Grid: ') + str(self.MRI)) + ' x ') + str(self.NRI)))
| 7,753,574,737,775,600,000
|
Create the Numerical Model
@param image The Scenario Object
|
EFIT2D_Classes.py
|
createNumericalModel
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def createNumericalModel(self, image):
'\n\t\tCreate the Numerical Model\n\n\t\t@param image The Scenario Object\n\t\t'
Mp = (((np.shape(image.Itemp)[0] * self.SpatialScale) / image.Pixel_mm) / self.dx)
self.Rgrid = (Mp / np.shape(image.Itemp)[0])
self.TapG = np.around(((image.Tap * self.Rgrid) * image.Pixel_mm))
self.Im = imresize(image.Itemp, self.Rgrid, interp='nearest')
(self.MRI, self.NRI) = np.shape(self.Im)
print(((((((('dt: ' + str(self.dt)) + ' dx: ') + str(self.dx)) + ' Grid: ') + str(self.MRI)) + ' x ') + str(self.NRI)))
|
def initReceivers(self):
'\n\t\tInitialize the receivers\n\n\t\t'
self.receiver_signals = 0
| 6,697,424,247,738,574,000
|
Initialize the receivers
|
EFIT2D_Classes.py
|
initReceivers
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def initReceivers(self):
'\n\t\t\n\n\t\t'
self.receiver_signals = 0
|
def setDevice(self, Device):
'\n\t\tSet the Computation Device\n\n\t\t@param Device Device to be used\n\n\t\tDefine the device used to compute the simulations:\n\t\t\t - "CPU" : uses the global memory in th CPU\n\t\t\t - "GPU_Global" : uses the global memory in the GPU\n\t\t\t - "GPU_Local" : uses the local memory in the GPU\n\n\t\t'
if (Device == 0):
self.Device = 'CPU'
elif (Device == 1):
self.Device = 'GPU_Global'
elif (Device == 2):
self.Device = 'GPU_Local'
| -3,548,639,641,303,364,000
|
Set the Computation Device
@param Device Device to be used
Define the device used to compute the simulations:
- "CPU" : uses the global memory in th CPU
- "GPU_Global" : uses the global memory in the GPU
- "GPU_Local" : uses the local memory in the GPU
|
EFIT2D_Classes.py
|
setDevice
|
guillaumedavidphd/efit2d-pyopencl
|
python
|
def setDevice(self, Device):
'\n\t\tSet the Computation Device\n\n\t\t@param Device Device to be used\n\n\t\tDefine the device used to compute the simulations:\n\t\t\t - "CPU" : uses the global memory in th CPU\n\t\t\t - "GPU_Global" : uses the global memory in the GPU\n\t\t\t - "GPU_Local" : uses the local memory in the GPU\n\n\t\t'
if (Device == 0):
self.Device = 'CPU'
elif (Device == 1):
self.Device = 'GPU_Global'
elif (Device == 2):
self.Device = 'GPU_Local'
|
def parse_methods(csv_file, errors_dict):
'\n Parses the input CSV file with columns (method, usability, errors)\n and yields `MethodInfo` instances as a result.\n '
with csv_file.open(newline='') as f:
f = csv.reader(f)
next(f, None)
for (line, (method, usability, errors)) in enumerate(f, start=2):
try:
errors = [errors_dict[x] for x in errors.split()]
except KeyError:
raise ValueError('Method {} references unknown errors {}'.format(method, errors)) from None
(yield MethodInfo(method, usability, errors))
| 5,726,738,502,827,109,000
|
Parses the input CSV file with columns (method, usability, errors)
and yields `MethodInfo` instances as a result.
|
telethon_generator/parsers/methods.py
|
parse_methods
|
Thorbijoern/Telethon
|
python
|
def parse_methods(csv_file, errors_dict):
'\n Parses the input CSV file with columns (method, usability, errors)\n and yields `MethodInfo` instances as a result.\n '
with csv_file.open(newline=) as f:
f = csv.reader(f)
next(f, None)
for (line, (method, usability, errors)) in enumerate(f, start=2):
try:
errors = [errors_dict[x] for x in errors.split()]
except KeyError:
raise ValueError('Method {} references unknown errors {}'.format(method, errors)) from None
(yield MethodInfo(method, usability, errors))
|
def define_nav_elements(self):
'Return list of initialized pages or tabs accordingly.\n\n Should return, list: each item is an initialized app (ex `[AppBase(self.app)]` in the order each tab is rendered\n\n Raises:\n NotImplementedError: Child class must implement this method\n\n '
raise NotImplementedError('define_nav_elements must be implemented by child class')
| -9,071,551,403,152,067,000
|
Return list of initialized pages or tabs accordingly.
Should return, list: each item is an initialized app (ex `[AppBase(self.app)]` in the order each tab is rendered
Raises:
NotImplementedError: Child class must implement this method
|
dash_charts/utils_app_with_navigation.py
|
define_nav_elements
|
KyleKing/dash_charts
|
python
|
def define_nav_elements(self):
'Return list of initialized pages or tabs accordingly.\n\n Should return, list: each item is an initialized app (ex `[AppBase(self.app)]` in the order each tab is rendered\n\n Raises:\n NotImplementedError: Child class must implement this method\n\n '
raise NotImplementedError('define_nav_elements must be implemented by child class')
|
def create(self, **kwargs):
'Create each navigation componet, storing the layout. Then parent class to create application.\n\n Args:\n kwargs: keyword arguments passed to `self.create`\n\n '
self.nav_lookup = OrderedDict([(tab.name, tab) for tab in self.define_nav_elements()])
self.nav_layouts = {}
for (nav_name, nav) in self.nav_lookup.items():
nav.create(assign_layout=False)
self.nav_layouts[nav_name] = nav.return_layout()
self.validation_layout = [*map(deepcopy, self.nav_layouts.values())]
super().create(**kwargs)
| 1,415,531,660,317,778,000
|
Create each navigation componet, storing the layout. Then parent class to create application.
Args:
kwargs: keyword arguments passed to `self.create`
|
dash_charts/utils_app_with_navigation.py
|
create
|
KyleKing/dash_charts
|
python
|
def create(self, **kwargs):
'Create each navigation componet, storing the layout. Then parent class to create application.\n\n Args:\n kwargs: keyword arguments passed to `self.create`\n\n '
self.nav_lookup = OrderedDict([(tab.name, tab) for tab in self.define_nav_elements()])
self.nav_layouts = {}
for (nav_name, nav) in self.nav_lookup.items():
nav.create(assign_layout=False)
self.nav_layouts[nav_name] = nav.return_layout()
self.validation_layout = [*map(deepcopy, self.nav_layouts.values())]
super().create(**kwargs)
|
def initialization(self) -> None:
'Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.'
super().initialization()
self.register_uniq_ids(self.app_ids)
| -6,014,535,785,036,283,000
|
Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.
|
dash_charts/utils_app_with_navigation.py
|
initialization
|
KyleKing/dash_charts
|
python
|
def initialization(self) -> None:
super().initialization()
self.register_uniq_ids(self.app_ids)
|
def create_elements(self) -> None:
'Override method as not needed at navigation-level.'
...
| -7,928,977,626,787,852,000
|
Override method as not needed at navigation-level.
|
dash_charts/utils_app_with_navigation.py
|
create_elements
|
KyleKing/dash_charts
|
python
|
def create_elements(self) -> None:
...
|
def create_callbacks(self) -> None:
'Override method as not needed at navigation-level.'
...
| 728,964,613,210,143,400
|
Override method as not needed at navigation-level.
|
dash_charts/utils_app_with_navigation.py
|
create_callbacks
|
KyleKing/dash_charts
|
python
|
def create_callbacks(self) -> None:
...
|
def initialization(self) -> None:
'Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.'
super().initialization()
self.register_uniq_ids(['N/A'])
| 4,057,255,263,994,259,000
|
Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.
|
dash_charts/utils_app_with_navigation.py
|
initialization
|
KyleKing/dash_charts
|
python
|
def initialization(self) -> None:
super().initialization()
self.register_uniq_ids(['N/A'])
|
def create_elements(self) -> None:
'Initialize the charts, tables, and other Dash elements..'
...
| 4,531,074,656,994,668,500
|
Initialize the charts, tables, and other Dash elements..
|
dash_charts/utils_app_with_navigation.py
|
create_elements
|
KyleKing/dash_charts
|
python
|
def create_elements(self) -> None:
...
|
def create_callbacks(self) -> None:
'Register callbacks necessary for this tab.'
...
| -9,179,829,243,889,156,000
|
Register callbacks necessary for this tab.
|
dash_charts/utils_app_with_navigation.py
|
create_callbacks
|
KyleKing/dash_charts
|
python
|
def create_callbacks(self) -> None:
...
|
def return_layout(self) -> dict:
'Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n '
tabs = [dcc.Tab(label=name, value=name) for (name, tab) in self.nav_lookup.items()]
return html.Div(children=[dcc.Tabs(id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0], children=tabs), html.Div(id=self._il[self.id_tabs_content])])
| -2,062,648,213,333,622,800
|
Return Dash application layout.
Returns:
dict: Dash HTML object
|
dash_charts/utils_app_with_navigation.py
|
return_layout
|
KyleKing/dash_charts
|
python
|
def return_layout(self) -> dict:
'Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n '
tabs = [dcc.Tab(label=name, value=name) for (name, tab) in self.nav_lookup.items()]
return html.Div(children=[dcc.Tabs(id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0], children=tabs), html.Div(id=self._il[self.id_tabs_content])])
|
def create_callbacks(self) -> None:
'Register the navigation callback.'
outputs = [(self.id_tabs_content, 'children')]
inputs = [(self.id_tabs_select, 'value')]
@self.callback(outputs, inputs, [])
def render_tab(tab_name):
return [self.nav_layouts[tab_name]]
| -152,575,347,912,989,060
|
Register the navigation callback.
|
dash_charts/utils_app_with_navigation.py
|
create_callbacks
|
KyleKing/dash_charts
|
python
|
def create_callbacks(self) -> None:
outputs = [(self.id_tabs_content, 'children')]
inputs = [(self.id_tabs_select, 'value')]
@self.callback(outputs, inputs, [])
def render_tab(tab_name):
return [self.nav_layouts[tab_name]]
|
def verify_app_initialization(self):
'Check that the app was properly initialized.\n\n Raises:\n RuntimeError: if child class has not called `self.register_uniq_ids`\n\n '
super().verify_app_initialization()
allowed_locations = ('left', 'top', 'bottom', 'right')
if (self.tabs_location not in allowed_locations):
raise RuntimeError(f'`self.tabs_location = {self.tabs_location}` is not in {allowed_locations}')
| 8,322,259,039,936,427,000
|
Check that the app was properly initialized.
Raises:
RuntimeError: if child class has not called `self.register_uniq_ids`
|
dash_charts/utils_app_with_navigation.py
|
verify_app_initialization
|
KyleKing/dash_charts
|
python
|
def verify_app_initialization(self):
'Check that the app was properly initialized.\n\n Raises:\n RuntimeError: if child class has not called `self.register_uniq_ids`\n\n '
super().verify_app_initialization()
allowed_locations = ('left', 'top', 'bottom', 'right')
if (self.tabs_location not in allowed_locations):
raise RuntimeError(f'`self.tabs_location = {self.tabs_location}` is not in {allowed_locations}')
|
def return_layout(self) -> dict:
'Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n '
return html.Div(children=[self.tab_menu(), html.Div(style={f'margin-{self.tabs_location}': self.tabs_margin}, children=[html.Div(id=self._il[self.id_tabs_content])])])
| -6,400,689,786,300,438,000
|
Return Dash application layout.
Returns:
dict: Dash HTML object
|
dash_charts/utils_app_with_navigation.py
|
return_layout
|
KyleKing/dash_charts
|
python
|
def return_layout(self) -> dict:
'Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n '
return html.Div(children=[self.tab_menu(), html.Div(style={f'margin-{self.tabs_location}': self.tabs_margin}, children=[html.Div(id=self._il[self.id_tabs_content])])])
|
def generate_tab_kwargs(self):
'Create the tab keyword arguments. Intended to be modified through inheritance.\n\n Returns:\n tuple: keyword arguments and styling for the dcc.Tab elements\n\n - tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab\n - tabs_kwargs: to be passed to dcc.Tabs\n - tabs_style: style for the dcc.Tabs HTML element\n\n '
if self.tabs_compact:
tab_style = {'padding': '2px 4px 2px 4px'}
tabs_padding = '6px 0 0 2px'
else:
tab_style = {'padding': '10px 20px 10px 20px'}
tabs_padding = '15px 0 0 5px'
selected_style = deepcopy(tab_style)
opposite_lookup = {'top': 'bottom', 'bottom': 'top', 'left': 'right', 'right': 'left'}
tabs_style = {'backgroundColor': '#F9F9F9', 'padding': tabs_padding, 'position': 'fixed', 'zIndex': '999', f'border{opposite_lookup[self.tabs_location].title()}': '1px solid #d6d6d6', self.tabs_location: '0'}
if (self.tabs_location in ['left', 'right']):
selected_style['border-left'] = '3px solid #119DFF'
tabs_kwargs = {'vertical': True, 'style': {'width': '100%'}, 'parent_style': {'width': '100%'}}
tabs_style['top'] = '0'
tabs_style['bottom'] = '0'
tabs_style['width'] = 'auto'
else:
selected_style['border-top'] = '3px solid #119DFF'
tabs_kwargs = {}
tabs_style['height'] = 'auto'
tabs_style['right'] = '0'
tabs_style['left'] = '0'
tab_kwargs = {'style': tab_style, 'selected_style': selected_style}
return (tab_kwargs, tabs_kwargs, tabs_style)
| -7,499,227,051,544,763,000
|
Create the tab keyword arguments. Intended to be modified through inheritance.
Returns:
tuple: keyword arguments and styling for the dcc.Tab elements
- tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab
- tabs_kwargs: to be passed to dcc.Tabs
- tabs_style: style for the dcc.Tabs HTML element
|
dash_charts/utils_app_with_navigation.py
|
generate_tab_kwargs
|
KyleKing/dash_charts
|
python
|
def generate_tab_kwargs(self):
'Create the tab keyword arguments. Intended to be modified through inheritance.\n\n Returns:\n tuple: keyword arguments and styling for the dcc.Tab elements\n\n - tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab\n - tabs_kwargs: to be passed to dcc.Tabs\n - tabs_style: style for the dcc.Tabs HTML element\n\n '
if self.tabs_compact:
tab_style = {'padding': '2px 4px 2px 4px'}
tabs_padding = '6px 0 0 2px'
else:
tab_style = {'padding': '10px 20px 10px 20px'}
tabs_padding = '15px 0 0 5px'
selected_style = deepcopy(tab_style)
opposite_lookup = {'top': 'bottom', 'bottom': 'top', 'left': 'right', 'right': 'left'}
tabs_style = {'backgroundColor': '#F9F9F9', 'padding': tabs_padding, 'position': 'fixed', 'zIndex': '999', f'border{opposite_lookup[self.tabs_location].title()}': '1px solid #d6d6d6', self.tabs_location: '0'}
if (self.tabs_location in ['left', 'right']):
selected_style['border-left'] = '3px solid #119DFF'
tabs_kwargs = {'vertical': True, 'style': {'width': '100%'}, 'parent_style': {'width': '100%'}}
tabs_style['top'] = '0'
tabs_style['bottom'] = '0'
tabs_style['width'] = 'auto'
else:
selected_style['border-top'] = '3px solid #119DFF'
tabs_kwargs = {}
tabs_style['height'] = 'auto'
tabs_style['right'] = '0'
tabs_style['left'] = '0'
tab_kwargs = {'style': tab_style, 'selected_style': selected_style}
return (tab_kwargs, tabs_kwargs, tabs_style)
|
def tab_menu(self):
'Return the HTML elements for the tab menu.\n\n Returns:\n dict: Dash HTML object\n\n '
(tab_kwargs, tabs_kwargs, tabs_style) = self.generate_tab_kwargs()
tabs = [dcc.Tab(label=name, value=name, **tab_kwargs) for (name, tab) in self.nav_lookup.items()]
return html.Div(children=[dcc.Tabs(id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0], children=tabs, **tabs_kwargs)], style=tabs_style)
| -3,407,844,848,752,143,400
|
Return the HTML elements for the tab menu.
Returns:
dict: Dash HTML object
|
dash_charts/utils_app_with_navigation.py
|
tab_menu
|
KyleKing/dash_charts
|
python
|
def tab_menu(self):
'Return the HTML elements for the tab menu.\n\n Returns:\n dict: Dash HTML object\n\n '
(tab_kwargs, tabs_kwargs, tabs_style) = self.generate_tab_kwargs()
tabs = [dcc.Tab(label=name, value=name, **tab_kwargs) for (name, tab) in self.nav_lookup.items()]
return html.Div(children=[dcc.Tabs(id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0], children=tabs, **tabs_kwargs)], style=tabs_style)
|
def return_layout(self) -> dict:
'Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n '
return html.Div(children=[dcc.Location(id=self._il[self.id_url], refresh=False), self.nav_bar(), html.Div(id=self._il[self.id_pages_content])])
| -3,280,295,393,280,241,700
|
Return Dash application layout.
Returns:
dict: Dash HTML object
|
dash_charts/utils_app_with_navigation.py
|
return_layout
|
KyleKing/dash_charts
|
python
|
def return_layout(self) -> dict:
'Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n '
return html.Div(children=[dcc.Location(id=self._il[self.id_url], refresh=False), self.nav_bar(), html.Div(id=self._il[self.id_pages_content])])
|
def nav_bar(self):
'Return the HTML elements for the navigation menu.\n\n Returns:\n dict: Dash HTML object\n\n '
brand = []
if self.logo:
brand.append(dbc.Col(html.Img(src=self.logo, height='25px')))
brand.append(dbc.Col(dbc.NavbarBrand(self.name, className='ml-2')))
links = []
if self.navbar_links:
links.append(dbc.Nav(children=[dbc.NavItem(dbc.NavLink(name, href=link)) for (name, link) in self.navbar_links], fill=True, navbar=True))
if self.dropdown_links:
links.append(dbc.Nav(dbc.DropdownMenu(children=[dbc.DropdownMenuItem(name, href=link) for (name, link) in self.dropdown_links], in_navbar=True, label='Links', nav=True), navbar=True))
return dbc.Navbar(children=[dbc.NavLink([dbc.Row(children=brand, align='center', no_gutters=True)], href='/'), dbc.NavbarToggler(id=self._il[self.id_toggler]), dbc.Collapse(dbc.Row(children=links, no_gutters=True, className='flex-nowrap mt-3 mt-md-0', align='center'), id=self._il[self.id_collapse], navbar=True)], sticky='top', color='dark', dark=True)
| -4,144,133,806,008,334,300
|
Return the HTML elements for the navigation menu.
Returns:
dict: Dash HTML object
|
dash_charts/utils_app_with_navigation.py
|
nav_bar
|
KyleKing/dash_charts
|
python
|
def nav_bar(self):
'Return the HTML elements for the navigation menu.\n\n Returns:\n dict: Dash HTML object\n\n '
brand = []
if self.logo:
brand.append(dbc.Col(html.Img(src=self.logo, height='25px')))
brand.append(dbc.Col(dbc.NavbarBrand(self.name, className='ml-2')))
links = []
if self.navbar_links:
links.append(dbc.Nav(children=[dbc.NavItem(dbc.NavLink(name, href=link)) for (name, link) in self.navbar_links], fill=True, navbar=True))
if self.dropdown_links:
links.append(dbc.Nav(dbc.DropdownMenu(children=[dbc.DropdownMenuItem(name, href=link) for (name, link) in self.dropdown_links], in_navbar=True, label='Links', nav=True), navbar=True))
return dbc.Navbar(children=[dbc.NavLink([dbc.Row(children=brand, align='center', no_gutters=True)], href='/'), dbc.NavbarToggler(id=self._il[self.id_toggler]), dbc.Collapse(dbc.Row(children=links, no_gutters=True, className='flex-nowrap mt-3 mt-md-0', align='center'), id=self._il[self.id_collapse], navbar=True)], sticky='top', color='dark', dark=True)
|
def create_callbacks(self) -> None:
'Register the navigation callback.'
outputs = [(self.id_pages_content, 'children')]
inputs = [(self.id_url, 'pathname')]
@self.callback(outputs, inputs, [])
def render_page(pathname):
try:
return [self.nav_layouts[self.select_page_name(pathname)]]
except Exception as err:
return [html.Div(children=[f'''Error rendering "{pathname}":
{err}'''])]
@self.callback([(self.id_collapse, 'is_open')], [(self.id_toggler, 'n_clicks')], [(self.id_collapse, 'is_open')])
def toggle_navbar_collapse(n_clicks, is_open):
return [((not is_open) if n_clicks else is_open)]
| 4,695,464,808,721,864,000
|
Register the navigation callback.
|
dash_charts/utils_app_with_navigation.py
|
create_callbacks
|
KyleKing/dash_charts
|
python
|
def create_callbacks(self) -> None:
outputs = [(self.id_pages_content, 'children')]
inputs = [(self.id_url, 'pathname')]
@self.callback(outputs, inputs, [])
def render_page(pathname):
try:
return [self.nav_layouts[self.select_page_name(pathname)]]
except Exception as err:
return [html.Div(children=[f'Error rendering "{pathname}":
{err}'])]
@self.callback([(self.id_collapse, 'is_open')], [(self.id_toggler, 'n_clicks')], [(self.id_collapse, 'is_open')])
def toggle_navbar_collapse(n_clicks, is_open):
return [((not is_open) if n_clicks else is_open)]
|
def select_page_name(self, pathname):
'Return the page name determined based on the pathname.\n\n Should return str: page name\n\n Args:\n pathname: relative pathname from URL\n\n Raises:\n NotImplementedError: Child class must implement this method\n\n '
raise NotImplementedError('nav_bar must be implemented by child class')
| 4,873,284,732,234,408,000
|
Return the page name determined based on the pathname.
Should return str: page name
Args:
pathname: relative pathname from URL
Raises:
NotImplementedError: Child class must implement this method
|
dash_charts/utils_app_with_navigation.py
|
select_page_name
|
KyleKing/dash_charts
|
python
|
def select_page_name(self, pathname):
'Return the page name determined based on the pathname.\n\n Should return str: page name\n\n Args:\n pathname: relative pathname from URL\n\n Raises:\n NotImplementedError: Child class must implement this method\n\n '
raise NotImplementedError('nav_bar must be implemented by child class')
|
def test_run(self):
'A dummy test just to run configured workloads'
pass
| -5,225,681,682,568,962,000
|
A dummy test just to run configured workloads
|
tests/eas/rfc.py
|
test_run
|
ADVAN-ELAA-8QM-PRC1/platform-external-lisa
|
python
|
def test_run(self):
pass
|
def main() -> None:
'\n Entry point of this test project.\n '
ap.Stage(background_color='#333', stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
sprite.graphics.move_to(x=50, y=30)
sprite.graphics.line_to(x=450, y=30)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=20))
sprite.graphics.move_to(x=50, y=60)
sprite.graphics.line_to(x=450, y=60)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=20, space_size=0))
sprite.graphics.move_to(x=50, y=90)
sprite.graphics.line_to(x=450, y=90)
sprite.graphics.line_style(color='#0af', thickness=3)
sprite.graphics.move_to(x=40, y=120)
sprite.graphics.line_to(x=460, y=120)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
sprite.graphics.line_to(x=450, y=150)
sprite.graphics.line_to(x=700, y=250)
sprite.graphics.line_to(x=700, y=150)
polyline.click(on_polyline_click)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
| 3,645,435,936,439,092,700
|
Entry point of this test project.
|
test_projects/line_round_dot_setting/main.py
|
main
|
simon-ritchie/action-py-script
|
python
|
def main() -> None:
'\n \n '
ap.Stage(background_color='#333', stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
sprite.graphics.move_to(x=50, y=30)
sprite.graphics.line_to(x=450, y=30)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=20))
sprite.graphics.move_to(x=50, y=60)
sprite.graphics.line_to(x=450, y=60)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=20, space_size=0))
sprite.graphics.move_to(x=50, y=90)
sprite.graphics.line_to(x=450, y=90)
sprite.graphics.line_style(color='#0af', thickness=3)
sprite.graphics.move_to(x=40, y=120)
sprite.graphics.line_to(x=460, y=120)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
sprite.graphics.line_to(x=450, y=150)
sprite.graphics.line_to(x=700, y=250)
sprite.graphics.line_to(x=700, y=150)
polyline.click(on_polyline_click)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
|
def on_polyline_click(e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
'\n Handler that called when polyline is clicked.\n\n Parameters\n ----------\n e : MouseEvent\n Created MouseEvent instance.\n options : dict\n Optional parameters.\n '
polyline: ap.Polyline = e.this
polyline.line_round_dot_setting = None
| -1,806,673,741,255,246,000
|
Handler that called when polyline is clicked.
Parameters
----------
e : MouseEvent
Created MouseEvent instance.
options : dict
Optional parameters.
|
test_projects/line_round_dot_setting/main.py
|
on_polyline_click
|
simon-ritchie/action-py-script
|
python
|
def on_polyline_click(e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
'\n Handler that called when polyline is clicked.\n\n Parameters\n ----------\n e : MouseEvent\n Created MouseEvent instance.\n options : dict\n Optional parameters.\n '
polyline: ap.Polyline = e.this
polyline.line_round_dot_setting = None
|
def test_01_set_body_pagelink(self):
'Test the get_body_pagelink_ids and set_body_pagelink functions.'
self.set_setting('PAGE_LINK_FILTER', True)
page1 = self.create_new_page()
page2 = self.create_new_page()
content_string = 'test <a href="%s" class="page_%d">hello</a>'
content = Content(page=page2, language='en-us', type='body', body=(content_string % ('#', page1.id)))
content.save()
self.assertEqual(Content.objects.get_content(page2, 'en-us', 'body'), (content_string % (page1.get_url_path(), page1.id)))
self.assertFalse(page2.has_broken_link())
page1.delete()
self.assertEqual(Content.objects.get_content(page2, 'en-us', 'body'), 'test <a href="#" class="pagelink_broken">hello</a>')
self.assertTrue(page2.has_broken_link())
| -6,059,341,719,195,447,000
|
Test the get_body_pagelink_ids and set_body_pagelink functions.
|
pages/tests/test_pages_link.py
|
test_01_set_body_pagelink
|
redsolution/django-page-cms
|
python
|
def test_01_set_body_pagelink(self):
self.set_setting('PAGE_LINK_FILTER', True)
page1 = self.create_new_page()
page2 = self.create_new_page()
content_string = 'test <a href="%s" class="page_%d">hello</a>'
content = Content(page=page2, language='en-us', type='body', body=(content_string % ('#', page1.id)))
content.save()
self.assertEqual(Content.objects.get_content(page2, 'en-us', 'body'), (content_string % (page1.get_url_path(), page1.id)))
self.assertFalse(page2.has_broken_link())
page1.delete()
self.assertEqual(Content.objects.get_content(page2, 'en-us', 'body'), 'test <a href="#" class="pagelink_broken">hello</a>')
self.assertTrue(page2.has_broken_link())
|
def log(self, message):
'\n Logs a message for analysis of model training.\n '
self._logger.log(message)
| -2,225,675,124,089,114,000
|
Logs a message for analysis of model training.
|
rafiki/model/log.py
|
log
|
Yirui-Wang/rafiki
|
python
|
def log(self, message):
'\n \n '
self._logger.log(message)
|
def define_loss_plot(self):
'\n Convenience method of defining a plot of ``loss`` against ``epoch``.\n To be used with ``log_loss_metric()``.\n '
self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')
| 7,711,925,971,221,801,000
|
Convenience method of defining a plot of ``loss`` against ``epoch``.
To be used with ``log_loss_metric()``.
|
rafiki/model/log.py
|
define_loss_plot
|
Yirui-Wang/rafiki
|
python
|
def define_loss_plot(self):
'\n Convenience method of defining a plot of ``loss`` against ``epoch``.\n To be used with ``log_loss_metric()``.\n '
self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')
|
def log_loss_metric(self, loss, epoch):
'\n Convenience method for logging `loss` against `epoch`.\n To be used with ``define_loss_plot()``.\n '
self.log_metrics(loss=loss, epoch=epoch)
| -2,649,884,738,541,751,000
|
Convenience method for logging `loss` against `epoch`.
To be used with ``define_loss_plot()``.
|
rafiki/model/log.py
|
log_loss_metric
|
Yirui-Wang/rafiki
|
python
|
def log_loss_metric(self, loss, epoch):
'\n Convenience method for logging `loss` against `epoch`.\n To be used with ``define_loss_plot()``.\n '
self.log_metrics(loss=loss, epoch=epoch)
|
def define_plot(self, title, metrics, x_axis=None):
'\n Defines a plot for a set of metrics for analysis of model training.\n By default, metrics will be plotted against time.\n '
self._logger.define_plot(title, metrics, x_axis)
| 309,463,364,877,846,700
|
Defines a plot for a set of metrics for analysis of model training.
By default, metrics will be plotted against time.
|
rafiki/model/log.py
|
define_plot
|
Yirui-Wang/rafiki
|
python
|
def define_plot(self, title, metrics, x_axis=None):
'\n Defines a plot for a set of metrics for analysis of model training.\n By default, metrics will be plotted against time.\n '
self._logger.define_plot(title, metrics, x_axis)
|
def log_metrics(self, **kwargs):
'\n Logs metrics for a single point in time { <metric>: <value> }.\n <value> should be a number.\n '
self._logger.log_metrics(**kwargs)
| 2,631,083,698,204,591,000
|
Logs metrics for a single point in time { <metric>: <value> }.
<value> should be a number.
|
rafiki/model/log.py
|
log_metrics
|
Yirui-Wang/rafiki
|
python
|
def log_metrics(self, **kwargs):
'\n Logs metrics for a single point in time { <metric>: <value> }.\n <value> should be a number.\n '
self._logger.log_metrics(**kwargs)
|
def config(settings):
'\n Template for WA-COP + CAD Cloud Integration\n '
T = current.T
settings.base.system_name = T('Sahana: Washington Common Operating Picture (WA-COP)')
settings.base.system_name_short = T('Sahana')
settings.base.prepopulate_options = {'mandatory': 'CAD', 'default': ('default/users', 'CAD/Demo')}
settings.base.prepopulate = 'template:default'
settings.auth.registration_requires_verification = True
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
settings.mail.approver = 'ADMIN'
settings.auth.registration_link_user_to = {'staff': T('Staff')}
settings.auth.registration_link_user_to_default = ['staff']
settings.auth.registration_roles = {'organisation_id': ['USER']}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
settings.security.policy = 7
settings.security.map = True
settings.L10n.languages = OrderedDict([('en', 'English')])
settings.L10n.default_language = 'en'
settings.L10n.utc_offset = '-0800'
settings.L10n.date_format = '%b %d %Y'
settings.L10n.decimal_separator = '.'
settings.L10n.thousands_separator = ','
settings.L10n.default_country_code = 1
settings.ui.label_mobile_phone = 'Cell Phone'
settings.ui.label_postcode = 'ZIP Code'
settings.msg.require_international_phone_numbers = False
settings.base.paper_size = T('Letter')
settings.gis.countries = ('US',)
levels = ('L1', 'L2', 'L3')
settings.gis.check_within_parent_boundaries = False
settings.gis.geonames_username = 'mcop'
settings.gis.legend = 'float'
settings.gis.location_represent_address_only = 'icon'
settings.gis.poi_create_resources = None
settings.event.incident_teams_tab = 'Units'
settings.modules = OrderedDict([('default', Storage(name_nice='Home', restricted=False, access=None, module_type=None)), ('admin', Storage(name_nice='Administration', restricted=True, access='|1|', module_type=None)), ('appadmin', Storage(name_nice='Administration', restricted=True, module_type=None)), ('sync', Storage(name_nice='Synchronization', restricted=True, access='|1|', module_type=None)), ('translate', Storage(name_nice='Translation Functionality', module_type=None)), ('gis', Storage(name_nice='Map', restricted=True, module_type=1)), ('pr', Storage(name_nice='Persons', description='Central point to record details on People', restricted=True, access='|1|', module_type=None)), ('org', Storage(name_nice='Organizations', restricted=True, module_type=10)), ('hrm', Storage(name_nice='Contacts', restricted=True, module_type=None)), ('cms', Storage(name_nice='Content Management', restricted=True, module_type=10)), ('event', Storage(name_nice='Event Management', restricted=True, module_type=2)), ('project', Storage(name_nice='Project Management', restricted=True, module_type=None)), ('doc', Storage(name_nice='Documents', restricted=True, module_type=None)), ('stats', Storage(name_nice='Statistics', restricted=True, module_type=None))])
| -1,311,257,314,297,650,200
|
Template for WA-COP + CAD Cloud Integration
|
modules/templates/CAD/config.py
|
config
|
anurag-ks/eden
|
python
|
def config(settings):
'\n \n '
T = current.T
settings.base.system_name = T('Sahana: Washington Common Operating Picture (WA-COP)')
settings.base.system_name_short = T('Sahana')
settings.base.prepopulate_options = {'mandatory': 'CAD', 'default': ('default/users', 'CAD/Demo')}
settings.base.prepopulate = 'template:default'
settings.auth.registration_requires_verification = True
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
settings.mail.approver = 'ADMIN'
settings.auth.registration_link_user_to = {'staff': T('Staff')}
settings.auth.registration_link_user_to_default = ['staff']
settings.auth.registration_roles = {'organisation_id': ['USER']}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
settings.security.policy = 7
settings.security.map = True
settings.L10n.languages = OrderedDict([('en', 'English')])
settings.L10n.default_language = 'en'
settings.L10n.utc_offset = '-0800'
settings.L10n.date_format = '%b %d %Y'
settings.L10n.decimal_separator = '.'
settings.L10n.thousands_separator = ','
settings.L10n.default_country_code = 1
settings.ui.label_mobile_phone = 'Cell Phone'
settings.ui.label_postcode = 'ZIP Code'
settings.msg.require_international_phone_numbers = False
settings.base.paper_size = T('Letter')
settings.gis.countries = ('US',)
levels = ('L1', 'L2', 'L3')
settings.gis.check_within_parent_boundaries = False
settings.gis.geonames_username = 'mcop'
settings.gis.legend = 'float'
settings.gis.location_represent_address_only = 'icon'
settings.gis.poi_create_resources = None
settings.event.incident_teams_tab = 'Units'
settings.modules = OrderedDict([('default', Storage(name_nice='Home', restricted=False, access=None, module_type=None)), ('admin', Storage(name_nice='Administration', restricted=True, access='|1|', module_type=None)), ('appadmin', Storage(name_nice='Administration', restricted=True, module_type=None)), ('sync', Storage(name_nice='Synchronization', restricted=True, access='|1|', module_type=None)), ('translate', Storage(name_nice='Translation Functionality', module_type=None)), ('gis', Storage(name_nice='Map', restricted=True, module_type=1)), ('pr', Storage(name_nice='Persons', description='Central point to record details on People', restricted=True, access='|1|', module_type=None)), ('org', Storage(name_nice='Organizations', restricted=True, module_type=10)), ('hrm', Storage(name_nice='Contacts', restricted=True, module_type=None)), ('cms', Storage(name_nice='Content Management', restricted=True, module_type=10)), ('event', Storage(name_nice='Event Management', restricted=True, module_type=2)), ('project', Storage(name_nice='Project Management', restricted=True, module_type=None)), ('doc', Storage(name_nice='Documents', restricted=True, module_type=None)), ('stats', Storage(name_nice='Statistics', restricted=True, module_type=None))])
|
def __init__(self):
'\n Evaluates ground truth constructor\n '
| 7,175,434,617,803,589,000
|
Evaluates ground truth constructor
|
art/defences/detector/poison/ground_truth_evaluator.py
|
__init__
|
SecantZhang/adversarial-robustness-toolbox
|
python
|
def __init__(self):
'\n \n '
|
def analyze_correctness(self, assigned_clean_by_class: Union[(np.ndarray, List[np.ndarray])], is_clean_by_class: list) -> Tuple[(np.ndarray, str)]:
'\n For each training sample, determine whether the activation clustering method was correct.\n\n :param assigned_clean_by_class: Result of clustering.\n :param is_clean_by_class: is clean separated by class.\n :return: Two variables are returned:\n 1) all_errors_by_class[i]: an array indicating the correctness of each assignment\n in the ith class. Such that:\n all_errors_by_class[i] = 0 if marked poison, is poison\n all_errors_by_class[i] = 1 if marked clean, is clean\n all_errors_by_class[i] = 2 if marked poison, is clean\n all_errors_by_class[i] = 3 marked clean, is poison\n 2) Json object with confusion matrix per-class.\n '
all_errors_by_class = []
poison = 0
clean = 1
dic_json = {}
logger.debug('Error rates per class:')
for (class_i, (assigned_clean, is_clean)) in enumerate(zip(assigned_clean_by_class, is_clean_by_class)):
errors = []
for (assignment, bl_var) in zip(assigned_clean, is_clean):
bl_var = int(bl_var)
if ((assignment == poison) and (bl_var == poison)):
errors.append(0)
elif ((assignment == clean) and (bl_var == clean)):
errors.append(1)
elif ((assignment == poison) and (bl_var == clean)):
errors.append(2)
elif ((assignment == clean) and (bl_var == poison)):
errors.append(3)
else:
raise Exception('Analyze_correctness entered wrong class')
errors = np.asarray(errors)
logger.debug('-------------------%d---------------', class_i)
key_i = ('class_' + str(class_i))
matrix_i = self.get_confusion_matrix(errors)
dic_json.update({key_i: matrix_i})
all_errors_by_class.append(errors)
all_errors_by_class = np.asarray(all_errors_by_class)
conf_matrix_json = json.dumps(dic_json)
return (all_errors_by_class, conf_matrix_json)
| 2,196,615,981,718,381,000
|
For each training sample, determine whether the activation clustering method was correct.
:param assigned_clean_by_class: Result of clustering.
:param is_clean_by_class: is clean separated by class.
:return: Two variables are returned:
1) all_errors_by_class[i]: an array indicating the correctness of each assignment
in the ith class. Such that:
all_errors_by_class[i] = 0 if marked poison, is poison
all_errors_by_class[i] = 1 if marked clean, is clean
all_errors_by_class[i] = 2 if marked poison, is clean
all_errors_by_class[i] = 3 marked clean, is poison
2) Json object with confusion matrix per-class.
|
art/defences/detector/poison/ground_truth_evaluator.py
|
analyze_correctness
|
SecantZhang/adversarial-robustness-toolbox
|
python
|
def analyze_correctness(self, assigned_clean_by_class: Union[(np.ndarray, List[np.ndarray])], is_clean_by_class: list) -> Tuple[(np.ndarray, str)]:
'\n For each training sample, determine whether the activation clustering method was correct.\n\n :param assigned_clean_by_class: Result of clustering.\n :param is_clean_by_class: is clean separated by class.\n :return: Two variables are returned:\n 1) all_errors_by_class[i]: an array indicating the correctness of each assignment\n in the ith class. Such that:\n all_errors_by_class[i] = 0 if marked poison, is poison\n all_errors_by_class[i] = 1 if marked clean, is clean\n all_errors_by_class[i] = 2 if marked poison, is clean\n all_errors_by_class[i] = 3 marked clean, is poison\n 2) Json object with confusion matrix per-class.\n '
all_errors_by_class = []
poison = 0
clean = 1
dic_json = {}
logger.debug('Error rates per class:')
for (class_i, (assigned_clean, is_clean)) in enumerate(zip(assigned_clean_by_class, is_clean_by_class)):
errors = []
for (assignment, bl_var) in zip(assigned_clean, is_clean):
bl_var = int(bl_var)
if ((assignment == poison) and (bl_var == poison)):
errors.append(0)
elif ((assignment == clean) and (bl_var == clean)):
errors.append(1)
elif ((assignment == poison) and (bl_var == clean)):
errors.append(2)
elif ((assignment == clean) and (bl_var == poison)):
errors.append(3)
else:
raise Exception('Analyze_correctness entered wrong class')
errors = np.asarray(errors)
logger.debug('-------------------%d---------------', class_i)
key_i = ('class_' + str(class_i))
matrix_i = self.get_confusion_matrix(errors)
dic_json.update({key_i: matrix_i})
all_errors_by_class.append(errors)
all_errors_by_class = np.asarray(all_errors_by_class)
conf_matrix_json = json.dumps(dic_json)
return (all_errors_by_class, conf_matrix_json)
|
def get_confusion_matrix(self, values: np.ndarray) -> dict:
'\n Computes and returns a json object that contains the confusion matrix for each class.\n\n :param values: Array indicating the correctness of each assignment in the ith class.\n :return: Json object with confusion matrix per-class.\n '
dic_class = {}
true_positive = np.where((values == 0))[0].shape[0]
true_negative = np.where((values == 1))[0].shape[0]
false_positive = np.where((values == 2))[0].shape[0]
false_negative = np.where((values == 3))[0].shape[0]
tp_rate = self.calculate_and_print(true_positive, (true_positive + false_negative), 'true-positive rate')
tn_rate = self.calculate_and_print(true_negative, (false_positive + true_negative), 'true-negative rate')
fp_rate = self.calculate_and_print(false_positive, (false_positive + true_negative), 'false-positive rate')
fn_rate = self.calculate_and_print(false_negative, (true_positive + false_negative), 'false-negative rate')
dic_tp = dict(rate=round(tp_rate, 2), numerator=true_positive, denominator=(true_positive + false_negative))
if ((true_positive + false_negative) == 0):
dic_tp = dict(rate='N/A', numerator=true_positive, denominator=(true_positive + false_negative))
dic_tn = dict(rate=round(tn_rate, 2), numerator=true_negative, denominator=(false_positive + true_negative))
if ((false_positive + true_negative) == 0):
dic_tn = dict(rate='N/A', numerator=true_negative, denominator=(false_positive + true_negative))
dic_fp = dict(rate=round(fp_rate, 2), numerator=false_positive, denominator=(false_positive + true_negative))
if ((false_positive + true_negative) == 0):
dic_fp = dict(rate='N/A', numerator=false_positive, denominator=(false_positive + true_negative))
dic_fn = dict(rate=round(fn_rate, 2), numerator=false_negative, denominator=(true_positive + false_negative))
if ((true_positive + false_negative) == 0):
dic_fn = dict(rate='N/A', numerator=false_negative, denominator=(true_positive + false_negative))
dic_class.update(dict(TruePositive=dic_tp))
dic_class.update(dict(TrueNegative=dic_tn))
dic_class.update(dict(FalsePositive=dic_fp))
dic_class.update(dict(FalseNegative=dic_fn))
return dic_class
| -1,183,738,427,082,292,700
|
Computes and returns a json object that contains the confusion matrix for each class.
:param values: Array indicating the correctness of each assignment in the ith class.
:return: Json object with confusion matrix per-class.
|
art/defences/detector/poison/ground_truth_evaluator.py
|
get_confusion_matrix
|
SecantZhang/adversarial-robustness-toolbox
|
python
|
def get_confusion_matrix(self, values: np.ndarray) -> dict:
'\n Computes and returns a json object that contains the confusion matrix for each class.\n\n :param values: Array indicating the correctness of each assignment in the ith class.\n :return: Json object with confusion matrix per-class.\n '
dic_class = {}
true_positive = np.where((values == 0))[0].shape[0]
true_negative = np.where((values == 1))[0].shape[0]
false_positive = np.where((values == 2))[0].shape[0]
false_negative = np.where((values == 3))[0].shape[0]
tp_rate = self.calculate_and_print(true_positive, (true_positive + false_negative), 'true-positive rate')
tn_rate = self.calculate_and_print(true_negative, (false_positive + true_negative), 'true-negative rate')
fp_rate = self.calculate_and_print(false_positive, (false_positive + true_negative), 'false-positive rate')
fn_rate = self.calculate_and_print(false_negative, (true_positive + false_negative), 'false-negative rate')
dic_tp = dict(rate=round(tp_rate, 2), numerator=true_positive, denominator=(true_positive + false_negative))
if ((true_positive + false_negative) == 0):
dic_tp = dict(rate='N/A', numerator=true_positive, denominator=(true_positive + false_negative))
dic_tn = dict(rate=round(tn_rate, 2), numerator=true_negative, denominator=(false_positive + true_negative))
if ((false_positive + true_negative) == 0):
dic_tn = dict(rate='N/A', numerator=true_negative, denominator=(false_positive + true_negative))
dic_fp = dict(rate=round(fp_rate, 2), numerator=false_positive, denominator=(false_positive + true_negative))
if ((false_positive + true_negative) == 0):
dic_fp = dict(rate='N/A', numerator=false_positive, denominator=(false_positive + true_negative))
dic_fn = dict(rate=round(fn_rate, 2), numerator=false_negative, denominator=(true_positive + false_negative))
if ((true_positive + false_negative) == 0):
dic_fn = dict(rate='N/A', numerator=false_negative, denominator=(true_positive + false_negative))
dic_class.update(dict(TruePositive=dic_tp))
dic_class.update(dict(TrueNegative=dic_tn))
dic_class.update(dict(FalsePositive=dic_fp))
dic_class.update(dict(FalseNegative=dic_fn))
return dic_class
|
@staticmethod
def calculate_and_print(numerator: int, denominator: int, name: str) -> float:
'\n Computes and prints the rates based on the denominator provided.\n\n :param numerator: number used to compute the rate.\n :param denominator: number used to compute the rate.\n :param name: Rate name being computed e.g., false-positive rate.\n :return: Computed rate\n '
try:
res = (100 * (numerator / float(denominator)))
logger.debug('%s: %d/%d=%.3g', name, numerator, denominator, res)
return res
except ZeroDivisionError:
logger.debug("%s: couldn't calculate %d/%d", name, numerator, denominator)
return 0.0
| -2,129,083,346,196,205,600
|
Computes and prints the rates based on the denominator provided.
:param numerator: number used to compute the rate.
:param denominator: number used to compute the rate.
:param name: Rate name being computed e.g., false-positive rate.
:return: Computed rate
|
art/defences/detector/poison/ground_truth_evaluator.py
|
calculate_and_print
|
SecantZhang/adversarial-robustness-toolbox
|
python
|
@staticmethod
def calculate_and_print(numerator: int, denominator: int, name: str) -> float:
'\n Computes and prints the rates based on the denominator provided.\n\n :param numerator: number used to compute the rate.\n :param denominator: number used to compute the rate.\n :param name: Rate name being computed e.g., false-positive rate.\n :return: Computed rate\n '
try:
res = (100 * (numerator / float(denominator)))
logger.debug('%s: %d/%d=%.3g', name, numerator, denominator, res)
return res
except ZeroDivisionError:
logger.debug("%s: couldn't calculate %d/%d", name, numerator, denominator)
return 0.0
|
def basedir_def(*args):
'Return an uninterpolated path relative to $pybasedir.'
return os.path.join('$pybasedir', *args)
| -7,844,244,356,446,634,000
|
Return an uninterpolated path relative to $pybasedir.
|
shadowfiend/common/paths.py
|
basedir_def
|
RogerYuQian/shadowfiend
|
python
|
def basedir_def(*args):
return os.path.join('$pybasedir', *args)
|
def bindir_def(*args):
'Return an uninterpolated path relative to $bindir.'
return os.path.join('$bindir', *args)
| 3,999,146,093,527,564,300
|
Return an uninterpolated path relative to $bindir.
|
shadowfiend/common/paths.py
|
bindir_def
|
RogerYuQian/shadowfiend
|
python
|
def bindir_def(*args):
return os.path.join('$bindir', *args)
|
def state_path_def(*args):
'Return an uninterpolated path relative to $state_path.'
return os.path.join('$state_path', *args)
| 2,190,866,738,796,461,000
|
Return an uninterpolated path relative to $state_path.
|
shadowfiend/common/paths.py
|
state_path_def
|
RogerYuQian/shadowfiend
|
python
|
def state_path_def(*args):
return os.path.join('$state_path', *args)
|
def basedir_rel(*args):
'Return a path relative to $pybasedir.'
return os.path.join(CONF.pybasedir, *args)
| -4,652,185,622,669,104,000
|
Return a path relative to $pybasedir.
|
shadowfiend/common/paths.py
|
basedir_rel
|
RogerYuQian/shadowfiend
|
python
|
def basedir_rel(*args):
return os.path.join(CONF.pybasedir, *args)
|
def bindir_rel(*args):
'Return a path relative to $bindir.'
return os.path.join(CONF.bindir, *args)
| -9,031,136,238,445,476,000
|
Return a path relative to $bindir.
|
shadowfiend/common/paths.py
|
bindir_rel
|
RogerYuQian/shadowfiend
|
python
|
def bindir_rel(*args):
return os.path.join(CONF.bindir, *args)
|
def state_path_rel(*args):
'Return a path relative to $state_path.'
return os.path.join(CONF.state_path, *args)
| 6,254,008,708,169,499,000
|
Return a path relative to $state_path.
|
shadowfiend/common/paths.py
|
state_path_rel
|
RogerYuQian/shadowfiend
|
python
|
def state_path_rel(*args):
return os.path.join(CONF.state_path, *args)
|
def save2csv(dst_fh, row):
'\n Appends a list with data to a dst_fh csv\n args:\n dst_fh: str, output file\n row: list, list of values to write in a row\n '
with open(dst_fh, 'a', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, delimiter=',', lineterminator='\n', quotechar='"', quoting=csv.QUOTE_MINIMAL)
try:
out.writerow(row)
except UnicodeEncodeError:
pass
| -3,922,397,457,484,646,000
|
Appends a list with data to a dst_fh csv
args:
dst_fh: str, output file
row: list, list of values to write in a row
|
src/utils.py
|
save2csv
|
BookOps-CAT/CBH-migration
|
python
|
def save2csv(dst_fh, row):
'\n Appends a list with data to a dst_fh csv\n args:\n dst_fh: str, output file\n row: list, list of values to write in a row\n '
with open(dst_fh, 'a', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, delimiter=',', lineterminator='\n', quotechar='"', quoting=csv.QUOTE_MINIMAL)
try:
out.writerow(row)
except UnicodeEncodeError:
pass
|
def test_admin_index(self):
' test index because customdashboard with MenuModule is may used'
adminindex = reverse('admin:index')
response = self.client.get(adminindex, follow=True, extra={'app_label': 'admin'})
self.assertIn(response.status_code, (200, 302))
| 8,092,868,403,751,705,000
|
test index because customdashboard with MenuModule is may used
|
pagetools/menus/tests/test_admin.py
|
test_admin_index
|
theithec/django-pagetools
|
python
|
def test_admin_index(self):
' '
adminindex = reverse('admin:index')
response = self.client.get(adminindex, follow=True, extra={'app_label': 'admin'})
self.assertIn(response.status_code, (200, 302))
|
def test_allow_inheritance_from_interface(f, s):
'Allow inheritance from interface.'
user_class = f(s.User)
user = user_class(last_login=datetime(1999, 12, 31))
assert (not user.is_active())
| -2,168,000,348,716,013,800
|
Allow inheritance from interface.
|
tests/test_subtyping.py
|
test_allow_inheritance_from_interface
|
proofit404/generics
|
python
|
def test_allow_inheritance_from_interface(f, s):
user_class = f(s.User)
user = user_class(last_login=datetime(1999, 12, 31))
assert (not user.is_active())
|
def histogram(x: np.ndarray, bins: int=10, range: Tuple[(float, float)]=(0, 10), weights: Optional[np.ndarray]=None, flow: bool=False) -> Tuple[(np.ndarray, Optional[np.ndarray], np.ndarray)]:
'Calculate the histogram for the data ``x``.\n\n Parameters\n ----------\n x : :obj:`numpy.ndarray`\n data to histogram\n bins : int\n number of bins\n range : (float, float)\n axis range\n weights : :obj:`numpy.ndarray`, optional\n array of weights for ``x``\n flow : bool\n include over and underflow content in first and last bins\n\n Returns\n -------\n count : :obj:`numpy.ndarray`\n The values of the histogram\n error : :obj:`numpy.ndarray`, optional\n The poission uncertainty on the bin heights\n edges : :obj:`numpy.ndarray`\n The bin edges\n\n Notes\n -----\n If the dtype of the ``weights`` is not the same as ``x``, then it\n is converted to the dtype of ``x``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from humba import histogram\n >>> x = np.random.randn(100000)\n >>> w = np.random.uniform(0.4, 0.5, x.shape[0])\n >>> hist1, _, edges = humba.histogram(x, bins=50, range=(-5, 5))\n >>> hist2, _, edges = humba.histogram(x, bins=50, range=(-5, 5), flow=True)\n >>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)\n >>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)\n\n '
edges = np.linspace(range[0], range[1], (bins + 1))
if (weights is not None):
assert (x.shape == weights.shape), 'x and weights must have identical shape'
if (x.dtype == np.float64):
hfunc = jits._hfloat64_weighted
elif (x.dtype == np.float32):
hfunc = jits._hfloat32_weighted
else:
raise TypeError('dtype of input must be float32 or float64')
(res, err) = hfunc(x, weights.astype(x.dtype), bins, range[0], range[1], flow)
return (res, err, edges)
else:
if (x.dtype == np.float64):
hfunc = jits._hfloat64
elif (x.dtype == np.float32):
hfunc = jits._hfloat32
else:
raise TypeError('dtype of input must be float32 or float64')
res = hfunc(x, bins, range[0], range[1], flow)
return (res, None, edges)
| 8,530,521,508,059,037,000
|
Calculate the histogram for the data ``x``.
Parameters
----------
x : :obj:`numpy.ndarray`
data to histogram
bins : int
number of bins
range : (float, float)
axis range
weights : :obj:`numpy.ndarray`, optional
array of weights for ``x``
flow : bool
include over and underflow content in first and last bins
Returns
-------
count : :obj:`numpy.ndarray`
The values of the histogram
error : :obj:`numpy.ndarray`, optional
The poission uncertainty on the bin heights
edges : :obj:`numpy.ndarray`
The bin edges
Notes
-----
If the dtype of the ``weights`` is not the same as ``x``, then it
is converted to the dtype of ``x``.
Examples
--------
>>> import numpy as np
>>> from humba import histogram
>>> x = np.random.randn(100000)
>>> w = np.random.uniform(0.4, 0.5, x.shape[0])
>>> hist1, _, edges = humba.histogram(x, bins=50, range=(-5, 5))
>>> hist2, _, edges = humba.histogram(x, bins=50, range=(-5, 5), flow=True)
>>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)
>>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)
|
humba/core.py
|
histogram
|
douglasdavis/humba
|
python
|
def histogram(x: np.ndarray, bins: int=10, range: Tuple[(float, float)]=(0, 10), weights: Optional[np.ndarray]=None, flow: bool=False) -> Tuple[(np.ndarray, Optional[np.ndarray], np.ndarray)]:
'Calculate the histogram for the data ``x``.\n\n Parameters\n ----------\n x : :obj:`numpy.ndarray`\n data to histogram\n bins : int\n number of bins\n range : (float, float)\n axis range\n weights : :obj:`numpy.ndarray`, optional\n array of weights for ``x``\n flow : bool\n include over and underflow content in first and last bins\n\n Returns\n -------\n count : :obj:`numpy.ndarray`\n The values of the histogram\n error : :obj:`numpy.ndarray`, optional\n The poission uncertainty on the bin heights\n edges : :obj:`numpy.ndarray`\n The bin edges\n\n Notes\n -----\n If the dtype of the ``weights`` is not the same as ``x``, then it\n is converted to the dtype of ``x``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from humba import histogram\n >>> x = np.random.randn(100000)\n >>> w = np.random.uniform(0.4, 0.5, x.shape[0])\n >>> hist1, _, edges = humba.histogram(x, bins=50, range=(-5, 5))\n >>> hist2, _, edges = humba.histogram(x, bins=50, range=(-5, 5), flow=True)\n >>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)\n >>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)\n\n '
edges = np.linspace(range[0], range[1], (bins + 1))
if (weights is not None):
assert (x.shape == weights.shape), 'x and weights must have identical shape'
if (x.dtype == np.float64):
hfunc = jits._hfloat64_weighted
elif (x.dtype == np.float32):
hfunc = jits._hfloat32_weighted
else:
raise TypeError('dtype of input must be float32 or float64')
(res, err) = hfunc(x, weights.astype(x.dtype), bins, range[0], range[1], flow)
return (res, err, edges)
else:
if (x.dtype == np.float64):
hfunc = jits._hfloat64
elif (x.dtype == np.float32):
hfunc = jits._hfloat32
else:
raise TypeError('dtype of input must be float32 or float64')
res = hfunc(x, bins, range[0], range[1], flow)
return (res, None, edges)
|
def mwv_histogram(x: np.ndarray, weights: np.ndarray, bins: int=10, range: Tuple[(float, float)]=(0, 10), flow: bool=False) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
'Histogram the same data but with multiple weight variations.\n\n Parameters\n ----------\n x : :obj:`numpy.ndarray`\n data to histogram\n weights : :obj:`numpy.ndarray`, optional\n multidimensional array of weights for ``x`` the first element\n of the ``shape`` attribute must be equal to the length of ``x``.\n bins : int\n number of bins\n range : (float, float)\n axis range\n flow : bool\n include over and underflow content in first and last bins\n\n Returns\n -------\n count : :obj:`numpy.ndarray`\n The values of the histograms calculated from the weights\n Shape will be (bins, ``weights.shape[0]``)\n error : :obj:`numpy.ndarray`\n The poission uncertainty on the bin heights (shape will be\n the same as ``count``.\n edges : :obj:`numpy.ndarray`\n The bin edges\n\n Notes\n -----\n If ``x`` is not the same dtype as ``weights``, then it is converted\n to the dtype of ``weights`` (for multi weight histograms we expect\n the weights array to be larger than the data array so we prefer to\n cast the smaller chunk of data).\n\n '
edges = np.linspace(range[0], range[1], (bins + 1))
assert (x.shape[0] == weights.shape[0]), 'weights shape is not compatible with x'
if (weights.dtype == np.float64):
hfunc = jits._hfloat64_multiweights
elif (weights.dtype == np.float32):
hfunc = jits._hfloat32_multiweights
else:
raise TypeError('dtype of input must be float32 or float64')
(res, err) = hfunc(x.astype(weights.dtype), weights, bins, range[0], range[1], flow)
return (res, err, edges)
| -6,720,209,729,624,591,000
|
Histogram the same data but with multiple weight variations.
Parameters
----------
x : :obj:`numpy.ndarray`
data to histogram
weights : :obj:`numpy.ndarray`, optional
multidimensional array of weights for ``x`` the first element
of the ``shape`` attribute must be equal to the length of ``x``.
bins : int
number of bins
range : (float, float)
axis range
flow : bool
include over and underflow content in first and last bins
Returns
-------
count : :obj:`numpy.ndarray`
The values of the histograms calculated from the weights
Shape will be (bins, ``weights.shape[0]``)
error : :obj:`numpy.ndarray`
The poission uncertainty on the bin heights (shape will be
the same as ``count``.
edges : :obj:`numpy.ndarray`
The bin edges
Notes
-----
If ``x`` is not the same dtype as ``weights``, then it is converted
to the dtype of ``weights`` (for multi weight histograms we expect
the weights array to be larger than the data array so we prefer to
cast the smaller chunk of data).
|
humba/core.py
|
mwv_histogram
|
douglasdavis/humba
|
python
|
def mwv_histogram(x: np.ndarray, weights: np.ndarray, bins: int=10, range: Tuple[(float, float)]=(0, 10), flow: bool=False) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
'Histogram the same data but with multiple weight variations.\n\n Parameters\n ----------\n x : :obj:`numpy.ndarray`\n data to histogram\n weights : :obj:`numpy.ndarray`, optional\n multidimensional array of weights for ``x`` the first element\n of the ``shape`` attribute must be equal to the length of ``x``.\n bins : int\n number of bins\n range : (float, float)\n axis range\n flow : bool\n include over and underflow content in first and last bins\n\n Returns\n -------\n count : :obj:`numpy.ndarray`\n The values of the histograms calculated from the weights\n Shape will be (bins, ``weights.shape[0]``)\n error : :obj:`numpy.ndarray`\n The poission uncertainty on the bin heights (shape will be\n the same as ``count``.\n edges : :obj:`numpy.ndarray`\n The bin edges\n\n Notes\n -----\n If ``x`` is not the same dtype as ``weights``, then it is converted\n to the dtype of ``weights`` (for multi weight histograms we expect\n the weights array to be larger than the data array so we prefer to\n cast the smaller chunk of data).\n\n '
edges = np.linspace(range[0], range[1], (bins + 1))
assert (x.shape[0] == weights.shape[0]), 'weights shape is not compatible with x'
if (weights.dtype == np.float64):
hfunc = jits._hfloat64_multiweights
elif (weights.dtype == np.float32):
hfunc = jits._hfloat32_multiweights
else:
raise TypeError('dtype of input must be float32 or float64')
(res, err) = hfunc(x.astype(weights.dtype), weights, bins, range[0], range[1], flow)
return (res, err, edges)
|
def template_2a_1():
'\n Returns:\n QuantumCircuit: template as a quantum circuit.\n '
qc = QuantumCircuit(1)
qc.x(0)
qc.x(0)
return qc
| 2,524,713,596,045,054,500
|
Returns:
QuantumCircuit: template as a quantum circuit.
|
qiskit/circuit/library/template_circuits/toffoli/template_2a_1.py
|
template_2a_1
|
AustinGilliam/qiskit-terra
|
python
|
def template_2a_1():
'\n Returns:\n QuantumCircuit: template as a quantum circuit.\n '
qc = QuantumCircuit(1)
qc.x(0)
qc.x(0)
return qc
|
@classmethod
def setUpClass(cls):
' Set up class-wide resources (test data) '
super(ErrorWarningTests, cls).setUpClass()
logging.getLogger('dataactcore').setLevel(logging.ERROR)
logging.getLogger('dataactvalidator').setLevel(logging.ERROR)
with create_app().app_context():
cls.monkeypatch = MonkeyPatch()
sess = GlobalDB.db().session
cls.session = sess
admin_user = sess.query(User).filter((User.email == cls.test_users['admin_user'])).one()
cls.validator = ValidationManager(directory=CONFIG_SERVICES['error_report_path'])
cls.submission_id = insert_submission(sess, admin_user.user_id, cgac_code='SYS', start_date='01/2001', end_date='03/2001', is_quarter=True)
cls.submission = sess.query(Submission).filter_by(submission_id=cls.submission_id).one()
cls.val_job = insert_job(cls.session, FILE_TYPE_DICT['appropriations'], JOB_STATUS_DICT['ready'], JOB_TYPE_DICT['csv_record_validation'], cls.submission_id, filename=JOB_TYPE_DICT['csv_record_validation'])
cls.original_reports = set(os.listdir(CONFIG_SERVICES['error_report_path']))
tas1 = TASFactory(account_num=1, allocation_transfer_agency='019', agency_identifier='072', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0306', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas2 = TASFactory(account_num=2, allocation_transfer_agency=None, agency_identifier='019', beginning_period_of_availa='2016', ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0113', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas3 = TASFactory(account_num=3, allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0406', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas4 = TASFactory(account_num=4, allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa='2010', ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0406', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas5 = TASFactory(account_num=5, allocation_transfer_agency='069', agency_identifier='013', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='2050', sub_account_code='005', internal_start_date='01-01-2000', financial_indicator2='F')
tas6 = TASFactory(account_num=6, allocation_transfer_agency='028', agency_identifier='028', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='8007', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas7 = TASFactory(account_num=7, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas8 = TASFactory(account_num=8, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2010', ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas9 = TASFactory(account_num=9, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2014', ending_period_of_availabil='2015', availability_type_code=None, main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas10 = TASFactory(account_num=10, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2015', ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000')
sess.add_all([tas1, tas2, tas3, tas4, tas5, tas6, tas7, tas8, tas9, tas10])
gtas1 = SF133Factory(tas=concat_tas_dict(tas1.component_dict()), allocation_transfer_agency='019', agency_identifier='072', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0306', sub_account_code='000', period=6, fiscal_year=2001)
gtas2 = SF133Factory(tas=concat_tas_dict(tas2.component_dict()), allocation_transfer_agency=None, agency_identifier='019', beginning_period_of_availa='2016', line=1009, ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0113', sub_account_code='000', period=6, fiscal_year=2001)
gtas3 = SF133Factory(tas=concat_tas_dict(tas3.component_dict()), allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
gtas4 = SF133Factory(tas=concat_tas_dict(tas4.component_dict()), allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa='2010', line=1009, ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
gtas5 = SF133Factory(tas=concat_tas_dict(tas5.component_dict()), allocation_transfer_agency='069', agency_identifier='013', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='2050', sub_account_code='005', period=6, fiscal_year=2001)
gtas6 = SF133Factory(tas=concat_tas_dict(tas6.component_dict()), allocation_transfer_agency='028', agency_identifier='028', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='8007', sub_account_code='000', period=6, fiscal_year=2001)
gtas7 = SF133Factory(tas=concat_tas_dict(tas7.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas8 = SF133Factory(tas=concat_tas_dict(tas8.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2010', line=1009, ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas9 = SF133Factory(tas=concat_tas_dict(tas9.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2014', line=1009, ending_period_of_availabil='2015', availability_type_code=None, main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas10 = SF133Factory(tas=concat_tas_dict(tas10.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2015', line=1009, ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
sess.add_all([gtas1, gtas2, gtas3, gtas4, gtas5, gtas6, gtas7, gtas8, gtas9, gtas10])
sess.commit()
| -1,095,766,638,879,940,900
|
Set up class-wide resources (test data)
|
tests/integration/error_warning_file_tests.py
|
setUpClass
|
RonSherfey/data-act-broker-backend
|
python
|
@classmethod
def setUpClass(cls):
' '
super(ErrorWarningTests, cls).setUpClass()
logging.getLogger('dataactcore').setLevel(logging.ERROR)
logging.getLogger('dataactvalidator').setLevel(logging.ERROR)
with create_app().app_context():
cls.monkeypatch = MonkeyPatch()
sess = GlobalDB.db().session
cls.session = sess
admin_user = sess.query(User).filter((User.email == cls.test_users['admin_user'])).one()
cls.validator = ValidationManager(directory=CONFIG_SERVICES['error_report_path'])
cls.submission_id = insert_submission(sess, admin_user.user_id, cgac_code='SYS', start_date='01/2001', end_date='03/2001', is_quarter=True)
cls.submission = sess.query(Submission).filter_by(submission_id=cls.submission_id).one()
cls.val_job = insert_job(cls.session, FILE_TYPE_DICT['appropriations'], JOB_STATUS_DICT['ready'], JOB_TYPE_DICT['csv_record_validation'], cls.submission_id, filename=JOB_TYPE_DICT['csv_record_validation'])
cls.original_reports = set(os.listdir(CONFIG_SERVICES['error_report_path']))
tas1 = TASFactory(account_num=1, allocation_transfer_agency='019', agency_identifier='072', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0306', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas2 = TASFactory(account_num=2, allocation_transfer_agency=None, agency_identifier='019', beginning_period_of_availa='2016', ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0113', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas3 = TASFactory(account_num=3, allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0406', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas4 = TASFactory(account_num=4, allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa='2010', ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0406', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas5 = TASFactory(account_num=5, allocation_transfer_agency='069', agency_identifier='013', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='2050', sub_account_code='005', internal_start_date='01-01-2000', financial_indicator2='F')
tas6 = TASFactory(account_num=6, allocation_transfer_agency='028', agency_identifier='028', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='8007', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas7 = TASFactory(account_num=7, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa=None, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas8 = TASFactory(account_num=8, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2010', ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas9 = TASFactory(account_num=9, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2014', ending_period_of_availabil='2015', availability_type_code=None, main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000', financial_indicator2='F')
tas10 = TASFactory(account_num=10, allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2015', ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0100', sub_account_code='000', internal_start_date='01-01-2000')
sess.add_all([tas1, tas2, tas3, tas4, tas5, tas6, tas7, tas8, tas9, tas10])
gtas1 = SF133Factory(tas=concat_tas_dict(tas1.component_dict()), allocation_transfer_agency='019', agency_identifier='072', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0306', sub_account_code='000', period=6, fiscal_year=2001)
gtas2 = SF133Factory(tas=concat_tas_dict(tas2.component_dict()), allocation_transfer_agency=None, agency_identifier='019', beginning_period_of_availa='2016', line=1009, ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0113', sub_account_code='000', period=6, fiscal_year=2001)
gtas3 = SF133Factory(tas=concat_tas_dict(tas3.component_dict()), allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
gtas4 = SF133Factory(tas=concat_tas_dict(tas4.component_dict()), allocation_transfer_agency=None, agency_identifier='028', beginning_period_of_availa='2010', line=1009, ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
gtas5 = SF133Factory(tas=concat_tas_dict(tas5.component_dict()), allocation_transfer_agency='069', agency_identifier='013', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='2050', sub_account_code='005', period=6, fiscal_year=2001)
gtas6 = SF133Factory(tas=concat_tas_dict(tas6.component_dict()), allocation_transfer_agency='028', agency_identifier='028', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='8007', sub_account_code='000', period=6, fiscal_year=2001)
gtas7 = SF133Factory(tas=concat_tas_dict(tas7.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa=None, line=1009, ending_period_of_availabil=None, availability_type_code='X', main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas8 = SF133Factory(tas=concat_tas_dict(tas8.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2010', line=1009, ending_period_of_availabil='2011', availability_type_code=None, main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas9 = SF133Factory(tas=concat_tas_dict(tas9.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2014', line=1009, ending_period_of_availabil='2015', availability_type_code=None, main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas10 = SF133Factory(tas=concat_tas_dict(tas10.component_dict()), allocation_transfer_agency=None, agency_identifier='049', beginning_period_of_availa='2015', line=1009, ending_period_of_availabil='2016', availability_type_code=None, main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
sess.add_all([gtas1, gtas2, gtas3, gtas4, gtas5, gtas6, gtas7, gtas8, gtas9, gtas10])
sess.commit()
|
def setUp(self):
'Test set-up.'
super(ErrorWarningTests, self).setUp()
| 4,622,662,541,926,604,000
|
Test set-up.
|
tests/integration/error_warning_file_tests.py
|
setUp
|
RonSherfey/data-act-broker-backend
|
python
|
def setUp(self):
super(ErrorWarningTests, self).setUp()
|
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.List = channel.unary_unary('/api.OrganizationService/List', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.FromString)
self.Get = channel.unary_unary('/api.OrganizationService/Get', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.FromString)
self.Create = channel.unary_unary('/api.OrganizationService/Create', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.FromString)
self.Update = channel.unary_unary('/api.OrganizationService/Update', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.Delete = channel.unary_unary('/api.OrganizationService/Delete', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.ListUsers = channel.unary_unary('/api.OrganizationService/ListUsers', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.FromString)
self.GetUser = channel.unary_unary('/api.OrganizationService/GetUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.FromString)
self.AddUser = channel.unary_unary('/api.OrganizationService/AddUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.UpdateUser = channel.unary_unary('/api.OrganizationService/UpdateUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.DeleteUser = channel.unary_unary('/api.OrganizationService/DeleteUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
| -7,631,543,553,442,211,000
|
Constructor.
Args:
channel: A grpc.Channel.
|
python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
|
__init__
|
GaiaFL/chirpstack-api
|
python
|
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.List = channel.unary_unary('/api.OrganizationService/List', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.FromString)
self.Get = channel.unary_unary('/api.OrganizationService/Get', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.FromString)
self.Create = channel.unary_unary('/api.OrganizationService/Create', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.FromString)
self.Update = channel.unary_unary('/api.OrganizationService/Update', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.Delete = channel.unary_unary('/api.OrganizationService/Delete', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.ListUsers = channel.unary_unary('/api.OrganizationService/ListUsers', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.FromString)
self.GetUser = channel.unary_unary('/api.OrganizationService/GetUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.SerializeToString, response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.FromString)
self.AddUser = channel.unary_unary('/api.OrganizationService/AddUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.UpdateUser = channel.unary_unary('/api.OrganizationService/UpdateUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
self.DeleteUser = channel.unary_unary('/api.OrganizationService/DeleteUser', request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
|
def List(self, request, context):
'Get organization list.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
| -4,753,088,458,980,007,000
|
Get organization list.
|
python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
|
List
|
GaiaFL/chirpstack-api
|
python
|
def List(self, request, context):
'\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
def Get(self, request, context):
'Get data for a particular organization.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
| 4,950,086,142,057,745,000
|
Get data for a particular organization.
|
python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
|
Get
|
GaiaFL/chirpstack-api
|
python
|
def Get(self, request, context):
'\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
def Create(self, request, context):
'Create a new organization.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
| -730,394,343,294,820,000
|
Create a new organization.
|
python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
|
Create
|
GaiaFL/chirpstack-api
|
python
|
def Create(self, request, context):
'\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.